├── .gitignore ├── README.md ├── Vagrantfile ├── air-gap.d ├── README.md └── examples │ ├── README.md │ ├── add2hosts.in │ └── air-gapped-registries.conf ├── boxfiles ├── generate_key.sh ├── motd ├── nginx.conf ├── prep_box.sh ├── setup_lb.sh └── setup_nfs_server.sh ├── cluster.sh ├── config.yml ├── deploy ├── 00.prep_environment.sh ├── 01.init_cluster.sh ├── 02.bootstrap_cluster.sh ├── 03.add_masters.sh ├── 04.add_workers.sh ├── 05.setup_helm.sh ├── 06.add_k8s_nfs-sc.sh ├── 07.add_dashboard.sh ├── 08.add_metallb.sh ├── 100.prep_airgap.sh ├── 20.nginx-ingress.sh ├── 21.monitor.sh ├── 22.add_stratos.sh ├── 98.status.sh ├── 99.run-all.sh ├── grafana-dashboards-caasp-cluster.yaml └── grafana-datasources.yaml ├── deploy_caasp.sh ├── destroy_caasp.sh ├── k8s_tests └── test_nfs_pvc.yaml ├── lib.sh ├── libvirt_setup ├── add_hosts_to_net.sh ├── delete_box.sh ├── openSUSE_vagrant_setup.sh ├── update_firewall.sh └── vagrant-libvirt.xml ├── openstack ├── 000.zypper_repos.sh ├── 001.install_packages.sh ├── 002.setup_hosts.sh ├── 050.install_helm_service.sh ├── 051.setup_openstack_client.sh ├── 052.clone_upstream_helm_charts.sh ├── 053.patch-03-ingress.sh ├── 101.setup_kube_stuff.sh ├── 102.deploy_ingress.sh ├── 103.deploy_nfs.sh ├── 104.deploy_mariadb.sh ├── 105.deploy_rabbitMQ.sh ├── 106.deploy_memcached.sh ├── 107.deploy_keystone.sh ├── 108.deploy_heat.sh ├── 109.deploy_horizon.sh ├── 110.deploy_glance.sh ├── 111.deploy_cinder.sh ├── 112.deploy_openvswitch.sh ├── 113.deploy_libvirt.sh ├── 114.deploy_compute_kit.sh ├── 115.setup_public_gateway.sh ├── 200.run-all.sh ├── README.md ├── discover_dashboard_port.sh └── patch-030-ingress.patch ├── rook ├── examples │ ├── mysql.yaml │ ├── test-cephfs-webserver.yaml │ └── wordpress.yaml ├── filesystem.yaml ├── rook_cephfs_setup.sh ├── rook_setup.sh ├── rook_status.sh ├── sc-cephfs.yaml ├── sc.yaml └── switch_default_sc_to_ses.sh └── utils.sh /.gitignore: -------------------------------------------------------------------------------- 1 | .vagrant 2 | *.sw? 3 | *.box 4 | cluster 5 | caasp_env.conf 6 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # vagrant-caasp -- BETA 2 | An automated deployment of SUSE CaaS Platform (Kubernetes) v4.5 for testing. 3 | 4 | This project is a work in progress and will be cleaned up after some testing and feedback. 5 | Feel free to open issues and/or submit PRs. 
6 | 7 | # What you get 8 | * (1-2) Load balancers 9 | * (1-3) Masters 10 | * (1-5) Workers 11 | * (1) Storage node setup with an NFS export for the nfs-client storage provisioner 12 | * (1) Kubernetes Dashboard deployment 13 | * (1) MetalLB instance 14 | * (1) Optional Rook / Ceph / SES setup 15 | 16 | # ASSUMPTIONS 17 | * You're running openSUSE Tumbleweed or Leap 15+ 18 | * You have at least 8GB of RAM to spare 19 | * You have the ability to run VMs with KVM 20 | * You have an internet connection (images pull from internet, box comes from download.suse.de) 21 | * DNS works on your system hosting the virtual machines (if getent hosts \`hostname -s\` hangs, you will encounter errors) 22 | * You enjoy troubleshooting :P 23 | 24 | # INSTALLATION (As root) 25 | ```sh 26 | sysctl -w net.ipv6.conf.all.disable_ipv6=1 # rubygems.org has had issues pulling via IPv6 27 | git clone https://github.com/sigsteve/vagrant-caasp 28 | cd vagrant-caasp 29 | # Install dependent packages and configure vagrant-libvirt 30 | ./libvirt_setup/openSUSE_vagrant_setup.sh 31 | ``` 32 | 33 | # NETWORK SETUP (As root) 34 | ```sh 35 | # Make sure ip forwarding is enabled for the proper interfaces 36 | # Fresh vagrant-libvirt setup 37 | virsh net-create ./libvirt_setup/vagrant-libvirt.xml 38 | # _OR_ if you already have the vagrant-libvirt network 39 | ./libvirt_setup/add_hosts_to_net.sh 40 | # Update host firewall (if applicable) 41 | ./libvirt_setup/update_firewall.sh 42 | ``` 43 | 44 | # ADD BOX (As root) 45 | ```sh 46 | # Find the latest box at http://download.suse.de/ibs/home:/sbecht:/vc-test:/SLE-15-SP1/images/ 47 | vagrant box add vagrant-caasp \ 48 | http://download.suse.de/ibs/home:/sbecht:/vc-test:/SLE-15-SP1/images/ 49 | # _OR_ 50 | # wget/curl the box and 'vagrant box add vagrant-caasp ' 51 | ``` 52 | 53 | # OPTIONAL -- running as a user other than root 54 | ```sh 55 | # Become root (su), then 56 | echo "someuser ALL=(ALL) NOPASSWD: ALL" >/etc/sudoers.d/someuser 57 | visudo -c -f /etc/sudoers.d/someuser 58 | # Add user to libvirt group 59 | usermod --append --groups libvirt someuser 60 | su - someuser 61 | vagrant plugin install vagrant-libvirt 62 | # ssh-keygen if you don't have one already 63 | ssh-copy-id root@localhost 64 | # Add any boxes (if you have boxes installed as other users, you'll need to add them here) 65 | vagrant box add [boxname] /path/to/boxes 66 | ``` 67 | 68 | # USAGE 69 | Examine the config.yml to view the model to choose for the size of each VM. 70 | The config.yml configures the amount of RAM and CPUs for each type of vm as 71 | well as the number of vms for each type: 72 | master, workers, load balancers, storage 73 | 74 | The current model list is 75 | minimal, small, medium, large 76 | 77 | The `deploy_caasp.sh` must be run as either `root` or `sles` user. 78 | 79 | ```sh 80 | # Initial deployment 81 | cd vagrant-caasp 82 | ./deploy_caasp.sh -m < --full > < -a > 83 | # -a will deploy air-gap/registry mirror settings prior to SUSE CaaSP cluster deployment 84 | # --full will attempt to bring the machines up and deploy the cluster. 85 | # Please adjust your memory settings in the config.yml for each machine type. 86 | # Do not run vagrant up, unless you know what you're doing and want the result 87 | Usage deploy_caasp.sh [options..] 
88 | -m, --model Which config.yml model to use for vm sizing 89 | Default: "minimal" 90 | -f, --full attempt to bring the machines up and deploy the cluster 91 | -a, --air-gapped Setup CaaSP nodes with substitute registries (for deployment and/or private image access) 92 | -i, --ignore-memory Don't prompt when over allocating memory 93 | -t, --test Do a dry run, don't actually deploy the vms 94 | -v, --verbose [uint8] Verbosity level to pass to skuba -v (default is 1) 95 | -h,-?, --help Show help 96 | ``` 97 | 98 | Once you have a CaaSP cluster provisioned you can start and stop that cluster by using the `cluster.sh` script 99 | ```sh 100 | Usage cluster.sh [options..] [command] 101 | -v, --verbose Make the operation more talkative 102 | -h,-?, --help Show help and exit 103 | 104 | start start a previosly provisioned cluster 105 | stop stop a running cluster 106 | 107 | dashboardInfo get Dashboard IP, PORT and Token 108 | monitoringInfo get URLs and credentials for monitoring stack 109 | ``` 110 | 111 | # INSTALLING CAASP (one step at a time) 112 | After running `deploy_caasp.sh -m ` without the --full option, do the following. 113 | ```sh 114 | vagrant ssh caasp4-master-1 115 | sudo su - sles 116 | cd /vagrant/deploy 117 | # source this 118 | source ./00.prep_environment.sh 119 | # skuba init 120 | ./01.init_cluster.sh 121 | # skuba bootstrap (setup caasp4-master-1) 122 | ./02.bootstrap_cluster.sh 123 | # add extra masters (if masters > 1) 124 | ./03.add_masters.sh 125 | # add workers 126 | ./04.add_workers.sh 127 | # setup helm 128 | ./05.setup_helm.sh 129 | # wait for tiller to come up... Can take a few minutes. 130 | # add NFS storage class (via helm) 131 | ./06.add_k8s_nfs-sc.sh 132 | # add Kubernetes Dashboard 133 | ./07.add_dashboard.sh 134 | # add MetalLB 135 | ./08.add_metallb.sh 136 | ``` 137 | # INSTALLING CAASP (all at once) 138 | ```sh 139 | vagrant ssh caasp4-master-1 140 | sudo su - sles 141 | cd /vagrant/deploy 142 | ./99.run-all.sh 143 | ``` 144 | # Rook + SES / Ceph 145 | ```sh 146 | # For rook, you must deploy with a model that has a tag with _rook. 147 | # See config.yml large_rook for example. 148 | # This will handle all setup and configuration for you. 149 | # Currently the default storage class will remain NFS. 150 | # 151 | # To make SES your default storage class: 152 | /vagrant/rook/switch_default_sc_to_ses.sh 153 | # To see status: 154 | /vagrant/rook/rook_status.sh 155 | 156 | # To use CephFS you must create pools and a filesystem associated. 
157 | # To quickly set it up for use and testing you can execute this script 158 | /vagrant/rook/rook_cephfs_setup.sh 159 | 160 | # Example cephfs app at /vagrant/rook/examples/test-cephfs-webserver.yaml 161 | ``` 162 | # OPENSTACK 163 | (details to be documented) 164 | 165 | # CAP 166 | (details to be documented) 167 | 168 | # EXAMPLES 169 | * FULL DEPLOY 170 | [![asciicast](https://asciinema.org/a/pBBBZUKQINb3CwhaVwiTk0Gvx.svg)](https://asciinema.org/a/pBBBZUKQINb3CwhaVwiTk0Gvx) 171 | 172 | * INSTALL 173 | 174 | * DESTROY 175 | ```sh 176 | ./destroy_caasp.sh 177 | ``` 178 | 179 | # NOTES 180 | 181 | 182 | -------------------------------------------------------------------------------- /Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- # vi: set ft=ruby : 2 | 3 | require 'yaml' 4 | require 'pp' 5 | 6 | config_file = 'config.yml' 7 | config_yml = YAML.load_file(config_file) 8 | 9 | # CONFIGURATION='minimal' 10 | # CONFIGURATION='medium' 11 | # CONFIGURATION='large' 12 | # 13 | # Set CAASP_CONFIG_MODEL in your shell env 14 | # to specify which model to use from config.yml 15 | # When running deploy_caasp.sh, specify the model 16 | # with -m 17 | # 18 | # ./deploy_caasp.sh --model large --full 19 | CONFIG_MODEL=ENV.has_key?('CAASP_CONFIG_MODEL') ? ENV["CAASP_CONFIG_MODEL"] : 'minimal' 20 | 21 | Vagrant.configure("2") do |config| 22 | domain = "suselab.com" 23 | lbcount = 2 24 | mastercount = 3 25 | workercount = 5 26 | storagecount = 1 27 | 28 | 1.upto(*mastercount) do |i| 29 | config.vm.define "caasp4-master-#{i}" do |sle| 30 | sle.vm.box = "vagrant-caasp" 31 | sle.vm.hostname = "caasp4-master-#{i}.#{domain}" 32 | sle.vm.provision "shell", inline: "hostnamectl set-hostname #{sle.vm.hostname}" 33 | #sle.vm.provision "shell", inline: "kubeadm config images pull" 34 | sle.vm.provision "shell", inline: "/vagrant/boxfiles/prep_box.sh" 35 | sle.vm.synced_folder ".", "/vagrant", disabled: false, type: "nfs", 36 | :mount_options => ['noatime,soft,nfsvers=3'], 37 | linux__nfs_options: ['rw','no_subtree_check','no_root_squash','async'] 38 | sle.vm.provider :libvirt do |lv| 39 | lv.management_network_mac = "52:50:05:AA:01:0#{i}" 40 | lv.memory = config_yml[CONFIG_MODEL]['nodes']['master']['memory'] 41 | lv.cpus = config_yml[CONFIG_MODEL]['nodes']['master']['cpus'] 42 | extra_disks = config_yml[CONFIG_MODEL]['nodes']['master']['extra_disks'] 43 | if extra_disks > 0 44 | (1..extra_disks).each do |disk_num| 45 | lv.storage :file, :size => config_yml[CONFIG_MODEL]['nodes']['master']['disk_size'] || '40G' 46 | end 47 | end 48 | end 49 | end 50 | end 51 | 52 | 1.upto(*workercount) do |i| 53 | config.vm.define "caasp4-worker-#{i}" do |sle| 54 | sle.vm.box = "vagrant-caasp" 55 | sle.vm.hostname = "caasp4-worker-#{i}.#{domain}" 56 | sle.vm.provision "shell", inline: "hostnamectl set-hostname #{sle.vm.hostname}" 57 | sle.vm.provision "shell", inline: "/vagrant/boxfiles/prep_box.sh" 58 | sle.vm.synced_folder ".", "/vagrant", disabled: false, type: "nfs", 59 | :mount_options => ['noatime,soft,nfsvers=3'], 60 | linux__nfs_options: ['rw','no_subtree_check','no_root_squash','async'] 61 | sle.vm.provider :libvirt do |lv| 62 | lv.management_network_mac = "52:50:05:AA:02:0#{i}" 63 | extra_disks = config_yml[CONFIG_MODEL]['nodes']['worker']['extra_disks'] 64 | if extra_disks > 0 65 | (1..extra_disks).each do |disk_num| 66 | lv.storage :file, :size => config_yml[CONFIG_MODEL]['nodes']['worker']['disk_size'] || '40G' 67 | end 68 | end 69 | lv.memory = 
config_yml[CONFIG_MODEL]['nodes']['worker']['memory'] 70 | lv.cpus = config_yml[CONFIG_MODEL]['nodes']['worker']['cpus'] 71 | end 72 | end 73 | end 74 | 75 | 1.upto(*lbcount) do |i| 76 | config.vm.define "caasp4-lb-#{i}" do |sle| 77 | sle.vm.box = "vagrant-caasp" 78 | sle.vm.hostname = "caasp4-lb-#{i}.#{domain}" 79 | sle.vm.provision "shell", inline: "hostnamectl set-hostname #{sle.vm.hostname}" 80 | sle.vm.provision "shell", inline: "/vagrant/boxfiles/prep_box.sh" 81 | sle.vm.provision "shell", inline: "/vagrant/boxfiles/setup_lb.sh" 82 | sle.vm.synced_folder ".", "/vagrant", disabled: false, type: "nfs" 83 | sle.vm.provider :libvirt do |lv| 84 | lv.management_network_mac = "52:50:05:AA:03:0#{i}" 85 | lv.memory = config_yml[CONFIG_MODEL]['nodes']['loadbalancer']['memory'] 86 | lv.cpus = config_yml[CONFIG_MODEL]['nodes']['loadbalancer']['cpus'] 87 | extra_disks = config_yml[CONFIG_MODEL]['nodes']['loadbalancer']['extra_disks'] 88 | if extra_disks > 0 89 | (1..extra_disks).each do |disk_num| 90 | lv.storage :file, :size => config_yml[CONFIG_MODEL]['nodes']['loadbalancer']['disk_size'] || '40G' 91 | end 92 | end 93 | end 94 | end 95 | end 96 | 97 | 1.upto(*storagecount) do |i| 98 | config.vm.define "caasp4-storage-#{i}" do |sle| 99 | sle.vm.box = "vagrant-caasp" 100 | sle.vm.hostname = "caasp4-storage-#{i}.#{domain}" 101 | sle.vm.provision "shell", inline: "hostnamectl set-hostname #{sle.vm.hostname}" 102 | sle.vm.provision "shell", inline: "/vagrant/boxfiles/prep_box.sh" 103 | sle.vm.provision "shell", inline: "/vagrant/boxfiles/setup_nfs_server.sh" 104 | sle.vm.synced_folder ".", "/vagrant", disabled: false, type: "nfs" 105 | sle.vm.provider :libvirt do |lv| 106 | lv.management_network_mac = "52:50:05:AA:04:0#{i}" 107 | lv.memory = config_yml[CONFIG_MODEL]['nodes']['storage']['memory'] 108 | lv.cpus = config_yml[CONFIG_MODEL]['nodes']['storage']['cpus'] 109 | extra_disks = config_yml[CONFIG_MODEL]['nodes']['storage']['extra_disks'] 110 | if extra_disks > 0 111 | (1..extra_disks).each do |disk_num| 112 | lv.storage :file, :size => config_yml[CONFIG_MODEL]['nodes']['storage']['disk_size'] || '40G' 113 | end 114 | end 115 | end 116 | end 117 | end 118 | end 119 | -------------------------------------------------------------------------------- /air-gap.d/README.md: -------------------------------------------------------------------------------- 1 | # Air-gapped/Mirrored/Private Registry Setup for vagrant-caasp -- BETA 2 | A guide for adding registry (mirrors) to allow "no internet" deployment of CaaSP 3 | and supporting images with vagrant-caasp. 4 | 5 | This (as with vagrant-caasp) is a work in progress and will be refined up after 6 | some testing and feedback. 7 | 8 | Feel free to open issues and/or submit PRs. 9 | 10 | # What you get 11 | * Ability to install CaaSP using an alternate registry (NOT registry.suse.com) 12 | * Supports secure registries 13 | 14 | # ASSUMPTIONS 15 | * You're already using vagrant-caasp 16 | * You have a registry mirror with at least the required CaaSP deployment images. 17 | (skuba cluster images) 18 | * You enjoy troubleshooting :P 19 | 20 | This directory is referenced by the vagrant-caasp/deploy_caasp.sh script via the 21 | -a (or --air-gap) command line options. 
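For example, a minimal invocation (assuming an air-gapped-registries.conf has already been prepared under ./air-gap.d/, as described in ./examples):

```sh
# Deploy using the default "minimal" model with the registry mirror settings applied
./deploy_caasp.sh -m minimal --full -a
```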
22 | 23 | # See ./examples Directory for more information 24 | 25 | 26 | -------------------------------------------------------------------------------- /air-gap.d/examples/README.md: -------------------------------------------------------------------------------- 1 | # Example Files for Air-Gapped / Registry Mirror Setup 2 | >This directory (./air-gap.d/examples/) contains some example configuration files used for setting up vagrant-caasp 3 | to use a registry mirror instead of the default _registry.suse.com_ to deploy SUSE CaaSP (or other containers for 4 | demos, etc). 5 | 6 | >There are a variety of "air-gap" designs that could be leveraged for a CaaSP deployment, with or without further 7 | environment complexities (http proxy, etc). 8 | 9 | >This method is reconfiguring cri-o's registry sources and specifying a registry and host that is to used as a 10 | redirect/mirror. It allows for secure and insecure registry configurations and accounts for /etc/hosts manipulation 11 | if needed. 12 | 13 | **NOTE:** 14 | The vagrant-caasp command-line option for air-gap/registry mirror setup : '-a' **REQUIRES** at least one configuration 15 | file to be prepared for it to work! See below. 16 | 17 | # Files 18 | * air-gapped-registries.conf (**required** for deploy_caasp.sh '-a' parameter to succeed.) 19 | * registry-ca.crt (optional) 20 | * add2hosts.in (optional) 21 | 22 | # air-gapped-registries.conf 23 | >This file is a replacement file for cri-o's /etc/containers/registries.conf. It should follow the configuration 24 | guidelines in the cri-o documentation. The example file can be modified to point to your registry mirror (change 25 | entries for 'rmtreg151.susetest.com:5000'). Format is **TOML** (format 2). 26 | 27 | References here: 28 | 29 | [SUSE CaaSP Admin Guide](https://documentation.suse.com/suse-caasp/4.0/html/caasp-admin/_miscellaneous.html#_configuring_container_registries_for_cri_o) 30 | 31 | [Github Reference Docs](https://raw.githubusercontent.com/containers/image/master/docs/containers-registries.conf.5.md "From GitHub") 32 | 33 | >First line in the example _air-gapped-registries.conf_ file points to the registry as a target for "unqualified-search- 34 | registries". This is a catch-all for any request that can't be serviced by the defined [[registry]] entries that follow. 35 | 36 | >Each [[registry]] in the example specifies a "mirror" location that will redirect any request that is made against the 37 | listed "location" target. For example, the first [[registry]] is a redirection for any image request located at 38 | 'registry.suse.com/caasp/v4'. These requests will instead be directed to the rmtreg151.susetest.com host. Notice the 39 | location includes the original directory appended to it here. This will depend on how the caasp images were mirrored 40 | and their hierarchy within the registry. 41 | 42 | >The example also has the 'insecure = true' setting for the 'location' - i.e. does it require authentication? Note the 43 | mirror location is also defined as 'insecure'. If you have a secure registry, you can include the ca certificate that 44 | was used to generate the certificate securing it, and the air-gap function will take care of it (see cert explanations below). 45 | 46 | >The 'unqualified-search' indicates that this registry should not be referenced as a catch-all location for images that 47 | can't be found in the list of defined registries. The global target for 'unqualified-search-registries' is already set (above). 
48 | 49 | **Registry Example in air-gapped-registries.conf:** 50 | 51 | unqualified-search-registries = ["rmtreg151.susetest.com:5000"] 52 | 53 | `[[registry]] 54 | blocked = false 55 | insecure = true 56 | location = "registry.suse.com/caasp/v4" 57 | mirror = [ {location = "rmtreg151.susetest.com:5000/registry.suse.com/caasp/v4", insecure = true}] 58 | unqualified-search = false 59 | ` 60 | 61 | # registry-ca.crt 62 | >This optional file is a copy of the CA certificate file that came from the CA that generated the server certificate 63 | that is used for a secure registry. By putting a copy of this file into the _/vagrant-caasp/air-gap.d/registry-ca.crt_, the air-gap option for deploy_caasp.sh will copy this into the CaaSP cluster nodes and update their ca certificate 64 | trusts. This will allow for secure communications during image retrievals. 65 | 66 | **NOTE:** 67 | The _registry-ca.crt_ **needs to be owned by root:root with 0640 POSIX rights,** the update-ca-certificates process requires this. 68 | 69 | # add2hosts.in 70 | >This optional file contains text entries that can be appended to the CaaSP nodes _/etc/hosts_ files. This allows for 71 | supplemental DNS resolution when adding registry mirrors or other infrastructure you need for your vagrant-caasp 72 | deployments. 73 | 74 | Recommend adding at least your custom registry mirror so it can be resolved by name. Format must be compatible with /etc/hosts standard (IP Address Hostname Alias). 75 | 76 | --- 77 | 78 | # Procedure 79 | **(Required)** 80 | 81 | ``` 82 | Copy the ./air-gap.d/examples/air-gapped-registries.conf into the parent directory, 83 | ./air-gap.d/ 84 | ``` 85 | 86 | >Modify the example to point to your private registry and namespaces (where the SUSE 87 | CaaS Platform installation images are mirrored. (hint: you can use 'skuba cluster 88 | images' command to list the image and version tag for a particular CaaSP release. 89 | e.g. skuba cluster images | awk '/1.16.2/ {print $2}' - make sure they are available 90 | before you kick off a deployment) 91 | 92 | 93 | **(Optional)** 94 | 95 | ``` 96 | Add a copy of your CA certificate file (into the ./air-gap.d/ directory) 97 | and name it registry-ca.crt. This is the public certificate of the CA that 98 | signed the server certificate used to protect the private registry/mirror. 99 | ``` 100 | 101 | **Note:** This is the public certificate of the CA that signed the server certificate used to protect the private registry/mirror. 102 | 103 | 104 | **(Optional)** 105 | 106 | ``` 107 | Add a file with hosts entries (for your registry or other hosts) to append 108 | them to the SUSE CaaSP nodes. You must name this file 'add2hosts.in'. 109 | ``` 110 | 111 | >This really helps ensure your master and worker nodes can find your host (by name), 112 | which is important if you are using a cert with a DNS entry (or at least a subj alt name). 113 | 114 | 115 | --- 116 | 117 | Run your vagrant-caasp deployment as documented: 118 | 119 | 120 | # Initial deployment 121 | 122 | 123 | cd vagrant-caasp 124 | 125 | ./deploy_caasp.sh -m < --full > < -a > 126 | # -a will deploy air-gap/registry mirror settings prior to SUSE CaaSP cluster deployment 127 | # --full will attempt to bring the machines up and deploy the cluster. 128 | # Please adjust your memory settings in the config.yml for each machine type. 129 | # Do not run vagrant up, unless you know what you're doing and want the result 130 | 131 | Usage deploy_caasp.sh [options..] 
132 | -m, --model Which config.yml model to use for vm sizing 133 | Default: "minimal" 134 | -f, --full attempt to bring the machines up and deploy the cluster 135 | -a, --air-gapped Setup CaaSP nodes with substitute registries (for deployment and/or private image access) 136 | -i, --ignore-memory Don't prompt when over allocating memory 137 | -t, --test Do a dry run, don't actually deploy the vms 138 | -v, --verbose [uint8] Verbosity level to pass to skuba -v (default is 1) 139 | -h,-?, --help Show help 140 | 141 | Refer to vagrant-caasp GitHub repository for more information about the ./deploy_caasp.sh 142 | script and options. 143 | 144 | 145 | # NOTES 146 | 147 | 148 | -------------------------------------------------------------------------------- /air-gap.d/examples/add2hosts.in: -------------------------------------------------------------------------------- 1 | # air-gapped registry running on : 2 | 10.0.0.1 your-registry.your-domain.com 3 | # official SUSE image registry : 4 | #52.58.141.149 registry.suse.com 5 | 6 | -------------------------------------------------------------------------------- /air-gap.d/examples/air-gapped-registries.conf: -------------------------------------------------------------------------------- 1 | unqualified-search-registries = ["your-registry.your-domain.com:5000"] 2 | 3 | [[registry]] 4 | blocked = false 5 | insecure = true 6 | location = "registry.suse.com/caasp/v4" 7 | mirror = [ {location = "your-registry.your-domain.com:5000/registry.suse.com/caasp/v4", insecure = true}] 8 | unqualified-search = false 9 | 10 | [[registry]] 11 | blocked = false 12 | insecure = true 13 | location = "registry.suse.com/cap" 14 | mirror = [ {location = "your-registry.your-domain.com:5000/registry.suse.com/cap", insecure = true}] 15 | unqualified-search = false 16 | 17 | [[registry]] 18 | blocked = false 19 | insecure = true 20 | location = "k8s.gcr.io" 21 | mirror = [ {location = "your-registry.your-domain.com:5000/k8s.gcr.io", insecure = true}] 22 | unqualified-search = false 23 | 24 | [[registry]] 25 | blocked = false 26 | insecure = true 27 | location = "docker.io" 28 | mirror = [ {location = "your-registry.your-domain.com:5000/docker.io", insecure = true}] 29 | unqualified-search = false 30 | 31 | [[registry]] 32 | blocked = false 33 | insecure = true 34 | location = "quay.io" 35 | mirror = [ {location = "your-registry.your-domain.com:5000/quay.io", insecure = true}] 36 | unqualified-search = false 37 | 38 | -------------------------------------------------------------------------------- /boxfiles/generate_key.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -x 2 | if [ ! -f cluster/caasp4-id ]; then 3 | ssh-keygen -t rsa -f cluster/caasp4-id -P '' 4 | chown sles:users cluster/caasp4-id 5 | fi 6 | 7 | -------------------------------------------------------------------------------- /boxfiles/motd: -------------------------------------------------------------------------------- 1 | 2 | 3 | Welcome to your Vagrant-CaaSP Environment! 
4 | To get started, become the user sles, by running: 5 | sudo su - sles 6 | 7 | 8 | -------------------------------------------------------------------------------- /boxfiles/nginx.conf: -------------------------------------------------------------------------------- 1 | user nginx; 2 | worker_processes auto; 3 | 4 | load_module /usr/lib64/nginx/modules/ngx_stream_module.so; 5 | 6 | error_log /var/log/nginx/error.log; 7 | error_log /var/log/nginx/error.log notice; 8 | error_log /var/log/nginx/error.log info; 9 | 10 | events { 11 | worker_connections 1024; 12 | use epoll; 13 | } 14 | 15 | stream { 16 | log_format proxy '$remote_addr [$time_local] ' 17 | '$protocol $status $bytes_sent $bytes_received ' 18 | '$session_time "$upstream_addr"'; 19 | 20 | error_log /var/log/nginx/k8s-masters-lb-error.log; 21 | access_log /var/log/nginx/k8s-masters-lb-access.log proxy; 22 | 23 | upstream k8s-masters { 24 | #hash $remote_addr consistent; 25 | server caasp4-master-1:6443 weight=1 max_fails=2 fail_timeout=5s; 26 | server caasp4-master-2:6443 weight=1 max_fails=2 fail_timeout=5s; 27 | server caasp4-master-3:6443 weight=1 max_fails=2 fail_timeout=5s; 28 | } 29 | server { 30 | listen 6443; 31 | proxy_connect_timeout 5s; 32 | proxy_timeout 30s; 33 | proxy_pass k8s-masters; 34 | } 35 | 36 | upstream dex-backends { 37 | #hash $remote_addr consistent; 38 | server caasp4-master-1:32000 weight=1 max_fails=2 fail_timeout=5s; 39 | server caasp4-master-2:32000 weight=1 max_fails=2 fail_timeout=5s; 40 | server caasp4-master-3:32000 weight=1 max_fails=2 fail_timeout=5s; 41 | } 42 | server { 43 | listen 32000; 44 | proxy_connect_timeout 5s; 45 | proxy_timeout 30s; 46 | proxy_pass dex-backends; 47 | } 48 | 49 | upstream gangway-backends { 50 | #hash $remote_addr consistent; 51 | server caasp4-master-1:32001 weight=1 max_fails=2 fail_timeout=5s; 52 | server caasp4-master-2:32001 weight=1 max_fails=2 fail_timeout=5s; 53 | server caasp4-master-3:32001 weight=1 max_fails=2 fail_timeout=5s; 54 | } 55 | server { 56 | listen 32001; 57 | proxy_connect_timeout 5s; 58 | proxy_timeout 30s; 59 | proxy_pass gangway-backends; 60 | } 61 | } 62 | 63 | -------------------------------------------------------------------------------- /boxfiles/prep_box.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | useradd -m sles 4 | usermod -v 10000000-20000000 -w 10000000-20000000 sles 5 | 6 | echo "sles ALL=(ALL) NOPASSWD: ALL" >/etc/sudoers.d/sles 7 | 8 | mkdir -p ~sles/.ssh 9 | mkdir -p ~root/.ssh 10 | 11 | if [ ! -d /vagrant/cluster ]; then 12 | mkdir /vagrant/cluster 13 | chown -R sles:users /vagrant/cluster 14 | fi 15 | 16 | if [ ! 
-f /vagrant/cluster/caasp4-id ]; then 17 | ssh-keygen -t rsa -f /vagrant/cluster/caasp4-id -P '' 18 | chown sles /vagrant/cluster/caasp4-id 19 | chown sles /vagrant/cluster/caasp4-id.pub 20 | fi 21 | 22 | cat /vagrant/cluster/caasp4-id.pub >> ~sles/.ssh/authorized_keys 23 | cat /vagrant/cluster/caasp4-id.pub >> ~root/.ssh/authorized_keys 24 | 25 | chmod 700 ~sles/.ssh 26 | chmod 600 ~sles/.ssh/authorized_keys 27 | chmod 700 ~root/.ssh 28 | chmod 600 ~root/.ssh/authorized_keys 29 | chown -R sles ~sles/.ssh 30 | 31 | cp /vagrant/boxfiles/motd /etc/motd 32 | 33 | # rm -f /etc/zypp/repos.d/* 34 | #sed -i 's/DHCLIENT_HOSTNAME_OPTION="AUTO"/DHCLIENT_HOSTNAME_OPTION=""/g' /etc/sysconfig/network/dhcp 35 | myip=$(ip a sh eth0|sed -n 's;.*inet \(.*\)/.*;\1;p') 36 | echo ${myip} $(hostname -f) $(hostname -s) 37 | 38 | -------------------------------------------------------------------------------- /boxfiles/setup_lb.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | source /vagrant/caasp_env.conf 3 | 4 | cat > /etc/nginx/nginx.conf << EOF 5 | user nginx; 6 | worker_processes auto; 7 | 8 | load_module /usr/lib64/nginx/modules/ngx_stream_module.so; 9 | 10 | error_log /var/log/nginx/error.log; 11 | error_log /var/log/nginx/error.log notice; 12 | error_log /var/log/nginx/error.log info; 13 | 14 | events { 15 | worker_connections 1024; 16 | use epoll; 17 | } 18 | 19 | stream { 20 | log_format proxy '$remote_addr [$time_local] ' 21 | '$protocol $status $bytes_sent $bytes_received ' 22 | '$session_time "$upstream_addr"'; 23 | 24 | error_log /var/log/nginx/k8s-masters-lb-error.log; 25 | access_log /var/log/nginx/k8s-masters-lb-access.log proxy; 26 | 27 | upstream k8s-masters { 28 | EOF 29 | for NUM in $(seq 1 $NMASTERS); do 30 | printf " server caasp4-master-${NUM}:6443 weight=1 max_fails=1;\n" >> /etc/nginx/nginx.conf 31 | done 32 | printf " }\n" >> /etc/nginx/nginx.conf 33 | printf " upstream k8s-workers_http {\n" >> /etc/nginx/nginx.conf 34 | for NUM in $(seq 1 $NWORKERS); do 35 | printf " server caasp4-worker-1:80 weight=1 max_fails=1;\n" >> /etc/nginx/nginx.conf 36 | done 37 | printf " }\n" >> /etc/nginx/nginx.conf 38 | 39 | printf " upstream k8s-workers_https {\n" >> /etc/nginx/nginx.conf 40 | for NUM in $(seq 1 $NWORKERS); do 41 | printf " server caasp4-worker-1:443 weight=1 max_fails=1;\n" >> /etc/nginx/nginx.conf 42 | done 43 | cat >> /etc/nginx/nginx.conf << EOF 44 | } 45 | 46 | server { 47 | listen 6443; 48 | #proxy_connect_timeout 1s; 49 | #proxy_timeout 3s; 50 | proxy_pass k8s-masters; 51 | } 52 | 53 | server { 54 | listen 80; 55 | proxy_pass k8s-workers_http; 56 | } 57 | 58 | server { 59 | listen 443; 60 | proxy_pass k8s-workers_https; 61 | } 62 | 63 | } 64 | EOF 65 | 66 | systemctl enable --now nginx 67 | -------------------------------------------------------------------------------- /boxfiles/setup_nfs_server.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | mkdir /nfs 4 | echo '/nfs *.suselab.com(rw,no_root_squash)' >/etc/exports 5 | systemctl enable --now nfs-server 6 | exportfs -a 7 | -------------------------------------------------------------------------------- /cluster.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | source caasp_env.conf 3 | 4 | function printHelp { 5 | cat << EOF 6 | Usage ${0##*/} [options..] 
[command] 7 | -v, --verbose Make the operation more talkative 8 | -h,-?, --help Show help and exit 9 | 10 | start start a previosly provisioned cluster 11 | stop stop a running cluster 12 | 13 | dashboardInfo get Dashboard IP, PORT and Token 14 | monitoringInfo get URLs and credentials for monitoring stack 15 | EOF 16 | } 17 | 18 | #initialize all the options 19 | VERBOSE=/bin/false 20 | while :; do 21 | case $1 in 22 | -h|-\?|--help) 23 | printHelp 24 | exit 25 | ;; 26 | -v|--verbose) 27 | VERBOSE=/bin/true 28 | ;; 29 | --) #End of all options 30 | shift 31 | break 32 | ;; 33 | -?*) 34 | printf "'$1' is not a valid option\n" >&2 35 | exit 1 36 | ;; 37 | *) #Break out of case, no more options 38 | break 39 | esac 40 | shift 41 | done 42 | 43 | function start_cluster { 44 | $VERBOSE && set -x 45 | # start loadbalancers 46 | printf "Starting loadbalancers:\n" 47 | for NUM in $(seq 1 $NLOADBAL); do 48 | vagrant reload caasp4-lb-${NUM} 49 | done 50 | printf "\n" 51 | 52 | # Starting storage 53 | printf "Starting storage:\n" 54 | for NUM in $(seq 1 $NSTORAGE); do 55 | vagrant reload caasp4-storage-${NUM} 56 | done 57 | printf "\n" 58 | 59 | printf "Starting master nodes\n" 60 | for NUM in $(seq 1 $NMASTERS); do 61 | vagrant reload caasp4-master-${NUM} 62 | done 63 | 64 | #Waiting for masters to become ready 65 | vagrant ssh caasp4-master-1 -c 'sudo -H -u sles bash -c "source /vagrant/utils.sh; wait_for_masters_ready"' 66 | 67 | printf "Starting worker nodes\n" 68 | for NUM in $(seq 1 $NWORKERS); do 69 | vagrant reload caasp4-worker-${NUM} 70 | done 71 | 72 | # Waiting for workers to become ready 73 | vagrant ssh caasp4-master-1 -c 'sudo -H -u sles bash -c "source /vagrant/utils.sh; wait_for_workers_ready"' 74 | 75 | printf "Starting scheduling on nodes.\n" 76 | vagrant ssh caasp4-master-1 -c 'sudo -H -u sles kubectl get nodes -o name | \ 77 | sudo -H -u sles xargs -I{} kubectl uncordon {}' 78 | 79 | printf "Cluster is up and running.....\n" 80 | $VERBOSE && set +x 81 | 82 | } 83 | 84 | function stop_cluster { 85 | $VERBOSE && set -x 86 | # Disable scheduling on the whole cluster. 
87 | # This will avoid Kubernetes rescheduling jobs while you are shutting down nodes 88 | printf "Disabling scheduling on cluster nodes:\n" 89 | vagrant ssh caasp4-master-1 -c 'sudo -H -u sles kubectl get nodes -o name | \ 90 | sudo -H -u sles xargs -I{} kubectl cordon {}' 91 | # Gracefully shutdown workers 92 | printf "Shutting down workers:" 93 | for NUM in $(seq 1 $NWORKERS); do 94 | printf " caasp4-worker-${NUM}" 95 | vagrant ssh caasp4-worker-${NUM} -c 'sudo systemctl poweroff' 2> /dev/null 96 | done 97 | printf "\n" 98 | 99 | vagrant ssh caasp4-master-1 -c 'sudo -H -u sles bash -c "source /vagrant/utils.sh; wait_for_workers_notready"' 100 | 101 | # Gracefully shutdown masters 102 | printf "Shutting down masters:" 103 | for NUM in $(seq 1 $NMASTERS); do 104 | printf " caasp4-master-${NUM}" 105 | vagrant ssh caasp4-master-${NUM} -c 'sudo systemctl poweroff' 2>/dev/null 106 | done 107 | printf "\n" 108 | 109 | # Gracefully shutdown loadbalancers 110 | printf "Shutting down loadbalancers:" 111 | for NUM in $(seq 1 $NLOADBAL); do 112 | printf " caasp4-lb-${NUM}" 113 | vagrant ssh caasp4-lb-${NUM} -c 'sudo systemctl poweroff' 2>/dev/null 114 | done 115 | printf "\n" 116 | 117 | # Gracefully shutdown storage 118 | printf "Shutting down storage:" 119 | for NUM in $(seq 1 $NSTORAGE); do 120 | printf " caasp4-storage-${NUM}" 121 | vagrant ssh caasp4-storage-${NUM} -c 'sudo systemctl poweroff' 2>/dev/null 122 | done 123 | printf "\n" 124 | 125 | $VERBOSE && set +x 126 | } 127 | 128 | function get_dashboard_credentials { 129 | local NODE_PORT="$(vagrant ssh caasp4-master-1 -c 'sudo -H -u sles kubectl get -o jsonpath="{.spec.ports[0].nodePort}" services kubernetes-dashboard -n kube-system' 2>/dev/null)" 130 | local NODE_IP="$(vagrant ssh caasp4-master-1 -c 'sudo -H -u sles kubectl get nodes -o jsonpath="{.items[0].status.addresses[0].address}" -n kube-system' 2>/dev/null)" 131 | local SECRET="$(vagrant ssh caasp4-master-1 -c 'ST=$(sudo -H -u sles kubectl -n kube-system get serviceaccounts admin-user -o jsonpath="{.secrets[0].name}");sudo -H -u sles kubectl -n kube-system get secret ${ST} -o jsonpath="{.data.token}"|base64 -d' 2>/dev/null)" 132 | printf "Access your dashboard at: https://$NODE_IP:$NODE_PORT/\n" 133 | printf "Your login token is: ${SECRET}\n" 134 | 135 | } 136 | 137 | function get_monitoring_credentials { 138 | local CAASP_DOMAIN="$(sed -n 's/^\s*domain\s*= "\(.*\)".*$/\1/p' Vagrantfile)" 139 | cat << EOF 140 | You need to add the following to your /etc/hosts file: 141 | 142 | #vagrant-caasp4 143 | 192.168.121.111 grafana.${CAASP_DOMAIN} prometheus.${CAASP_DOMAIN} prometheus-alert.${CAASP_DOMAIN} 144 | 145 | 146 | Then point your browser to the web interfaces 147 | 148 | Grafana: 149 | url: https://grafana.${CAASP_DOMAIN} 150 | user: admin 151 | pass: $(vagrant ssh caasp4-master-1 -c 'sudo -H -u sles kubectl get secret --namespace monitoring grafana -o jsonpath="{.data.admin-password}" | base64 --decode ; echo' 2>/dev/null) 152 | 153 | Prometheus: 154 | url: https://prometheus.${CAASP_DOMAIN} 155 | user: admin 156 | pass: linux 157 | 158 | AlertManager: 159 | url: https://prometheus-alertmanager.${CAASP_DOMAIN} 160 | user: admin 161 | pass: linux 162 | EOF 163 | } 164 | 165 | 166 | if [[ $# -ne 1 ]]; then 167 | printf "This tool takes one argument, no more, no less!\n" >&2 168 | exit 1 169 | fi 170 | 171 | case $1 in 172 | start) 173 | start_cluster 174 | ;; 175 | stop) 176 | stop_cluster 177 | ;; 178 | dashboardInfo) 179 | get_dashboard_credentials 180 | ;; 181 | monitoringInfo) 
182 | get_monitoring_credentials 183 | ;; 184 | ?*) 185 | printf "'$1' is not a valid command\n" >&2 186 | exit 1 187 | ;; 188 | esac 189 | -------------------------------------------------------------------------------- /config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This configuration file is used to describe different sizes 3 | # of deployments. 4 | # 5 | # For each node type define: 6 | # count: Number of nodes of this type 7 | # cpus: Number of CPUs for node 8 | # memory: RAM for node 9 | # extra_disks: Number of extra disks (vdb, vdc) to create, 10 | # each will have a size of 'disk_Size' 11 | # disk_size: Size of extra disks 12 | minimal: 13 | nodes: 14 | master: 15 | count: 1 16 | cpus: 2 17 | memory: 2048 18 | extra_disks: 0 19 | worker: 20 | count: 1 21 | cpus: 2 22 | memory: 2048 23 | extra_disks: 0 24 | loadbalancer: 25 | count: 1 26 | cpus: 1 27 | memory: 512 28 | extra_disks: 0 29 | storage: 30 | count: 1 31 | cpus: 1 32 | memory: 512 33 | extra_disks: 0 34 | small: 35 | nodes: 36 | master: 37 | count: 1 38 | cpus: 2 39 | memory: 2048 40 | extra_disks: 0 41 | worker: 42 | count: 2 43 | cpus: 2 44 | memory: 2048 45 | extra_disks: 0 46 | loadbalancer: 47 | count: 1 48 | cpus: 1 49 | memory: 512 50 | extra_disks: 0 51 | storage: 52 | count: 1 53 | cpus: 1 54 | memory: 512 55 | extra_disks: 0 56 | medium: 57 | nodes: 58 | master: 59 | count: 3 60 | cpus: 2 61 | memory: 4096 62 | extra_disks: 0 63 | worker: 64 | count: 3 65 | cpus: 4 66 | memory: 4096 67 | extra_disks: 3 68 | loadbalancer: 69 | count: 1 70 | cpus: 1 71 | memory: 512 72 | extra_disks: 0 73 | storage: 74 | count: 1 75 | cpus: 1 76 | memory: 512 77 | extra_disks: 0 78 | large: 79 | nodes: 80 | master: 81 | count: 3 82 | cpus: 2 83 | memory: 4096 84 | extra_disks: 0 85 | worker: 86 | count: 5 87 | cpus: 2 88 | memory: 8192 89 | extra_disks: 0 90 | loadbalancer: 91 | count: 1 92 | cpus: 1 93 | memory: 512 94 | extra_disks: 0 95 | storage: 96 | count: 1 97 | cpus: 1 98 | memory: 512 99 | extra_disks: 0 100 | large_rook: 101 | nodes: 102 | master: 103 | count: 3 104 | cpus: 2 105 | memory: 4096 106 | extra_disks: 0 107 | worker: 108 | count: 5 109 | cpus: 2 110 | memory: 8192 111 | extra_disks: 5 112 | disk_size: 20 113 | loadbalancer: 114 | count: 1 115 | cpus: 1 116 | memory: 512 117 | extra_disks: 0 118 | storage: 119 | count: 1 120 | cpus: 1 121 | memory: 512 122 | extra_disks: 0 123 | large_rook_datahub: 124 | nodes: 125 | master: 126 | count: 3 127 | cpus: 4 128 | memory: 2048 129 | extra_disks: 0 130 | worker: 131 | count: 4 132 | cpus: 6 133 | memory: 16384 134 | extra_disks: 3 135 | disk_size: 20 136 | loadbalancer: 137 | count: 1 138 | cpus: 1 139 | memory: 512 140 | extra_disks: 0 141 | storage: 142 | count: 1 143 | cpus: 1 144 | memory: 512 145 | extra_disks: 0 146 | -------------------------------------------------------------------------------- /deploy/00.prep_environment.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -x 2 | eval $(ssh-agent -s) 3 | ssh-add /vagrant/cluster/caasp4-id 4 | # XXX: nice error checking 5 | 6 | 7 | -------------------------------------------------------------------------------- /deploy/01.init_cluster.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | if [ "$(hostname -s|grep -c master)" -ne 1 ]; then 3 | echo "This must be run on the master..." 
4 | exit 1 5 | fi 6 | 7 | SKUBA_VERBOSITY=$(sed -n 's/^SKUBA_VERBOSITY=\([0-99]\).*/\1/p' /vagrant/caasp_env.conf|tail -1) 8 | SKUBA_VERBOSITY=${SKUBA_VERBOSITY:-1} 9 | 10 | cd /vagrant/cluster 11 | rm -fr caasp4-cluster 2>/dev/null 12 | echo "Initializing cluster..." 13 | set -x 14 | skuba -v ${SKUBA_VERBOSITY} cluster init --control-plane caasp4-lb-1 caasp4-cluster 15 | chmod g+rx caasp4-cluster 16 | set +x 17 | -------------------------------------------------------------------------------- /deploy/02.bootstrap_cluster.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | cd /vagrant/cluster/caasp4-cluster 3 | 4 | SKUBA_VERBOSITY=$(sed -n 's/^SKUBA_VERBOSITY=\([0-99]\).*/\1/p' /vagrant/caasp_env.conf|tail -1) 5 | SKUBA_VERBOSITY=${SKUBA_VERBOSITY:-1} 6 | 7 | echo "Bootstrapping cluster..." 8 | set -x 9 | skuba -v ${SKUBA_VERBOSITY} node bootstrap --user sles --sudo --target caasp4-master-1 caasp4-master-1 10 | 11 | skuba -v ${SKUBA_VERBOSITY} cluster status 12 | set +x 13 | mkdir ~/.kube 14 | ln -sf /vagrant/cluster/caasp4-cluster/admin.conf ~/.kube/config 15 | chmod g+r /vagrant/cluster/caasp4-cluster/admin.conf 16 | 17 | set -x 18 | kubectl get nodes -o wide 19 | set +x 20 | -------------------------------------------------------------------------------- /deploy/03.add_masters.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | . /vagrant/caasp_env.conf 3 | 4 | cd /vagrant/cluster/caasp4-cluster 5 | 6 | SKUBA_VERBOSITY=$(sed -n 's/^SKUBA_VERBOSITY=\([0-99]\).*/\1/p' /vagrant/caasp_env.conf|tail -1) 7 | SKUBA_VERBOSITY=${SKUBA_VERBOSITY:-1} 8 | 9 | echo "Adding additional masters..." 10 | set -x 11 | for NUM in $(seq 2 $NMASTERS); do 12 | skuba -v ${SKUBA_VERBOSITY} node join --role master --user sles --sudo --target caasp4-master-${NUM} caasp4-master-${NUM} 13 | done 14 | skuba -v ${SKUBA_VERBOSITY} cluster status 15 | kubectl get nodes -o wide 16 | set +x 17 | -------------------------------------------------------------------------------- /deploy/04.add_workers.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | cd /vagrant/cluster/caasp4-cluster 3 | source /vagrant/caasp_env.conf 4 | source /vagrant/utils.sh 5 | 6 | SKUBA_VERBOSITY=$(sed -n 's/^SKUBA_VERBOSITY=\([0-99]\).*/\1/p' /vagrant/caasp_env.conf|tail -1) 7 | SKUBA_VERBOSITY=${SKUBA_VERBOSITY:-1} 8 | 9 | echo "Adding workers..." 10 | set -x 11 | for NUM in $(seq 1 $NWORKERS); do 12 | skuba -v ${SKUBA_VERBOSITY} node join --role worker --user sles --sudo --target caasp4-worker-${NUM} caasp4-worker-${NUM} 13 | done 14 | set +x 15 | wait_for_masters_ready 16 | wait_for_workers_ready 17 | set -x 18 | skuba -v ${SKUBA_VERBOSITY} cluster status 19 | kubectl get nodes -o wide 20 | set +x 21 | -------------------------------------------------------------------------------- /deploy/05.setup_helm.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | echo "Setting up helm..." 
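# Helm 2 needs an in-cluster Tiller component: the next two commands create a
# 'tiller' service account and bind it to the cluster-admin role, then 'helm init'
# deploys Tiller using that account and the SUSE chart repository is added for
# the later deploy steps.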
3 | kubectl create serviceaccount --namespace kube-system tiller 4 | kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller 5 | #helm init 6 | #kubectl patch deploy --namespace kube-system tiller-deploy -p '{"spec":{"template":{"spec":{"serviceAccount":"tiller"}}}}' 7 | helm init --service-account=tiller --stable-repo-url https://charts.helm.sh/stable --wait 8 | helm repo add suse https://kubernetes-charts.suse.com 9 | -------------------------------------------------------------------------------- /deploy/06.add_k8s_nfs-sc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | echo "Adding NFS storage class..." 3 | helm install --name=nfs-client --set nfs.server=192.168.121.140 --set nfs.path=/nfs --set storageClass.defaultClass=true stable/nfs-client-provisioner 4 | -------------------------------------------------------------------------------- /deploy/07.add_dashboard.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | echo "Installing Kubernetes Dashboard..." 3 | #helm install stable/kubernetes-dashboard --namespace kube-system --name kubernetes-dashboard --set service.type=NodePort 4 | # dashboard chart and dashboard not ready for 1.16.2 5 | kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-rc2/aio/deploy/recommended.yaml 6 | kubectl patch svc kubernetes-dashboard --type='json' -p '[{"op":"replace","path":"/spec/type","value":"NodePort"}]' -n kubernetes-dashboard 7 | 8 | cat >/tmp/dashboard-admin.yaml </tmp/admin-user-crb.yaml </dev/null 36 | 37 | #helm status kubernetes-dashboard 38 | 39 | ####export NODE_PORT=$(kubectl get -o jsonpath="{.spec.ports[0].nodePort}" services kubernetes-dashboard -n kube-system) 40 | ####export NODE_IP=$(kubectl get nodes -o jsonpath="{.items[0].status.addresses[0].address}" -n kube-system) 41 | #### 42 | 43 | #ST=$(kubectl -n kubernetes-dashboard get serviceaccounts kubernetes-dashboard -o jsonpath="{.secrets[0].name}") 44 | #SECRET=$(kubectl -n kubernetes-dashboard get secret ${ST} -o jsonpath="{.data.token}"|base64 -d) 45 | ST=$(kubectl -n kube-system get serviceaccounts admin-user -o jsonpath="{.secrets[0].name}") 46 | SECRET=$(kubectl -n kube-system get secret ${ST} -o jsonpath="{.data.token}"|base64 -d) 47 | export NODE_PORT=$(kubectl get -o jsonpath="{.spec.ports[0].nodePort}" services kubernetes-dashboard -n kubernetes-dashboard) 48 | export NODE_IP=$(kubectl get nodes -o jsonpath="{.items[0].status.addresses[0].address}" -n kubernetes-dashboard) 49 | 50 | echo " token: $SECRET" >> ~/.kube/config 51 | echo "Access your dashboard at: https://$NODE_IP:$NODE_PORT/" 52 | echo "Your login token is: ${SECRET}" 53 | echo "Or use ~/.kube/config to authenticate with kubeconfig" 54 | 55 | -------------------------------------------------------------------------------- /deploy/08.add_metallb.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | MLBCONFIG=/tmp/metallb.yaml 3 | 4 | echo "Setting up MetalLB..." 5 | 6 | kubectl create namespace metallb-system 7 | 8 | cat > ${MLBCONFIG} <> /etc/hosts 9 | echo "Modified /etc/hosts..." 
10 | fi 11 | # Test for and integrate ca-cert if present 12 | if [[ -f "/vagrant/air-gap.d/registry-ca.crt" ]]; then 13 | cp /vagrant/air-gap.d/registry-ca.crt /etc/pki/trust/anchors/ 14 | update-ca-certificates 15 | fi 16 | # Restart cri-o with air-gap modifications in place 17 | systemctl restart crio 18 | 19 | -------------------------------------------------------------------------------- /deploy/20.nginx-ingress.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | source /vagrant/caasp_env.conf 3 | cat > /tmp/nginx-ingress-config-values.yaml << EOF 4 | # Enable the creation of pod security policy 5 | podSecurityPolicy: 6 | enabled: true 7 | 8 | # Create a specific service account 9 | serviceAccount: 10 | create: true 11 | name: nginx-ingress 12 | 13 | # Publish services on port HTTP/80 14 | # Publish services on port HTTPS/443 15 | controller: 16 | service: 17 | externalIPs: 18 | EOF 19 | for NUM in $(seq 0 $(($NWORKERS-1)) ); do 20 | printf " - 192.168.121.13${NUM}\n" >> /tmp/nginx-ingress-config-values.yaml 21 | done 22 | 23 | helm install --name nginx-ingress suse/nginx-ingress \ 24 | --values /tmp/nginx-ingress-config-values.yaml 25 | -------------------------------------------------------------------------------- /deploy/21.monitor.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 4 | CAASP_DOMAIN="$(sed -n 's/^\s*domain\s*= "\(.*\)".*$/\1/p' /vagrant/Vagrantfile)" 5 | printf "Creating monitoring namespace\n" 6 | kubectl create namespace monitoring 7 | 8 | # copy the storage secret from default namespace to monitoring namespace 9 | printf "Copy storage secret from default namespace to monitoring namespace\n" 10 | kubectl get secret -o json $(kubectl get secret | awk '{print $1}' | grep nfs-client-provisioner) | \ 11 | sed 's/"namespace": "default"/"namespace": "monitoring"/' | kubectl create -f - 12 | 13 | # We will be using self signed certificates for prometheus and grafana, 14 | # we need to create that (the same certificate will be used for all three URLs) 15 | printf "Createing self signed certificates for prometheus and grafana\n" 16 | cat > /tmp/openssl.conf << EOF 17 | [req] 18 | distinguished_name = req_distinguished_name 19 | req_extensions = v3_req 20 | default_md = sha256 21 | default_bits = 4096 22 | prompt=no 23 | 24 | [req_distinguished_name] 25 | C = US 26 | ST = Utah 27 | L = Provo 28 | O = SUSE Consulting 29 | OU = monitoring 30 | CN = ${CAASP_DOMAIN} 31 | emailAddress = admin@${CAASP_DOMAIN} 32 | 33 | [ v3_req ] 34 | basicConstraints = CA:FALSE 35 | keyUsage = keyEncipherment, dataEncipherment 36 | extendedKeyUsage = serverAuth 37 | subjectAltName = @alt_names 38 | 39 | [alt_names] 40 | DNS.1 = prometheus.${CAASP_DOMAIN} 41 | DNS.2 = prometheus-alertmanager.${CAASP_DOMAIN} 42 | DNS.3 = grafana.${CAASP_DOMAIN} 43 | EOF 44 | 45 | openssl req -x509 -nodes -days 365 -newkey rsa:4096 \ 46 | -keyout /tmp/monitoring.key -out /tmp/monitoring.crt \ 47 | -config /tmp/openssl.conf -extensions 'v3_req' 48 | 49 | # Add the certificate as a secret to kubernetes 50 | printf "Adding certificate as kubernetes secret\n" 51 | kubectl create -n monitoring secret tls monitoring-tls \ 52 | --key /tmp/monitoring.key \ 53 | --cert /tmp/monitoring.crt 54 | 55 | ##################################################### 56 | # Prometheus 57 | ###################################################### 58 | printf "Prometheus:\n" 59 | cat > /tmp/prometheus-config-values.yaml << EOF 60 | # 
Alertmanager configuration 61 | alertmanager: 62 | enabled: true 63 | ingress: 64 | enabled: true 65 | hosts: 66 | - prometheus-alertmanager.${CAASP_DOMAIN} 67 | annotations: 68 | kubernetes.io/ingress.class: nginx 69 | nginx.ingress.kubernetes.io/auth-type: basic 70 | nginx.ingress.kubernetes.io/auth-secret: prometheus-basic-auth 71 | nginx.ingress.kubernetes.io/auth-realm: "Authentication Required" 72 | tls: 73 | - hosts: 74 | - prometheus-alertmanager.${CAASP_DOMAIN} 75 | secretName: monitoring-tls 76 | persistentVolume: 77 | enabled: true 78 | ## Use a StorageClass 79 | storageClass: nfs-client 80 | ## Create a PersistentVolumeClaim of 2Gi 81 | size: 2Gi 82 | ## Use an existing PersistentVolumeClaim (my-pvc) 83 | #existingClaim: prometheus-alert 84 | 85 | alertmanagerFiles: 86 | alertmanager.yml: 87 | global: 88 | # The smarthost and SMTP sender used for mail notifications. 89 | smtp_from: alertmanager@${CAASP_DOMAIN} 90 | smtp_smarthost: smtp.${CAASP_DOMAIN}:587 91 | smtp_auth_username: admin@${CAASP_DOMAIN} 92 | smtp_auth_password: 93 | smtp_require_tls: true 94 | 95 | route: 96 | # The labels by which incoming alerts are grouped together. 97 | group_by: ['node'] 98 | 99 | # When a new group of alerts is created by an incoming alert, wait at 100 | # least 'group_wait' to send the initial notification. 101 | # This way ensures that you get multiple alerts for the same group that start 102 | # firing shortly after another are batched together on the first 103 | # notification. 104 | group_wait: 30s 105 | 106 | # When the first notification was sent, wait 'group_interval' to send a batch 107 | # of new alerts that started firing for that group. 108 | group_interval: 5m 109 | 110 | # If an alert has successfully been sent, wait 'repeat_interval' to 111 | # resend them. 
112 | repeat_interval: 3h 113 | 114 | # A default receiver 115 | receiver: admin-example 116 | 117 | receivers: 118 | - name: 'admin-example' 119 | email_configs: 120 | - to: 'admin@${CAASP_DOMAIN}' 121 | 122 | # Create a specific service account 123 | serviceAccounts: 124 | nodeExporter: 125 | name: prometheus-node-exporter 126 | 127 | # Allow scheduling of node-exporter on master nodes 128 | nodeExporter: 129 | hostNetwork: false 130 | hostPID: false 131 | podSecurityPolicy: 132 | enabled: true 133 | annotations: 134 | seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default' 135 | apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' 136 | seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' 137 | apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' 138 | tolerations: 139 | - key: node-role.kubernetes.io/master 140 | operator: Exists 141 | effect: NoSchedule 142 | 143 | # Disable Pushgateway 144 | pushgateway: 145 | enabled: false 146 | 147 | # Prometheus configuration 148 | server: 149 | ingress: 150 | enabled: true 151 | hosts: 152 | - prometheus.${CAASP_DOMAIN} 153 | annotations: 154 | kubernetes.io/ingress.class: nginx 155 | nginx.ingress.kubernetes.io/auth-type: basic 156 | nginx.ingress.kubernetes.io/auth-secret: prometheus-basic-auth 157 | nginx.ingress.kubernetes.io/auth-realm: "Authentication Required" 158 | tls: 159 | - hosts: 160 | - prometheus.${CAASP_DOMAIN} 161 | secretName: monitoring-tls 162 | persistentVolume: 163 | enabled: true 164 | ## Use a StorageClass 165 | storageClass: nfs-client 166 | ## Create a PersistentVolumeClaim of 8Gi 167 | size: 8Gi 168 | ## Use an existing PersistentVolumeClaim (my-pvc) 169 | #existingClaim: prometheus 170 | serverFiles: 171 | alerts: {} 172 | rules: 173 | groups: 174 | - name: caasp.node.rules 175 | rules: 176 | - alert: NodeIsNotReady 177 | expr: kube_node_status_condition{condition="Ready",status="false"} == 1 178 | for: 1m 179 | labels: 180 | severity: critical 181 | annotations: 182 | description: '{{ \$labels.node }} is not ready' 183 | - alert: NodeIsOutOfDisk 184 | expr: kube_node_status_condition{condition="OutOfDisk",status="true"} == 1 185 | labels: 186 | severity: critical 187 | annotations: 188 | description: '{{ \$labels.node }} has insufficient free disk space' 189 | - alert: NodeHasDiskPressure 190 | expr: kube_node_status_condition{condition="DiskPressure",status="true"} == 1 191 | labels: 192 | severity: warning 193 | annotations: 194 | description: '{{ \$labels.node }} has insufficient available disk space' 195 | - alert: NodeHasInsufficientMemory 196 | expr: kube_node_status_condition{condition="MemoryPressure",status="true"} == 1 197 | labels: 198 | severity: warning 199 | annotations: 200 | description: '{{ \$labels.node }} has insufficient available memory' 201 | EOF 202 | # We will be using basic authentication for Prometheus 203 | # User: admin 204 | # Password: linux 205 | printf " Adding basic authentication for Prometheus as kubernetes secret\n" 206 | #it is important that the file name is 'auth', otherwise the ingress controller will return a 503 207 | echo 'admin:$apr1$lCPTFdzB$Iubp1DzRYBDFjpJK72FOA0' > /tmp/auth 208 | kubectl create secret generic -n monitoring prometheus-basic-auth --from-file=/tmp/auth 209 | printf " Installing Prometheus\n" 210 | helm install --name prometheus suse/prometheus \ 211 | --namespace monitoring \ 212 | --values /tmp/prometheus-config-values.yaml 213 | 214 | 
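# The Prometheus chart can take a few minutes to pull images and start; before
# continuing, the pods can be watched with, for example:
#   kubectl get pods --namespace monitoring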
##################################################### 215 | # Grafana 216 | ###################################################### 217 | printf "Grafana\n" 218 | cat > /tmp/grafana-config-values.yaml << EOF 219 | # Configure admin password 220 | adminPassword: linux 221 | 222 | # Ingress configuration 223 | ingress: 224 | enabled: true 225 | annotations: 226 | kubernetes.io/ingress.class: nginx 227 | hosts: 228 | - grafana.${CAASP_DOMAIN} 229 | tls: 230 | - hosts: 231 | - grafana.${CAASP_DOMAIN} 232 | secretName: monitoring-tls 233 | 234 | # Configure persistent storage 235 | persistence: 236 | enabled: true 237 | accessModes: 238 | - ReadWriteOnce 239 | ## Use a StorageClass 240 | storageClassName: nfs-client 241 | ## Create a PersistentVolumeClaim of 10Gi 242 | size: 10Gi 243 | ## Use an existing PersistentVolumeClaim (my-pvc) 244 | #existingClaim: grafana 245 | 246 | # Enable sidecar for provisioning 247 | sidecar: 248 | datasources: 249 | enabled: true 250 | label: grafana_datasource 251 | dashboards: 252 | enabled: true 253 | label: grafana_dashboard 254 | EOF 255 | 256 | # first of we create the datasource to be used for grafana 257 | kubectl create -f /vagrant/deploy/grafana-datasources.yaml 258 | # deploy the Grafana 259 | helm install --name grafana suse/grafana \ 260 | --namespace monitoring \ 261 | --values /tmp/grafana-config-values.yaml 262 | # and a grafana dashboard as a ConfigMap 263 | kubectl apply -f /vagrant/deploy/grafana-dashboards-caasp-cluster.yaml 264 | 265 | ###################################################### 266 | # # 267 | # Finished, display information # 268 | # # 269 | ###################################################### 270 | clear 271 | kubectl get pods --namespace monitoring 272 | printf "\n You need to add the following to your /etc/hosts file:\n" 273 | cat << EOF 274 | #vagrant-caasp4 275 | 192.168.121.111 grafana.${CAASP_DOMAIN} prometheus.${CAASP_DOMAIN} prometheus-alert.${CAASP_DOMAIN} 276 | 277 | 278 | Then point your browser to the web interfaces 279 | 280 | Grafana: 281 | url: https://grafana.${CAASP_DOMAIN} 282 | user: admin 283 | pass: $(kubectl get secret --namespace monitoring grafana -o jsonpath="{.data.admin-password}" | base64 --decode ; echo) 284 | 285 | Prometheus: 286 | url: https://prometheus.${CAASP_DOMAIN} 287 | user: admin 288 | pass: linux 289 | 290 | AlertManager: 291 | url: https://prometheus-alertmanager.${CAASP_DOMAIN} 292 | user: admin 293 | pass: linux 294 | 295 | Happy CaaSPing! 296 | EOF 297 | -------------------------------------------------------------------------------- /deploy/22.add_stratos.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | echo "Adding Stratos Console..." 
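# The Helm values written below enable a local admin login (password 'stratos'),
# expose the console through an ingress at console.suselab.com, and use the
# nfs-client storage class for the console and MariaDB persistence.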
3 | cat > /tmp/stratos-values.yaml << EOF 4 | # Tag for images - do not edit 5 | consoleVersion: 2.6.1-9e13f7b0c-cap 6 | dockerRegistrySecret: regsecret 7 | # Specify default DB password 8 | dbPassword: changeme 9 | dbProvider: mysql 10 | # Provide Proxy settings if required 11 | #httpProxy: proxy.corp.net 12 | #httpsProxy: proxy.corp.net 13 | #noProxy: localhost 14 | #ftpProxy: proxy.corp.net 15 | #socksProxy: sock-proxy.corp.net 16 | imagePullPolicy: IfNotPresent 17 | # useLb is deprecated - use console.service.type 18 | useLb: false 19 | console: 20 | cookieDomain: 21 | # externalIP deprecated - use console.service.externalIPs 22 | # externalIP: 127.0.0.1 23 | backendLogLevel: info 24 | ssoLogin: false 25 | ssoOptions: 26 | # Session Store Secret 27 | sessionStoreSecret: 28 | # Stratos Services 29 | service: 30 | annotations: [] 31 | externalIPs: [] 32 | loadBalancerIP: 33 | loadBalancerSourceRanges: [] 34 | servicePort: 443 35 | # nodePort: 30000 36 | type: ClusterIP 37 | externalName: 38 | ingress: 39 | ## If true, Ingress will be created 40 | enabled: true 41 | 42 | ## Additional annotations 43 | annotations: {} 44 | 45 | ## Additional labels 46 | extraLabels: {} 47 | 48 | ## Host for the ingress 49 | # Defaults to console.[env.Domain] if env.Domain is set and host is not 50 | host: console.suselab.com 51 | 52 | # Name of secret containing TLS certificate 53 | secretName: 54 | 55 | # crt and key for TLS Certificate (this chart will create the secret based on these) 56 | tls: 57 | crt: 58 | key: 59 | 60 | http: 61 | enabled: true 62 | servicePort: 80 63 | # nodePort: 30001 64 | 65 | # Name of config map that provides the template files for user invitation emails 66 | templatesConfigMapName: 67 | 68 | # Email subject of the user invitation message 69 | userInviteSubject: ~ 70 | 71 | # Whether to perform the volume migration job on install/upgrade (migrate to secrets) 72 | migrateVolumes: true 73 | 74 | # Enable/disable Tech Preview 75 | techPreview: true 76 | 77 | # Use local admin user instead of UAA - set to a password to enable 78 | localAdminPassword: stratos 79 | 80 | images: 81 | console: stratos-console 82 | proxy: stratos-jetstream 83 | postflight: stratos-postflight-job 84 | mariadb: stratos-mariadb 85 | 86 | # Specify which storage class should be used for PVCs 87 | storageClass: nfs-client 88 | #consoleCert: | 89 | # -----BEGIN CERTIFICATE----- 90 | # MIIDXTCCAkWgAwIBAgIJAJooOiQWl1v1MA0GCSqGSIb3DQEBCwUAMEUxCzAJBgNV 91 | # ... 92 | # -----END CERTIFICATE----- 93 | #consoleCertKey: | 94 | # -----BEGIN PRIVATE KEY----- 95 | # MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkdgEAAoIBAQDV9+ySh0xZzM41 96 | # ... 
97 | # -----END PRIVATE KEYE----- 98 | # MariaDB chart configuration 99 | mariadb: 100 | # Only required for creating the databases 101 | mariadbRootPassword: changeme 102 | adminUser: root 103 | # Credentials for user 104 | mariadbUser: console 105 | mariadbPassword: changeme 106 | mariadbDatabase: console 107 | usePassword: true 108 | resources: 109 | requests: 110 | memory: 256Mi 111 | cpu: 250m 112 | persistence: 113 | enabled: true 114 | accessMode: ReadWriteOnce 115 | size: 1Gi 116 | storageClass: nfs-client 117 | uaa: 118 | protocol: https:// 119 | port: 120 | host: 121 | consoleClient: 122 | consoleClientSecret: 123 | consoleAdminIdentifier: 124 | skipSSLValidation: false 125 | # SCF values compatability 126 | env: 127 | DOMAIN: 128 | UAA_HOST: 129 | UAA_PORT: 2793 130 | # UAA Zone (namespace cf ias deployed to when deployed to Kubernetes) 131 | UAA_ZONE: scf 132 | 133 | # SMTP Settings for Email Sending (User Invites) 134 | # If true, authenticate against the SMTP server using AUTH command. 135 | SMTP_AUTH: "false" 136 | 137 | # SMTP from address 138 | SMTP_FROM_ADDRESS: ~ 139 | 140 | # SMTP server username 141 | SMTP_USER: ~ 142 | 143 | # SMTP server password 144 | SMTP_PASSWORD: ~ 145 | 146 | # SMTP server host address 147 | SMTP_HOST: ~ 148 | 149 | # SMTP server port 150 | SMTP_PORT: "25" 151 | 152 | kube: 153 | # Whether RBAC is enabled in the Kubernetes cluster 154 | auth: "rbac" 155 | external_console_https_port: 8443 156 | storage_class: 157 | persistent: 158 | organization: cap 159 | registry: 160 | hostname: registry.suse.com 161 | username: 162 | password: 163 | email: default 164 | services: 165 | loadbalanced: false 166 | metrics: 167 | enabled: false 168 | EOF 169 | helm install suse/console --name stratos-console --namespace stratos --values /tmp/stratos-values.yaml 170 | rm /tmp/stratos-values.yaml 171 | -------------------------------------------------------------------------------- /deploy/98.status.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | kubectl get po -A 3 | kubectl get no 4 | cd /vagrant/cluster/caasp4-cluster 5 | skuba cluster status 6 | -------------------------------------------------------------------------------- /deploy/99.run-all.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | eval $(ssh-agent -s) 3 | ssh-add /vagrant/cluster/caasp4-id 4 | . /vagrant/caasp_env.conf 5 | cd /vagrant/deploy 6 | 7 | ./01.init_cluster.sh 8 | ./02.bootstrap_cluster.sh 9 | ./03.add_masters.sh 10 | ./04.add_workers.sh 11 | ./05.setup_helm.sh 12 | printf "Waiting for tiller to become available. This can take a couple of minutes." 13 | while [[ $(kubectl --namespace kube-system get pods | egrep -c "tiller-deploy-.* 1/1 Running") -eq 0 ]] 14 | do 15 | printf "." 16 | sleep 5 17 | done 18 | printf "\n" 19 | ./06.add_k8s_nfs-sc.sh 20 | ./07.add_dashboard.sh 21 | ./08.add_metallb.sh 22 | if [[ "${MODEL}" =~ "_rook" ]]; then 23 | echo "Setting up rook..." 
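    # MODEL is read from /vagrant/caasp_env.conf (written by lib.sh); models
    # whose name contains "_rook" in config.yml additionally get a Rook/Ceph
    # deployment via the helper scripts under /vagrant/rook.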
24 | /vagrant/rook/rook_setup.sh 25 | fi 26 | ./98.status.sh 27 | ST=$(kubectl -n kube-system get serviceaccounts admin-user -o jsonpath="{.secrets[0].name}") 28 | SECRET=$(kubectl -n kube-system get secret ${ST} -o jsonpath="{.data.token}"|base64 -d) 29 | #NODE_PORT=$(kubectl get -o jsonpath="{.spec.ports[0].nodePort}" services kubernetes-dashboard -n kube-system) 30 | #NODE_IP=$(kubectl get nodes -o jsonpath="{.items[0].status.addresses[0].address}" -n kube-system) 31 | export NODE_PORT=$(kubectl get -o jsonpath="{.spec.ports[0].nodePort}" services kubernetes-dashboard -n kubernetes-dashboard) 32 | export NODE_IP=$(kubectl get nodes -o jsonpath="{.items[0].status.addresses[0].address}" -n kubernetes-dashboard) 33 | 34 | echo "Access your dashboard at: https://$NODE_IP:$NODE_PORT/" 35 | echo "Your login token is: ${SECRET}" 36 | echo 37 | echo "Happy CaaSPing!" 38 | echo 39 | -------------------------------------------------------------------------------- /deploy/grafana-datasources.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: ConfigMap 3 | apiVersion: v1 4 | metadata: 5 | name: grafana-datasources 6 | namespace: monitoring 7 | labels: 8 | grafana_datasource: "1" 9 | data: 10 | datasource.yaml: |- 11 | apiVersion: 1 12 | deleteDatasources: 13 | - name: Prometheus 14 | orgId: 1 15 | datasources: 16 | - name: Prometheus 17 | type: prometheus 18 | url: http://prometheus-server.monitoring.svc.cluster.local:80 19 | access: proxy 20 | orgId: 1 21 | isDefault: true 22 | -------------------------------------------------------------------------------- /deploy_caasp.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 4 | function printHelp { 5 | cat << EOF 6 | Usage ${0##*/} [options..] 7 | -m, --model Which config.yml model to use for vm sizing 8 | Default: "minimal" 9 | -f, --full attempt to bring the machines up and deploy the cluster 10 | -a, --air-gapped Setup CaaSP nodes with substitute registries (for deployment and/or private image access) 11 | -i, --ignore-memory Don't prompt when over allocating memory 12 | -t, --test Do a dry run, don't actually deploy the vms 13 | -v, --verbose [uint8] Verbosity level to pass to skuba -v (default is 1) 14 | -h,-?, --help Show help 15 | EOF 16 | } 17 | 18 | 19 | # Get all models in the file 20 | VALID_MODELS=`egrep '^[a-zA-Z]' config.yml |tr ':' ' '|tr -d '\r\n' ` 21 | 22 | function validate_model { 23 | local result=0 24 | if [[ " $VALID_MODELS " =~ .*\ $1\ .* ]]; then 25 | result=1 26 | fi 27 | echo "$result" 28 | } 29 | 30 | function invalid_model { 31 | echo "Invalid model option, must be one of '$VALID_MODELS'." 32 | echo "Update config.yml if needed." 
33 | exit 1 34 | } 35 | 36 | CAASP_CONFIG_MODEL="minimal" 37 | DO_MEMORY_CHECK=true 38 | FULL_DEPLOYMENT=false 39 | AIR_GAPPED=false 40 | DO_DRY_RUN=false 41 | PARAMS="" 42 | while (( "$#" )); do 43 | case "$1" in 44 | -h|-\?|--help) 45 | printHelp 46 | exit 47 | ;; 48 | -m|--model) 49 | CAASP_CONFIG_MODEL=$2 50 | shift 2 51 | ;; 52 | -f|--full) 53 | FULL_DEPLOYMENT=true 54 | shift 55 | ;; 56 | -a|--air-gapped) 57 | AIR_GAPPED=true 58 | shift 59 | ;; 60 | -i|--ignore-memory) 61 | DO_MEMORY_CHECK=false 62 | shift 63 | ;; 64 | -t|--test) 65 | DO_DRY_RUN=true 66 | shift 67 | ;; 68 | -v|--verbose) 69 | SKUBA_VERBOSITY=$2 70 | shift 2 71 | ;; 72 | --) # end argument parsing 73 | shift 74 | break 75 | ;; 76 | -*|--*=) # unsupported flags 77 | echo "Error: Unsupported flag $1" >&2 78 | exit 1 79 | ;; 80 | *) # preserve positional arguments 81 | PARAMS="$PARAMS $1" 82 | shift 83 | ;; 84 | esac 85 | done 86 | 87 | # set positional arguments in their proper place 88 | eval set -- "$PARAMS" 89 | 90 | res=$(validate_model $CAASP_CONFIG_MODEL) 91 | if [ "$res" -eq "0" ]; then 92 | invalid_model 93 | fi 94 | 95 | # This is so Vagrantfile can read the 96 | # selected model 97 | export CAASP_CONFIG_MODEL 98 | 99 | # read in the config.yml and write out the caasp_env.conf 100 | source lib.sh 101 | 102 | # Collect System Requirements 103 | master_ram="${CAASP_CONFIG_MODEL}_nodes_master_memory" 104 | master_cpus="${CAASP_CONFIG_MODEL}_nodes_master_cpus" 105 | MASTERMEM=${!master_ram} 106 | worker_ram="${CAASP_CONFIG_MODEL}_nodes_worker_memory" 107 | worker_cpus="${CAASP_CONFIG_MODEL}_nodes_worker_cpus" 108 | WORKERMEM=${!worker_ram} 109 | lb_ram="${CAASP_CONFIG_MODEL}_nodes_loadbalancer_memory" 110 | lb_cpus="${CAASP_CONFIG_MODEL}_nodes_loadbalancer_cpus" 111 | LBMEM=${!lb_ram} 112 | storage_ram="${CAASP_CONFIG_MODEL}_nodes_storage_memory" 113 | storage_cpus="${CAASP_CONFIG_MODEL}_nodes_storage_cpus" 114 | STORAGEMEM=${!storage_ram} 115 | MEMNEEDED="$(($MASTERMEM * $NMASTERS + $WORKERMEM * $NWORKERS + $LBMEM * $NLOADBAL + $STORAGEMEM * $NSTORAGE))" 116 | MEMHOST="$(free -m | awk 'NR==2{print $7}')" 117 | 118 | if [[ $DO_MEMORY_CHECK == true ]]; then 119 | # Check memory configuration with host 120 | if [[ "$MEMNEEDED" -gt "$MEMHOST" ]]; then 121 | read -r -p "The configuration needs ${MEMNEEDED}MB but the host only has ${MEMHOST}MB available, do you want to continue [y/N] " response 122 | response=${response,,} 123 | if [[ ! "$response" =~ ^(yes|y)$ ]]; then 124 | exit 1 125 | fi 126 | fi 127 | fi 128 | 129 | echo "Deploy CAASP with the CAASP_CONFIG_MODEL=$CAASP_CONFIG_MODEL" 130 | echo " Masters=$NMASTERS CPUS=${!master_cpus} MEM=$MASTERMEM" 131 | echo " Workers=$NWORKERS CPUS=${!worker_cpus} MEM=$WORKERMEM" 132 | echo " Load Balancers=$NLOADBAL CPUS=${!lb_cpus} MEM=$LBMEM" 133 | echo " Storage Nodes=$NSTORAGE CPUS=${!storage_cpus} MEM=$STORAGEMEM" 134 | 135 | total_cpus="$((${!master_cpus}*$NMASTERS + ${!worker_cpus}*$NWORKERS + ${!lb_cpus}*$NLOADBAL + ${!storage_cpus}*$NSTORAGE))" 136 | total_mem="$(($MASTERMEM*$NMASTERS + $WORKERMEM*$NWORKERS + $LBMEM*$NLOADBAL + $STORAGEMEM*$NSTORAGE))" 137 | 138 | echo "TOTALS CPU=$total_cpus MEM=$total_mem" 139 | echo "" 140 | 141 | if [[ $AIR_GAPPED == true ]]; then 142 | # Check for required air-gapped config files 143 | FILE=./air-gap.d/air-gapped-registries.conf 144 | if [[ -f "$FILE" ]]; then 145 | echo "Custom Air-gapped Registries configuration found!!" 146 | echo "Configuring nodes for air-gap after VMs are up." 
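    # The node-side registry changes themselves are applied later: once the VMs
    # are up, each master and worker runs /vagrant/deploy/100.prep_airgap.sh
    # over 'vagrant ssh' (see the loops further down in this script).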
147 | else 148 | echo "Air-gap command-line option specified but missing required configuration file(s)." 149 | echo "See ./air-gap.d/README.md for information." 150 | echo "Exiting" 151 | exit 1 152 | fi 153 | else 154 | echo "Default registry location : ensure access to registry.suse.com for installation images" 155 | fi 156 | 157 | if [ "$FULL_DEPLOYMENT" == true ]; then 158 | echo "Do full deployment after VMs are up." 159 | else 160 | echo "Not running deployment scripts after VMs are up." 161 | fi 162 | 163 | if [[ $DO_DRY_RUN == true ]]; then 164 | echo "Dry run complete" 165 | exit 166 | fi 167 | 168 | # 169 | # Now do the work of standing up the vms 170 | # 171 | 172 | echo "Deploying $NMASTERS masters" 173 | for m in $(seq ${NMASTERS}) 174 | do 175 | vagrant up caasp4-master-${m} 176 | done 177 | 178 | echo "Deploying $NWORKERS workers" 179 | for w in $(seq ${NWORKERS}) 180 | do 181 | vagrant up caasp4-worker-${w} 182 | done 183 | 184 | echo "Deploying $NLOADBAL load balancers" 185 | for l in $(seq ${NLOADBAL}) 186 | do 187 | vagrant up caasp4-lb-${l} 188 | done 189 | 190 | echo "Deploying $NSTORAGE storage nodes" 191 | for s in $(seq ${NSTORAGE}) 192 | do 193 | vagrant up caasp4-storage-${s} 194 | done 195 | 196 | if [[ $AIR_GAPPED == true ]]; then 197 | echo "Preparing Air-Gapped Setup..." 198 | echo "Modifying Masters..." 199 | for m in $(seq ${NMASTERS}) 200 | do 201 | vagrant ssh caasp4-master-${m} -c 'sudo /vagrant/deploy/100.prep_airgap.sh' 202 | done 203 | echo "Modifying Workers..." 204 | for w in $(seq ${NWORKERS}) 205 | do 206 | vagrant ssh caasp4-worker-${w} -c 'sudo /vagrant/deploy/100.prep_airgap.sh' 207 | done 208 | echo "Finished Air-Gapped Setup." 209 | fi 210 | 211 | if [[ $FULL_DEPLOYMENT == true ]]; then 212 | vagrant ssh caasp4-master-1 -c 'sudo su - sles -c /vagrant/deploy/99.run-all.sh' 213 | fi 214 | 215 | echo "Happy CaaSPing!" 216 | echo "vagrant ssh caasp4-master-1" 217 | echo "sudo su - sles" 218 | echo "See scripts in the /vagrant/deploy directory for deployment guide steps" 219 | echo "...or run $0 --full to have your cluster auto-deployed" 220 | -------------------------------------------------------------------------------- /destroy_caasp.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | . caasp_env.conf 3 | export CAASP_CONFIG_MODEL=${MODEL} 4 | vagrant destroy -f 5 | # cleanup some files... 6 | sudo rm -fr ./cluster 2>/dev/null 7 | -------------------------------------------------------------------------------- /k8s_tests/test_nfs_pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: test-nfs-sc-pvc 5 | spec: 6 | accessModes: 7 | - ReadWriteOnce 8 | storageClassName: nfs-client 9 | resources: 10 | requests: 11 | storage: 1Gi 12 | 13 | -------------------------------------------------------------------------------- /lib.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This reads in the config.yml file and 4 | # Parses it into env vars 5 | # minimal_nodes_master_memory 6 | # minimal_nodes_master_cpus 7 | # small_nodes_master_memory 8 | # etc. 
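# For illustration only (the exact numbers are whatever your config.yml holds),
# a stanza such as:
#
#   minimal:
#     nodes:
#       master:
#         memory: 2048
#         cpus: 2
#
# is flattened by parse_yaml below into
#
#   minimal_nodes_master_memory="2048"
#   minimal_nodes_master_cpus="2"
#
# which deploy_caasp.sh then dereferences with ${!master_ram} and friends.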
9 | function parse_yaml { 10 | local prefix=$2 11 | local s='[[:space:]]*' w='[a-zA-Z0-9_]*' fs=$(echo @|tr @ '\034') 12 | sed -ne "s|^\($s\):|\1|" \ 13 | -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \ 14 | -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $1 | 15 | awk -F$fs '{ 16 | indent = length($1)/2; 17 | vname[indent] = $2; 18 | for (i in vname) {if (i > indent) {delete vname[i]}} 19 | if (length($3) > 0) { 20 | vn=""; for (i=0; i caasp_env.conf << EOF 46 | NMASTERS=$NMASTERS 47 | NWORKERS=$NWORKERS 48 | NLOADBAL=$NLOADBAL 49 | NSTORAGE=$NSTORAGE 50 | 51 | # The config model chosen at deploy_caasp.sh time 52 | MODEL=$CAASP_CONFIG_MODEL 53 | SKUBA_VERBOSITY=${SKUBA_VERBOSITY} 54 | EOF 55 | -------------------------------------------------------------------------------- /libvirt_setup/add_hosts_to_net.sh: -------------------------------------------------------------------------------- 1 | NET="vagrant-libvirt" 2 | export LIBVIRT_DEFAULT_URI=qemu:///system 3 | virsh net-start ${NET} 4 | # API load balancers 5 | virsh net-update ${NET} add-last ip-dhcp-host \ 6 | '' \ 7 | --live --config --parent-index 0 8 | virsh net-update ${NET} add-last ip-dhcp-host \ 9 | '' \ 10 | --live --config --parent-index 0 11 | 12 | # masters 13 | virsh net-update ${NET} add-last ip-dhcp-host \ 14 | '' \ 15 | --live --config --parent-index 0 16 | virsh net-update ${NET} add-last ip-dhcp-host \ 17 | '' \ 18 | --live --config --parent-index 0 19 | virsh net-update ${NET} add-last ip-dhcp-host \ 20 | '' \ 21 | --live --config --parent-index 0 22 | 23 | # workers 24 | virsh net-update ${NET} add-last ip-dhcp-host \ 25 | '' \ 26 | --live --config --parent-index 0 27 | virsh net-update ${NET} add-last ip-dhcp-host \ 28 | '' \ 29 | --live --config --parent-index 0 30 | virsh net-update ${NET} add-last ip-dhcp-host \ 31 | '' \ 32 | --live --config --parent-index 0 33 | virsh net-update ${NET} add-last ip-dhcp-host \ 34 | '' \ 35 | --live --config --parent-index 0 36 | virsh net-update ${NET} add-last ip-dhcp-host \ 37 | '' \ 38 | --live --config --parent-index 0 39 | 40 | # NFS 41 | virsh net-update ${NET} add-last ip-dhcp-host \ 42 | '' \ 43 | --live --config --parent-index 0 44 | 45 | # API load balancers 46 | virsh net-update ${NET} add dns-host 'caasp4-lb-1.suselab.comcaasp4-lb-1' --live --config 47 | virsh net-update ${NET} add dns-host 'caasp4-lb-2.suselab.comcaasp4-lb-2' --live --config 48 | # masters 49 | virsh net-update ${NET} add dns-host 'caasp4-master-1.suselab.comcaasp4-master-1' --live --config 50 | virsh net-update ${NET} add dns-host 'caasp4-master-2.suselab.comcaasp4-master-2' --live --config 51 | virsh net-update ${NET} add dns-host 'caasp4-master-3.suselab.comcaasp4-master-3' --live --config 52 | # workers 53 | virsh net-update ${NET} add dns-host 'caasp4-worker-1.suselab.comcaasp4-worker-1' --live --config 54 | virsh net-update ${NET} add dns-host 'caasp4-worker-2.suselab.comcaasp4-worker-2' --live --config 55 | virsh net-update ${NET} add dns-host 'caasp4-worker-3.suselab.comcaasp4-worker-3' --live --config 56 | virsh net-update ${NET} add dns-host 'caasp4-worker-4.suselab.comcaasp4-worker-4' --live --config 57 | virsh net-update ${NET} add dns-host 'caasp4-worker-5.suselab.comcaasp4-worker-5' --live --config 58 | # NFS 59 | virsh net-update ${NET} add dns-host 'caasp4-storage-1.suselab.comcaasp4-storage-1' --live --config 60 | -------------------------------------------------------------------------------- /libvirt_setup/delete_box.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash -x 2 | vagrant destroy -f 3 | vagrant box remove vagrant-caasp 4 | sudo rm /var/lib/libvirt/images/vagrant-caasp_vagrant_box_image_0.img 5 | virsh pool-refresh default 6 | -------------------------------------------------------------------------------- /libvirt_setup/openSUSE_vagrant_setup.sh: -------------------------------------------------------------------------------- 1 | # Taken from: github.com/openSUSE/vagrant-ceph 2 | 3 | set -ex 4 | 5 | zypper in -y --allow-unsigned-rpm https://releases.hashicorp.com/vagrant/2.2.5/vagrant_2.2.5_x86_64.rpm 6 | 7 | # workaround for https://github.com/hashicorp/vagrant/issues/10019 8 | mv /opt/vagrant/embedded/lib/libreadline.so.7{,.disabled} | true 9 | 10 | zypper in -y ruby-devel 11 | zypper in -y gcc gcc-c++ make 12 | zypper in -y qemu-kvm libvirt-daemon-qemu libvirt libvirt-devel 13 | 14 | #need for vagrant-libvirt 15 | gem install ffi 16 | gem install unf_ext 17 | gem install ruby-libvirt 18 | 19 | systemctl enable libvirtd 20 | systemctl start libvirtd 21 | 22 | vagrant plugin install vagrant-libvirt 23 | 24 | -------------------------------------------------------------------------------- /libvirt_setup/update_firewall.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | BRIDGE=$(virsh net-info vagrant-libvirt|sed -n 's/^Bridge:\s*\(.*\).*/\1/p') 3 | if [ ! -z "${BRIDGE}" ]; then 4 | echo "Found vagrant-libvirt bridge device: ${BRIDGE}" 5 | else 6 | echo "Unable to determine bridge... Automation failed you." 7 | exit 1 8 | fi 9 | 10 | FWSTATE="unknown" 11 | if [ -x /usr/bin/firewall-cmd ]; then 12 | FWSTATE=$(firewall-cmd --state) 13 | else 14 | exit 0 15 | fi 16 | 17 | ZONE="vagrant-caasp" 18 | FWCMDRC=$(firewall-cmd --info-zone ${ZONE} >/dev/null 2>&1;echo $?) 19 | if [ "${FWCMDRC}" -eq 0 ]; then 20 | echo "Firewall zone ${ZONE} already configured" 21 | exit 0 22 | fi 23 | 24 | if [ "${FWSTATE}" == "running" ]; then 25 | echo "Updating firewall configuration for ${BRIDGE}" 26 | set -x 27 | firewall-cmd --permanent --new-zone=${ZONE} 28 | firewall-cmd --zone=${ZONE} --permanent --add-interface=${BRIDGE} 29 | firewall-cmd --zone=${ZONE} --permanent --set-target=ACCEPT 30 | firewall-cmd --reload 31 | set +x 32 | exit 0 33 | else 34 | echo "Firewall not running?" 
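  # firewalld is not running, so there is no firewalld zone to configure here;
  # any other host firewall would have to be opened for the bridge manually.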
35 | exit 0 36 | fi 37 | -------------------------------------------------------------------------------- /libvirt_setup/vagrant-libvirt.xml: -------------------------------------------------------------------------------- 1 | 2 | vagrant-libvirt 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | caasp4-lb-1.suselab.com 13 | caasp4-lb-1 14 | 15 | 16 | caasp4-lb-2.suselab.com 17 | caasp4-lb-2 18 | 19 | 20 | caasp4-master-1.suselab.com 21 | caasp4-master-1 22 | 23 | 24 | caasp4-master-2.suselab.com 25 | caasp4-master-2 26 | 27 | 28 | caasp4-master-3.suselab.com 29 | caasp4-master-3 30 | 31 | 32 | caasp4-worker-1.suselab.com 33 | caasp4-worker-1 34 | 35 | 36 | caasp4-worker-2.suselab.com 37 | caasp4-worker-2 38 | 39 | 40 | caasp4-worker-3.suselab.com 41 | caasp4-worker-3 42 | 43 | 44 | caasp4-worker-4.suselab.com 45 | caasp4-worker-4 46 | 47 | 48 | caasp4-worker-5.suselab.com 49 | caasp4-worker-5 50 | 51 | 52 | caasp4-storage-1.suselab.com 53 | caasp4-storage-1 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | -------------------------------------------------------------------------------- /openstack/000.zypper_repos.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # sudo this file 4 | 5 | echo "alias ll='ls -al'" >> ~sles/.bashrc 6 | 7 | zypper ar http://download.suse.de/ibs/SUSE:/SLE-15-SP1:/GA/standard SLE15-SP1-GA 8 | zypper ar http://download.suse.de/ibs/SUSE:/SLE-15-SP1:/Update/standard/ SLE15-SP1-Update 9 | 10 | zypper ar http://download.suse.de/install/SLP/SLE-15-SP1-Module-Development-Tools-GM/x86_64/DVD1/ SLE15-DEV-DVD1 11 | zypper ar http://download.suse.de/install/SLP/SLE-15-SP1-Module-Development-Tools-GM/x86_64/DVD2/ SLE15-DEV-DVD2 12 | zypper ar http://download.suse.de/install/SLP/SLE-15-SP1-Module-Development-Tools-GM/x86_64/DVD3/ SLE15-DEV-DVD3 13 | 14 | zypper ar http://download.suse.de/install/SLP/SLE-15-SP1-Module-Basesystem-GM/x86_64/DVD1/ SLE15-SP1-BASE1 15 | zypper ar http://download.suse.de/install/SLP/SLE-15-SP1-Module-Basesystem-GM/x86_64/DVD2/ SLE15-SP1-BASE2 16 | zypper ar http://download.suse.de/install/SLP/SLE-15-SP1-Module-Basesystem-GM/x86_64/DVD3/ SLE15-SP1-BASE3 17 | zypper refresh 18 | -------------------------------------------------------------------------------- /openstack/001.install_packages.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | sudo zypper in -y wget make python3-pip jq bc 4 | 5 | sudo pip install -U pip 6 | sudo pip install "cmd2<=0.8.7" 7 | sudo pip install python-openstackclient python-heatclient --ignore-installed 8 | -------------------------------------------------------------------------------- /openstack/002.setup_hosts.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # this script adds the required entries to the /etc/hosts file 4 | 5 | 6 | tee -a /etc/hosts </etc/systemd/system/helm-serve.service < 43 | 44 | To login cat the ~/.kube/config file and use the token at the bottom of the 45 | file as the auth token. 
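One non-interactive way to grab that token (a sketch -- it assumes the generated
kubeconfig stores a plain bearer token for its first user entry, as described above):

```sh
kubectl config view --raw -o jsonpath='{.users[0].user.token}'; echo
```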
46 | -------------------------------------------------------------------------------- /openstack/discover_dashboard_port.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | echo "Kube Dashboard port = " 3 | kubectl get -o jsonpath="{.spec.ports[0].nodePort}" services kubernetes-dashboard -n kube-system 4 | echo "" 5 | -------------------------------------------------------------------------------- /openstack/patch-030-ingress.patch: -------------------------------------------------------------------------------- 1 | diff --git a/tools/deployment/developer/common/030-ingress.sh b/tools/deployment/developer/common/030-ingress.sh 2 | index a92f94d5..59c61af8 100755 3 | --- a/tools/deployment/developer/common/030-ingress.sh 4 | +++ b/tools/deployment/developer/common/030-ingress.sh 5 | @@ -31,6 +31,14 @@ deployment: 6 | type: DaemonSet 7 | network: 8 | host_namespace: true 9 | + vip: 10 | + manage: true 11 | + # what type of vip manage mechanism will be used 12 | + # possible options: routed, keepalived 13 | + mode: routed 14 | + interface: ingress-vip 15 | + addr: 192.168.121.169/32 16 | + external_policy_local: true 17 | EOF 18 | helm upgrade --install ingress-kube-system ${HELM_CHART_ROOT_PATH}/ingress \ 19 | --namespace=kube-system \ 20 | -------------------------------------------------------------------------------- /rook/examples/mysql.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: wordpress-mysql 5 | labels: 6 | app: wordpress 7 | spec: 8 | ports: 9 | - port: 3306 10 | selector: 11 | app: wordpress 12 | tier: mysql 13 | clusterIP: None 14 | --- 15 | apiVersion: v1 16 | kind: PersistentVolumeClaim 17 | metadata: 18 | name: mysql-pv-claim 19 | labels: 20 | app: wordpress 21 | spec: 22 | storageClassName: rook-ceph-block 23 | accessModes: 24 | - ReadWriteOnce 25 | resources: 26 | requests: 27 | storage: 20Gi 28 | --- 29 | apiVersion: apps/v1 30 | kind: Deployment 31 | metadata: 32 | name: wordpress-mysql 33 | labels: 34 | app: wordpress 35 | tier: mysql 36 | spec: 37 | selector: 38 | matchLabels: 39 | app: wordpress 40 | tier: mysql 41 | strategy: 42 | type: Recreate 43 | template: 44 | metadata: 45 | labels: 46 | app: wordpress 47 | tier: mysql 48 | spec: 49 | containers: 50 | - image: mysql:5.6 51 | name: mysql 52 | env: 53 | - name: MYSQL_ROOT_PASSWORD 54 | value: changeme 55 | ports: 56 | - containerPort: 3306 57 | name: mysql 58 | volumeMounts: 59 | - name: mysql-persistent-storage 60 | mountPath: /var/lib/mysql 61 | volumes: 62 | - name: mysql-persistent-storage 63 | persistentVolumeClaim: 64 | claimName: mysql-pv-claim 65 | -------------------------------------------------------------------------------- /rook/examples/test-cephfs-webserver.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: webserver 5 | labels: 6 | app: webserver 7 | spec: 8 | ports: 9 | - port: 80 10 | selector: 11 | app: webserver 12 | tier: frontend 13 | type: LoadBalancer 14 | --- 15 | apiVersion: v1 16 | kind: PersistentVolumeClaim 17 | metadata: 18 | name: ws-cephfs-pvc 19 | labels: 20 | app: webserver 21 | spec: 22 | storageClassName: rook-cephfs 23 | accessModes: 24 | - ReadWriteOnce 25 | resources: 26 | requests: 27 | storage: 1Gi 28 | --- 29 | apiVersion: apps/v1 30 | kind: Deployment 31 | metadata: 32 | name: cephfs-demo 33 | labels: 34 | app: webserver 35 | 
tier: frontend 36 | spec: 37 | selector: 38 | matchLabels: 39 | app: webserver 40 | tier: frontend 41 | strategy: 42 | type: Recreate 43 | template: 44 | metadata: 45 | labels: 46 | app: webserver 47 | tier: frontend 48 | spec: 49 | containers: 50 | - image: nginx 51 | name: webserver 52 | ports: 53 | - containerPort: 80 54 | name: webserver 55 | volumeMounts: 56 | - name: webserver-pvc 57 | mountPath: /var/lib/www/html 58 | volumes: 59 | - name: webserver-pvc 60 | persistentVolumeClaim: 61 | claimName: ws-cephfs-pvc 62 | readOnly: false 63 | -------------------------------------------------------------------------------- /rook/examples/wordpress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: wordpress 5 | labels: 6 | app: wordpress 7 | spec: 8 | ports: 9 | - port: 80 10 | selector: 11 | app: wordpress 12 | tier: frontend 13 | type: LoadBalancer 14 | --- 15 | apiVersion: v1 16 | kind: PersistentVolumeClaim 17 | metadata: 18 | name: wp-pv-claim 19 | labels: 20 | app: wordpress 21 | spec: 22 | storageClassName: rook-ceph-block 23 | accessModes: 24 | - ReadWriteOnce 25 | resources: 26 | requests: 27 | storage: 20Gi 28 | --- 29 | apiVersion: apps/v1 30 | kind: Deployment 31 | metadata: 32 | name: wordpress 33 | labels: 34 | app: wordpress 35 | tier: frontend 36 | spec: 37 | selector: 38 | matchLabels: 39 | app: wordpress 40 | tier: frontend 41 | strategy: 42 | type: Recreate 43 | template: 44 | metadata: 45 | labels: 46 | app: wordpress 47 | tier: frontend 48 | spec: 49 | containers: 50 | - image: wordpress:4.6.1-apache 51 | name: wordpress 52 | env: 53 | - name: WORDPRESS_DB_HOST 54 | value: wordpress-mysql 55 | - name: WORDPRESS_DB_PASSWORD 56 | value: changeme 57 | ports: 58 | - containerPort: 80 59 | name: wordpress 60 | volumeMounts: 61 | - name: wordpress-persistent-storage 62 | mountPath: /var/www/html 63 | volumes: 64 | - name: wordpress-persistent-storage 65 | persistentVolumeClaim: 66 | claimName: wp-pv-claim 67 | -------------------------------------------------------------------------------- /rook/filesystem.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: ceph.rook.io/v1 2 | kind: CephFilesystem 3 | metadata: 4 | name: myfs 5 | namespace: rook-ceph 6 | spec: 7 | metadataPool: 8 | replicated: 9 | size: 3 10 | dataPools: 11 | - replicated: 12 | size: 3 13 | metadataServer: 14 | activeCount: 1 15 | activeStandby: true 16 | -------------------------------------------------------------------------------- /rook/rook_cephfs_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | kubectl apply -f /vagrant/rook/filesystem.yaml 3 | kubectl apply -f /vagrant/rook/sc-cephfs.yaml 4 | 5 | -------------------------------------------------------------------------------- /rook/rook_setup.sh: -------------------------------------------------------------------------------- 1 | cd /usr/share/k8s-yaml/rook/ceph 2 | kubectl apply -f common.yaml -f operator.yaml 3 | kubectl apply -f cluster.yaml 4 | kubectl apply -f toolbox.yaml 5 | 6 | kubectl apply -f /vagrant/rook/sc.yaml 7 | -------------------------------------------------------------------------------- /rook/rook_status.sh: -------------------------------------------------------------------------------- 1 | kubectl get po -n rook-ceph 2 | kubectl -n rook-ceph exec -it $(kubectl -n rook-ceph get pod -l "app=rook-ceph-tools" -o 
jsonpath='{.items[0].metadata.name}') -- bash -c "ceph status" 3 | -------------------------------------------------------------------------------- /rook/sc-cephfs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: StorageClass 3 | metadata: 4 | name: rook-cephfs 5 | # Change "rook-ceph" provisioner prefix to match the operator namespace if needed 6 | provisioner: rook-ceph.cephfs.csi.ceph.com 7 | parameters: 8 | # clusterID is the namespace where operator is deployed. 9 | clusterID: rook-ceph 10 | 11 | # CephFS filesystem name into which the volume shall be created 12 | fsName: myfs 13 | 14 | # Ceph pool into which the volume shall be created 15 | # Required for provisionVolume: "true" 16 | pool: myfs-data0 17 | 18 | # Root path of an existing CephFS volume 19 | # Required for provisionVolume: "false" 20 | # rootPath: /absolute/path 21 | 22 | # The secrets contain Ceph admin credentials. These are generated automatically by the operator 23 | # in the same namespace as the cluster. 24 | csi.storage.k8s.io/provisioner-secret-name: rook-ceph-csi 25 | csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph 26 | csi.storage.k8s.io/controller-expand-secret-name: rook-ceph-csi 27 | csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph 28 | csi.storage.k8s.io/node-stage-secret-name: rook-ceph-csi 29 | csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph 30 | 31 | reclaimPolicy: Delete 32 | allowVolumeExpansion: true 33 | -------------------------------------------------------------------------------- /rook/sc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: ceph.rook.io/v1 2 | kind: CephBlockPool 3 | metadata: 4 | name: replicapool 5 | namespace: rook-ceph 6 | spec: 7 | failureDomain: host 8 | replicated: 9 | size: 3 10 | --- 11 | apiVersion: storage.k8s.io/v1 12 | kind: StorageClass 13 | metadata: 14 | name: rook-ceph-block 15 | # Change "rook-ceph" provisioner prefix to match the operator namespace if needed 16 | provisioner: rook-ceph.rbd.csi.ceph.com 17 | parameters: 18 | # clusterID is the namespace where the rook cluster is running 19 | clusterID: rook-ceph 20 | # Ceph pool into which the RBD image shall be created 21 | pool: replicapool 22 | 23 | # RBD image format. Defaults to "2". 24 | imageFormat: "2" 25 | 26 | # RBD image features. Available for imageFormat: "2". CSI RBD currently supports only `layering` feature. 27 | imageFeatures: layering 28 | 29 | # The secrets contain Ceph admin credentials. 30 | csi.storage.k8s.io/provisioner-secret-name: rook-ceph-csi 31 | csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph 32 | csi.storage.k8s.io/node-stage-secret-name: rook-ceph-csi 33 | csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph 34 | 35 | # Specify the filesystem type of the volume. If not specified, csi-provisioner 36 | # will set default as `ext4`. 37 | # csi.storage.k8s.io/fstype: xfs 38 | 39 | # Delete the rbd volume when a PVC is deleted 40 | reclaimPolicy: Delete 41 | 42 | -------------------------------------------------------------------------------- /rook/switch_default_sc_to_ses.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | echo "Changing default storage class to SES..." 
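# Kubernetes picks the default StorageClass from the
# storageclass.kubernetes.io/is-default-class annotation, so the two patches
# below set it to "false" on nfs-client and "true" on rook-ceph-block.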
3 | 4 | echo "Current:" 5 | kubectl get sc 6 | 7 | # remove current default 8 | kubectl patch storageclass nfs-client -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"false"}}}' 9 | # set new default to SES 10 | kubectl patch storageclass rook-ceph-block -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}' 11 | 12 | echo "New:" 13 | kubectl get sc 14 | -------------------------------------------------------------------------------- /utils.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 3 | source "${SCRIPTDIR}"/caasp_env.conf 4 | 5 | function wait_for_masters_ready { 6 | printf "Waiting for masters to be ready" 7 | until [[ $(kubectl get nodes 2>/dev/null | egrep -c "caasp4-master-[0-9]\s+Ready") -eq $NMASTERS ]]; do 8 | sleep 5 9 | printf "." 10 | done 11 | printf "\n" 12 | } 13 | 14 | function wait_for_workers_ready { 15 | printf "Waiting for workers to be ready" 16 | until [[ $(kubectl get nodes 2>/dev/null | egrep -c "caasp4-worker-[0-9]\s+Ready") -eq $NWORKERS ]]; do 17 | sleep 5 18 | printf "." 19 | done 20 | printf "\n" 21 | } 22 | 23 | function wait_for_workers_notready { 24 | printf "Waiting for workers to be flagged 'NotReady'" 25 | until [[ $(kubectl get nodes 2>/dev/null | egrep -c "caasp4-worker-[0-9]\s+NotReady") -eq $NWORKERS ]]; do 26 | sleep 5 27 | printf "." 28 | done 29 | printf "\n" 30 | } 31 | --------------------------------------------------------------------------------
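# A minimal usage sketch for the helpers in utils.sh above (paths assume the
# repo root is mounted at /vagrant inside the VMs, as the other deploy scripts
# expect, and that kubectl is already configured for the cluster):
#   source /vagrant/utils.sh
#   wait_for_masters_ready
#   wait_for_workers_ready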