├── .github ├── pull_request_template.md └── workflows │ └── docbook.yml ├── .gitignore ├── DC-caasp-admin ├── DC-caasp-airgap ├── DC-caasp-architecture ├── DC-caasp-deployment ├── DC-caasp-quickstart ├── LICENSE ├── README.adoc ├── adoc ├── admin-cap-integration.adoc ├── admin-cilium.adoc ├── admin-cluster-disaster-recovery-backup.adoc ├── admin-cluster-disaster-recovery-restore-master-nodes.adoc ├── admin-cluster-disaster-recovery.adoc ├── admin-cluster-management.adoc ├── admin-configure-kubelet.adoc ├── admin-crio-proxy.adoc ├── admin-crio-registries.adoc ├── admin-custom-config.adoc ├── admin-flexvolume.adoc ├── admin-gpus.adoc ├── admin-kubernetes-changes.adoc ├── admin-logging-audit.adoc ├── admin-logging-centralized.adoc ├── admin-logging-skuba.adoc ├── admin-logging.adoc ├── admin-migration.adoc ├── admin-monitoring-health-checks.adoc ├── admin-monitoring-horizontal-pod-autoscaler.adoc ├── admin-monitoring-stack.adoc ├── admin-security-access.adoc ├── admin-security-admission.adoc ├── admin-security-certificates.adoc ├── admin-security-configure-authentication-connector.adoc ├── admin-security-deploy-ldap-server.adoc ├── admin-security-firewall.adoc ├── admin-security-ldap-user-group-management.adoc ├── admin-security-nginx-ingress.adoc ├── admin-security-psp.adoc ├── admin-security-rbac-administration.adoc ├── admin-security-rbac-user-access.adoc ├── admin-ses-integration.adoc ├── admin-shutdown-startup.adoc ├── admin-software-installation.adoc ├── admin-storage-vsphere.adoc ├── admin-stratos-web-console.adoc ├── admin-troubleshooting-etcd.adoc ├── admin-troubleshooting.adoc ├── admin-updates.adoc ├── admin-velero-backup.adoc ├── admin-velero-deployment.adoc ├── admin-velero-disaster-recovery.adoc ├── admin-velero-prereqs.adoc ├── admin-velero-restore.adoc ├── admin-velero-usecase.adoc ├── architecture-description.adoc ├── architecture-updates.adoc ├── attributes.adoc ├── book_admin-docinfo.xml ├── book_admin.adoc ├── book_airgap.adoc ├── book_architecture-docinfo.xml ├── book_architecture.adoc ├── book_deployment-docinfo.xml ├── book_deployment.adoc ├── book_quickstart-docinfo.xml ├── book_quickstart.adoc ├── common_authors.adoc ├── common_changelog.adoc ├── common_copyright.adoc ├── common_copyright_gfdl.adoc ├── common_copyright_quick.adoc ├── common_disclaimer.adoc ├── common_glossary.adoc ├── common_intro_available_doc.adoc ├── common_intro_feedback.adoc ├── common_intro_target_audience.adoc ├── common_intro_typography.adoc ├── common_legal.adoc ├── common_tech_preview.adoc ├── deployment-airgap.adoc ├── deployment-aws.adoc ├── deployment-bare-metal-or-kvm.adoc ├── deployment-bootstrap.adoc ├── deployment-cilium.adoc ├── deployment-default.adoc ├── deployment-loadbalancer.adoc ├── deployment-openstack.adoc ├── deployment-preparation.adoc ├── deployment-sles.adoc ├── deployment-sysreqs.adoc ├── deployment-terraform-example.adoc ├── deployment-vmware.adoc ├── docinfo.xml ├── entities.adoc ├── images │ ├── airgap.png │ ├── caasp_cluster_airgap_network.png │ ├── caasp_cluster_components.png │ ├── caasp_cluster_software.png │ ├── deploy-loadbalancer-ip.png │ ├── logo_cilium.png │ ├── logo_crio.svg │ ├── logo_dex.png │ ├── logo_etcd.svg │ ├── logo_kubernetes.png │ ├── logo_kured.png │ ├── oidc_flow_cli.png │ ├── oidc_flow_web.png │ ├── rbac-configure-kubectl.png │ ├── sm_logo_cilium.png │ ├── sm_logo_crio.svg │ ├── sm_logo_dex.png │ ├── sm_logo_etcd.svg │ ├── sm_logo_kubernetes.png │ ├── sm_logo_kured.png │ ├── src │ │ ├── caasp_cluster_airgap.drawio │ │ ├── 
caasp_cluster_airgap_network.drawio │ │ ├── caasp_cluster_components.drawio │ │ └── caasp_cluster_software.drawio │ ├── vmware_extension.png │ ├── vmware_step1.png │ ├── vmware_step10.png │ ├── vmware_step11.png │ ├── vmware_step12.png │ ├── vmware_step13.png │ ├── vmware_step14.png │ ├── vmware_step15.png │ ├── vmware_step16.png │ ├── vmware_step17.png │ ├── vmware_step2.png │ ├── vmware_step3.png │ ├── vmware_step4.png │ ├── vmware_step5.png │ ├── vmware_step6.png │ ├── vmware_step6b.png │ ├── vmware_step7.png │ ├── vmware_step8.png │ └── vmware_step9.png ├── network-decl.adoc ├── quick-deployment.adoc ├── suse-rbac-oidc-flow-cli.xml └── suse-rbac-oidc-flow-web.xml └── make_release_package.sh /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | # Describe your changes 2 | What is this PR about? Please briefly detail why this needs to be integrated 3 | and where you think it should go if it's not obvious from the changes. 4 | 5 | # Related Issues / Projects 6 | Please provide links to Bugzilla and other GitHub projects with your description if they are related to the changes. 7 | 8 | Relates to: 9 | 10 | 34 | -------------------------------------------------------------------------------- /.github/workflows/docbook.yml: -------------------------------------------------------------------------------- 1 | name: Validate/build docs 2 | 3 | on: 4 | push: 5 | paths: 6 | - 'DC-*' 7 | - 'xml/**' 8 | - 'adoc/**' 9 | - 'images/src/**' 10 | - '**/DC-*' 11 | - '**/xml/**' 12 | - '**/adoc/**' 13 | - '**/images/src/**' 14 | 15 | jobs: 16 | select-dc-files: 17 | runs-on: ubuntu-latest 18 | outputs: 19 | validate-list: ${{ steps.select-dc-validate.outputs.dc-list }} 20 | build-list: ${{ steps.select-dc-build.outputs.dc-list }} 21 | allow-build: ${{ steps.select-dc-build.outputs.allow-build }} 22 | relevant-branches: ${{ steps.select-dc-build.outputs.relevant-branches }} 23 | steps: 24 | - uses: actions/checkout@v3 25 | 26 | - name: Checking basic soundness of DC files 27 | uses: openSUSE/doc-ci@gha-select-dcs 28 | with: 29 | mode: soundness 30 | 31 | - name: Selecting DC files to validate 32 | id: select-dc-validate 33 | uses: openSUSE/doc-ci@gha-select-dcs 34 | with: 35 | mode: list-validate 36 | 37 | - name: Selecting DC files to build 38 | id: select-dc-build 39 | uses: openSUSE/doc-ci@gha-select-dcs 40 | with: 41 | mode: list-build 42 | original-org: SUSE 43 | 44 | validate: 45 | runs-on: ubuntu-latest 46 | needs: select-dc-files 47 | strategy: 48 | # don't cancel all validation runners when one of them fails, we want full results 49 | fail-fast: false 50 | matrix: 51 | dc-files: ${{ fromJson(needs.select-dc-files.outputs.validate-list) }} 52 | steps: 53 | - uses: actions/checkout@v3 54 | - name: Validating DC file(s) ${{ matrix.dc-files }} 55 | uses: openSUSE/doc-ci@gha-validate 56 | with: 57 | dc-files: ${{ matrix.dc-files }} 58 | validate-ids: false 59 | 60 | 61 | build-html: 62 | runs-on: ubuntu-latest 63 | needs: [select-dc-files, validate] 64 | if: ${{ needs.select-dc-files.outputs.allow-build == 'true' }} 65 | outputs: 66 | artifact-name: ${{ steps.build-dc.outputs.artifact-name }} 67 | artifact-dir: ${{ steps.build-dc.outputs.artifact-dir }} 68 | strategy: 69 | matrix: 70 | dc-files: ${{ fromJson(needs.select-dc-files.outputs.build-list) }} 71 | steps: 72 | - uses: actions/checkout@v3 73 | - name: Building DC file(s) ${{ matrix.dc-files }} 74 | id: build-dc 75 | uses: openSUSE/doc-ci@gha-build 76 | with: 77 | dc-files: ${{ 
matrix.dc-files }} 78 | - name: Uploading builds as artifact 79 | uses: actions/upload-artifact@v3 80 | with: 81 | name: ${{ steps.build-dc.outputs.artifact-name }} 82 | path: ${{ steps.build-dc.outputs.artifact-dir }}/* 83 | retention-days: 3 84 | 85 | 86 | publish: 87 | runs-on: ubuntu-latest 88 | if: ${{ success() }} 89 | needs: [select-dc-files, build-html] 90 | continue-on-error: true 91 | steps: 92 | - name: Downloading all build artifacts 93 | uses: actions/download-artifact@v3 94 | with: 95 | path: artifact-dir 96 | - name: Publishing builds on susedoc.github.io 97 | uses: openSUSE/doc-ci@gha-publish 98 | env: 99 | DEPLOY_KEY: ${{ secrets.DEPLOY_KEY_CAASP }} 100 | with: 101 | artifact-path: artifact-dir 102 | relevant-dirs: ${{ needs.select-dc-files.outputs.relevant-branches }} 103 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | build/ 2 | \#*# 3 | .#* 4 | *~ 5 | *.bak 6 | *.orig 7 | .directory 8 | .DS_Store 9 | .DS_Store? 10 | -------------------------------------------------------------------------------- /DC-caasp-admin: -------------------------------------------------------------------------------- 1 | ## Doc config file for the DAPS example document 2 | ## See /etc/daps/config for documentation of the settings below 3 | ## 4 | 5 | ## MAIN file (mandatory) 6 | 7 | MAIN="book_admin.adoc" 8 | 9 | ##----- optional parameters ----- 10 | 11 | ## Image directory 12 | 13 | ADOC_IMG_DIR="adoc/images" 14 | 15 | ## Type 16 | 17 | ADOC_TYPE="book" 18 | 19 | ## Turn on postprocessing 20 | 21 | ADOC_POST="yes" 22 | 23 | ## Stylesheet directory 24 | 25 | STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2013-ns" 26 | #STYLEROOT="/usr/share/xml/docbook/stylesheet/daps2013" 27 | 28 | ## XSLT Parameters for customizing the stylesheets 29 | 30 | #XSLTPARAM="--stringparam homepage=https://github.com/openSUSE/daps" 31 | XSLTPARAM+="--param variablelist.as.blocks=1" 32 | -------------------------------------------------------------------------------- /DC-caasp-airgap: -------------------------------------------------------------------------------- 1 | ## Doc config file for the DAPS example document 2 | ## See /etc/daps/config for documentation of the settings below 3 | ## 4 | 5 | ## MAIN file (mandatory) 6 | 7 | MAIN="book_airgap.adoc" 8 | 9 | ##----- optional parameters ----- 10 | 11 | ## Image directory 12 | 13 | ADOC_IMG_DIR="adoc/images" 14 | 15 | ## Type 16 | 17 | ADOC_TYPE="book" 18 | 19 | ## Turn on postprocessing 20 | 21 | ADOC_POST="yes" 22 | 23 | ## Stylesheet directory 24 | 25 | STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2013-ns" 26 | #STYLEROOT="/usr/share/xml/docbook/stylesheet/daps2013" 27 | 28 | ## XSLT Parameters for customizing the stylesheets 29 | 30 | #XSLTPARAM="--stringparam homepage=https://github.com/openSUSE/daps" 31 | XSLTPARAM+="--param variablelist.as.blocks=1" 32 | -------------------------------------------------------------------------------- /DC-caasp-architecture: -------------------------------------------------------------------------------- 1 | ## Doc config file for the DAPS example document 2 | ## See /etc/daps/config for documentation of the settings below 3 | ## 4 | 5 | ## MAIN file (mandatory) 6 | 7 | MAIN="book_architecture.adoc" 8 | 9 | ##----- optional parameters ----- 10 | 11 | ## Image directory 12 | 13 | ADOC_IMG_DIR="adoc/images" 14 | 15 | ## Type 16 | 17 | ADOC_TYPE="book" 18 | 19 | ## Turn on postprocessing 20 | 
21 | ADOC_POST="yes" 22 | 23 | ## Stylesheet directory 24 | 25 | STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2013-ns" 26 | #STYLEROOT="/usr/share/xml/docbook/stylesheet/daps2013" 27 | 28 | ## XSLT Parameters for customizing the stylesheets 29 | 30 | #XSLTPARAM="--stringparam homepage=https://github.com/openSUSE/daps" 31 | XSLTPARAM+="--param variablelist.as.blocks=1" 32 | -------------------------------------------------------------------------------- /DC-caasp-deployment: -------------------------------------------------------------------------------- 1 | ## Doc config file for the DAPS example document 2 | ## See /etc/daps/config for documentation of the settings below 3 | ## 4 | 5 | ## MAIN file (mandatory) 6 | 7 | MAIN="book_deployment.adoc" 8 | 9 | ##----- optional parameters ----- 10 | 11 | ## Image directory 12 | 13 | ADOC_IMG_DIR="adoc/images" 14 | 15 | ## Type 16 | 17 | ADOC_TYPE="book" 18 | 19 | ## Turn on postprocessing 20 | 21 | ADOC_POST="yes" 22 | 23 | ## Stylesheet directory 24 | 25 | STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2013-ns" 26 | #STYLEROOT="/usr/share/xml/docbook/stylesheet/daps2013" 27 | 28 | ## XSLT Parameters for customizing the stylesheets 29 | 30 | #XSLTPARAM="--stringparam homepage=https://github.com/openSUSE/daps" 31 | XSLTPARAM+="--param variablelist.as.blocks=1" 32 | -------------------------------------------------------------------------------- /DC-caasp-quickstart: -------------------------------------------------------------------------------- 1 | ## Doc config file for the DAPS example document 2 | ## See /etc/daps/config for documentation of the settings below 3 | ## 4 | 5 | ## MAIN file (mandatory) 6 | 7 | MAIN="book_quickstart.adoc" 8 | 9 | ##----- optional parameters ----- 10 | 11 | ## Image directory 12 | 13 | ADOC_IMG_DIR="adoc/images" 14 | 15 | ## Type 16 | 17 | ADOC_TYPE="book" 18 | 19 | ## Turn on postprocessing 20 | 21 | ADOC_POST="yes" 22 | 23 | ## Stylesheet directory 24 | 25 | STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2013-ns" 26 | #STYLEROOT="/usr/share/xml/docbook/stylesheet/daps2013" 27 | 28 | ## XSLT Parameters for customizing the stylesheets 29 | 30 | #XSLTPARAM="--stringparam homepage=https://github.com/openSUSE/daps" 31 | XSLTPARAM+="--param variablelist.as.blocks=1" 32 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | adoc/common_copyright_gfdl.adoc -------------------------------------------------------------------------------- /README.adoc: -------------------------------------------------------------------------------- 1 | = SUSE CaaS Platform Documentation 2 | :revdate: 2019-05-24 3 | 4 | image:https://travis-ci.org/SUSE/doc-caasp.svg?branch=adoc["Build Status", link="https://travis-ci.org/SUSE/doc-caasp"] 5 | 6 | This is the source for the official SUSE CaaS Platform documentation 7 | 8 | Released versions of the documentation will be published at 9 | https://documentation.suse.com/ once available. 10 | 11 | == Contributing 12 | 13 | .Allow maintainer updates for pull requests 14 | [IMPORTANT] 15 | When creating a Pull Request please allow maintainers to push commits into your fork. 16 | See: https://help.github.com/en/articles/allowing-changes-to-a-pull-request-branch-created-from-a-fork 17 | 18 | If you would like to contribute, please fork this repository and send pull requests. 
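A typical fork-based workflow is sketched below; the fork URL, branch name and commit message are placeholders to adapt to your own GitHub account and change:

[source,bash]
----
# Clone your fork, create a topic branch, commit, push, then open a pull request
git clone git@github.com:<YOUR_GITHUB_USER>/doc-caasp.git
cd doc-caasp
git checkout -b <TOPIC_BRANCH>
# ... edit files under adoc/ ...
git add adoc/
git commit -m "<SHORT_DESCRIPTION_OF_THE_CHANGE>"
git push origin <TOPIC_BRANCH>
----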
19 | 20 | For help on style and structure, refer to the https://doc.opensuse.org/products/opensuse/Styleguide/opensuse_documentation_styleguide_sd/[Documentation Styleguide] 21 | 22 | == Branches 23 | 24 | [IMPORTANT] 25 | Changes to already released versions of the documentation must be merged to a `maintenance/CaaSX` branch from the maintainer. 26 | 27 | We maintain three versions of the product at this point: 28 | 29 | * master (ongoing 4.5.x work) 30 | * maintenance/CaaS4.5 (4.5.x released version and incremental updates) 31 | * maintenance/CaaS4 (4.2.x released version and incremental updates) 32 | 33 | == Files and Directory Structure 34 | 35 | * `DC-caasp-*`: Configuration files for the exported guides. 36 | * `adoc/`: Contains all the pages that make up the content. 37 | * `adoc/book_*`: Meta files collating pages into a guide document. 38 | * `adoc/attributes.adoc` - Contains all version numbers of the product and its components. 39 | Also contains the `release_type` flag that determines if the branch contains an `public` or (SUSE) `internal` release. 40 | * `adoc/entities.adoc` - Contains text substitutions for often used component names and strings. 41 | * `adoc/common_*`: Include files with common information like legal disclaimers and licenses. 42 | * `adoc/admin-*`: Pages belonging to the Admin guide. 43 | * `adoc/deployment-*`: Pages belonging to the Deployment guide. 44 | * `adoc/architecture-*`: Pages belonging to the Architecture guide. 45 | * `adoc/quick-*`: Pages belonging to the Quickstart guide. 46 | 47 | == Editing AsciiDoc 48 | 49 | To contribute to the documentation you will use AsciiDoc syntax. 50 | 51 | * You can learn about AsciiDoc syntax at link:https://asciidoctor.org/docs/asciidoc-syntax-quick-reference/[] 52 | ** A much more detailed manual can be found link:https://asciidoctor.org/docs/user-manual/[here] 53 | * For simple preview use the browser extensions for 54 | ** https://chrome.google.com/webstore/detail/asciidoctorjs-live-previe/iaalpfgpbocpdfblpnhhgllgbdbchmia[Chrome] 55 | ** https://addons.mozilla.org/en-US/firefox/addon/asciidoctorjs-live-preview/[Firefox] 56 | ** Make sure you set the "security" setting in the Extension Preferences to `server` 57 | 58 | * SUSE documents are generally built with DAPS (package `daps`) and the 59 | SUSE XSL Stylesheets (package `suse-xsl-stylesheets`). It's available as a 60 | SUSE rpm package from the the SUSE repository http://download.opensuse.org/repositories/Documentation:/Tools/[Documentation:Tools] or 61 | directly from https://github.com/openSUSE/suse-xsl/[Github]. 62 | * If you are running a (recent) version of openSUSE, you can install our documentation toolchain with the following command: 63 | `sudo /sbin/OneClickInstallUI https://gitlab.nue.suse.com/susedoc/doc-ymp/raw/master/Documentation.ymp` 64 | * If you don't want to download the entire documentation toolchain, you can also build documentation using DAPS inside a Docker container with https://github.com/openSUSE/daps2docker[daps2docker]: 65 | ** Clone the daps2docker repository. 66 | ** Change directory to the folder which contains your documentation source files (`doc-caasp`). 67 | ** Execute the `daps2docker.sh` file, for example by running `../daps2docker/daps2docker.sh .`. 68 | ** Follow the commandline instructions for further building options. 
69 | * Basic daps usage: 70 | ** `$ daps -d DC- validate`: Make sure what you have written is 71 | well-formed XML and valid DocBook 5 72 | ** `$ daps -d DC- pdf`: Build a PDF document 73 | ** `$ daps -d DC- html`: Build multi-page HTML document 74 | ** Learn more at https://opensuse.github.io/daps 75 | -------------------------------------------------------------------------------- /adoc/admin-cap-integration.adoc: -------------------------------------------------------------------------------- 1 | = {cap} Integration 2 | 3 | For integration with {cap}, refer to: link:https://documentation.suse.com/suse-cap/{cap_version}/single-html/cap-guides/#cha-cap-depl-caasp[Deploying SUSE Cloud Application Platform on SUSE CaaS Platform]. 4 | -------------------------------------------------------------------------------- /adoc/admin-cilium.adoc: -------------------------------------------------------------------------------- 1 | == Network Policies 2 | 3 | [NOTE] 4 | ==== 5 | {productname} versions 4.0 and 4.1 support L3 and L4 policy management; the hyperlinks below will refer you to Cilium documentation on creating these policies, as Cilium documentation is an excellent and definitive source. 6 | ==== 7 | 8 | The default behavior of {kube} is that all pods can communicate with all other pods within a cluster, whether those pods are hosted by the same {kube} node or different ones. 9 | This behavior is intentional, and aids greatly in the development process as the complexity of networking is effectively removed from both the developer and the operator. 10 | 11 | However, when a workload is deployed in a {kube} cluster in production, any number of reasons may arise leading to the need to isolate some workloads from others. 12 | For example, if a Human Resources department is running workloads processing PII (Personally Identifiable Information), those workloads should not by default be accessible by any other workload in the cluster. 13 | 14 | Network policies are the mechanism provided by {kube} which allow a cloud operator to isolate workloads from each other in a variety of ways. 15 | For example, a policy could be defined which only allows a database server workload to be accessed only by the web servers whose pages use the data in the database. 16 | Another policy could be defined in the cluster which allows only web browsers outside the cluster to access the web server workloads in the cluster and so on. 17 | 18 | To implement network policies, a network plugin must be correctly integrated into the cluster. {productname} incorporates Cilium as its supported network policy management plugin. 19 | For L3 and L4 Cilium leverages link:https://www.kernel.org/doc/html/latest/bpf/index.html[BPF (Berkeley Packet Filter)] where every bit of communication transits through a packet processing engine in the kernel. For L7 network policies, Cilium leverages an Envoy proxy running together with the cilium-agent. Other policy management plugins in the {kube} ecosystem leverage `iptables`. 20 | 21 | {suse} has supported `iptables` since its inception in the Linux world, but believes BPF brings sufficiently compelling advantages (fine-grained control, performance) over `iptables`. 22 | Not only does Cilium have performance benefits brought on by BPF, it also has benefits far higher in the network stack. 23 | 24 | The most typically used policies in {kube} cover L3 and L4 events in the network stack, allowing workloads to be protected by specifying IP addresses and TCP ports. 
25 | To implement the earlier example of a dedicated webserver accessing a critical secured database, an L3 policy would be define allowing a web server workload running at IP address `192.168.0.1` to access a MySQL database workload running at IP address `192.168.0.2` on TCP port `3306`. 26 | 27 | [source,yaml] 28 | ---- 29 | apiVersion: "cilium.io/v2" 30 | kind: CiliumNetworkPolicy 31 | metadata: 32 | name: "allow-hr-webui" 33 | spec: 34 | endpointSelector: 35 | matchLabels: 36 | id: hr-db1 37 | ingress: 38 | - toCIDR: 39 | - 192.168.0.2/32 40 | - toPorts: 41 | - ports: 42 | - port: "3306" 43 | protocol: TCP 44 | - fromCIDR: 45 | - 192.168.0.1/32 46 | ---- 47 | 48 | Here an example of L7 policy using HTTP header rules: 49 | 50 | [source,yaml] 51 | ---- 52 | apiVersion: "cilium.io/v2" 53 | kind: CiliumNetworkPolicy 54 | metadata: 55 | name: "l7-rule" 56 | spec: 57 | endpointSelector: 58 | matchLabels: 59 | app: myService 60 | ingress: 61 | - toPorts: 62 | - ports: 63 | - port: '80' 64 | protocol: TCP 65 | rules: 66 | http: 67 | - method: GET 68 | path: "/path1$" 69 | - method: PUT 70 | path: "/path2$" 71 | headers: 72 | - 'X-My-Header: true' 73 | ---- 74 | 75 | Find below hyperlinks to Cilium’s documentation including a high-level introduction to Cilium technology, L3 and L4 protection using IP addresses and TCP ports or DNS addresses and a general guide for network policies: 76 | 77 | General introduction to Cilium:: 78 | https://cilium.readthedocs.io/en/v1.6/intro/ 79 | 80 | Securing traffic for HTTP servers and APIs:: 81 | https://cilium.readthedocs.io/en/v1.6/gettingstarted/http/ 82 | 83 | Restricting the network traffic to specific DNS queries or domains:: 84 | https://cilium.readthedocs.io/en/v1.6/gettingstarted/dns/ 85 | 86 | General configuration of network policies:: 87 | https://cilium.readthedocs.io/en/v1.6/policy/ 88 | -------------------------------------------------------------------------------- /adoc/admin-cluster-disaster-recovery-backup.adoc: -------------------------------------------------------------------------------- 1 | 2 | = Backing Up etcd Cluster Data 3 | 4 | This chapter describes the backup of `etcd` cluster data running on master nodes of {productname}. 5 | 6 | == Data To Backup 7 | . Create backup directories on external storage. 8 | + 9 | [source,bash] 10 | ---- 11 | BACKUP_DIR=CaaSP_Backup_`date +%Y%m%d%H%M%S` 12 | mkdir /${BACKUP_DIR} 13 | ---- 14 | . Copy the following files/folders into the backup directory: 15 | * The `skuba` command-line binary: for the running cluster. Used to replace nodes from cluster. 16 | * The cluster definition folder: Directory created during bootstrap holding the cluster certificates and configuration. 17 | * The `etcd` cluster database: Holds all non-persistent cluster data. 18 | Can be used to recover master nodes. Please refer to the next section for steps to create an `etcd` cluster database backup. 19 | . (Optional) Make backup directory into a compressed file, and remove the original backup directory. 20 | + 21 | [source,bash] 22 | ---- 23 | tar cfv ${BACKUP_DIR}.tgz /${BACKUP_DIR} 24 | rm -rf /${BACKUP_DIR} 25 | ---- 26 | 27 | == Creating an etcd Cluster Database Backup 28 | 29 | === Procedure 30 | 31 | . Mount external storage device to all master nodes. 32 | This is only required if the following step is using local hostpath as volume storage. 33 | . Create backup. 34 | .. 
Find the size of the database to be backed up 35 | + 36 | [source,bash] 37 | ---- 38 | ls -sh /var/lib/etcd/member/snap/db 39 | ---- 40 | + 41 | [IMPORTANT] 42 | ==== 43 | The backup size depends on the cluster. Ensure each of the backups has sufficient space. 44 | The available size should be more than the database snapshot file. 45 | 46 | You should also have a rotation method to clean up the unneeded snapshots over time. 47 | 48 | When there is insufficient space available during backup, pods will fail to be in `Running` state and `no space left on device` errors will show in pod logs. 49 | 50 | The below example manifest shows a binding to a local `hostPath`. 51 | We strongly recommend using other storage methods instead. 52 | ==== 53 | .. Modify the script example 54 | + 55 | Replace `` with the directory in which to store the backup. 56 | The directory must exist on every node in cluster. 57 | + 58 | Replace `` with the `etcd` image used in the cluster. 59 | This can be retrieved by accessing any one of the nodes in the cluster and running: 60 | + 61 | ---- 62 | grep image: /etc/kubernetes/manifests/etcd.yaml | awk '{print $2}' 63 | ---- 64 | .. Create a backup deployment 65 | + 66 | Run the following script: 67 | + 68 | [source,bash] 69 | ---- 70 | ETCD_SNAPSHOT="/etcd_snapshot" 71 | ETCD_IMAGE="" 72 | MANIFEST="etcd-backup.yaml" 73 | 74 | cat << *EOF* > ${MANIFEST} 75 | apiVersion: batch/v1 76 | kind: Job 77 | metadata: 78 | name: etcd-backup 79 | namespace: kube-system 80 | labels: 81 | jobgroup: backup 82 | spec: 83 | template: 84 | metadata: 85 | name: etcd-backup 86 | labels: 87 | jobgroup: backup 88 | spec: 89 | containers: 90 | - name: etcd-backup 91 | image: ${ETCD_IMAGE} 92 | env: 93 | - name: ETCDCTL_API 94 | value: "3" 95 | command: ["/bin/sh"] 96 | args: ["-c", "etcdctl --endpoints=https://127.0.0.1:2379 --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/healthcheck-client.crt --key=/etc/kubernetes/pki/etcd/healthcheck-client.key snapshot save /backup/etcd-snapshot-\$(date +%Y-%m-%d_%H:%M:%S_%Z).db"] 97 | volumeMounts: 98 | - mountPath: /etc/kubernetes/pki/etcd 99 | name: etcd-certs 100 | readOnly: true 101 | - mountPath: /backup 102 | name: etcd-backup 103 | restartPolicy: OnFailure 104 | nodeSelector: 105 | node-role.kubernetes.io/master: "" 106 | tolerations: 107 | - effect: NoSchedule 108 | operator: Exists 109 | hostNetwork: true 110 | volumes: 111 | - name: etcd-certs 112 | hostPath: 113 | path: /etc/kubernetes/pki/etcd 114 | type: DirectoryOrCreate 115 | - name: etcd-backup 116 | hostPath: 117 | path: ${ETCD_SNAPSHOT} 118 | type: Directory 119 | *EOF* 120 | 121 | kubectl create -f ${MANIFEST} 122 | ---- 123 | + 124 | If you are using local `hostPath` and not using a shared storage device, the `etcd` backup will be created to any one of the master nodes. 125 | To find the node associated with each `etcd` backup run: 126 | + 127 | [source,bash] 128 | ---- 129 | kubectl get pods --namespace kube-system --selector=job-name=etcd-backup -o wide 130 | ---- 131 | 132 | == Scheduling etcd Cluster Backup 133 | . Mount external storage device to all master nodes. 134 | This is only required if the following step is using local `hostPath` as volume storage. 135 | . Create Cronjob. 136 | .. Find the size of the database to be backed up 137 | + 138 | [IMPORTANT] 139 | ==== 140 | The backup size depends on the cluster. Ensure each of the backups has sufficient space. 141 | The available size should be more than the database snapshot file. 
142 | 143 | You should also have a rotation method to clean up the unneeded snapshots over time. 144 | 145 | When there is insufficient space available during backup, pods will fail to be in `Running` state and `no space left on device` errors will show in pod logs. 146 | 147 | The below example manifest shows a binding to a local `hostPath`. 148 | We strongly recommend using other storage methods instead. 149 | ==== 150 | + 151 | [source,bash] 152 | ---- 153 | ls -sh /var/lib/etcd/member/snap/db 154 | ---- 155 | .. Modify the script example 156 | + 157 | Replace `` with directory to store for backup. The directory must exist on every node in cluster. 158 | + 159 | Replace `` with etcd image used in cluster. 160 | This can be retrieved by accessing any one of the nodes in the cluster and running: 161 | + 162 | ---- 163 | grep image: /etc/kubernetes/manifests/etcd.yaml | awk '{print $2}' 164 | ---- 165 | .. Create a backup schedule deployment 166 | + 167 | Run the following script: 168 | + 169 | [source,bash] 170 | ---- 171 | ETCD_SNAPSHOT="/etcd_snapshot" 172 | ETCD_IMAGE="" 173 | 174 | # SCHEDULE in Cron format. https://crontab.guru/ 175 | SCHEDULE="0 1 * * *" 176 | 177 | # *_HISTORY_LIMIT is the number of maximum history keep in the cluster. 178 | SUCCESS_HISTORY_LIMIT="3" 179 | FAILED_HISTORY_LIMIT="3" 180 | 181 | MANIFEST="etcd-backup.yaml" 182 | 183 | cat << *EOF* > ${MANIFEST} 184 | apiVersion: batch/v1beta1 185 | kind: CronJob 186 | metadata: 187 | name: etcd-backup 188 | namespace: kube-system 189 | spec: 190 | startingDeadlineSeconds: 100 191 | schedule: "${SCHEDULE}" 192 | successfulJobsHistoryLimit: ${SUCCESS_HISTORY_LIMIT} 193 | failedJobsHistoryLimit: ${FAILED_HISTORY_LIMIT} 194 | jobTemplate: 195 | spec: 196 | template: 197 | spec: 198 | containers: 199 | - name: etcd-backup 200 | image: ${ETCD_IMAGE} 201 | env: 202 | - name: ETCDCTL_API 203 | value: "3" 204 | command: ["/bin/sh"] 205 | args: ["-c", "etcdctl --endpoints=https://127.0.0.1:2379 --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/healthcheck-client.crt --key=/etc/kubernetes/pki/etcd/healthcheck-client.key snapshot save /backup/etcd-snapshot-\$(date +%Y-%m-%d_%H:%M:%S_%Z).db"] 206 | volumeMounts: 207 | - mountPath: /etc/kubernetes/pki/etcd 208 | name: etcd-certs 209 | readOnly: true 210 | - mountPath: /backup 211 | name: etcd-backup 212 | restartPolicy: OnFailure 213 | nodeSelector: 214 | node-role.kubernetes.io/master: "" 215 | tolerations: 216 | - effect: NoSchedule 217 | operator: Exists 218 | hostNetwork: true 219 | volumes: 220 | - name: etcd-certs 221 | hostPath: 222 | path: /etc/kubernetes/pki/etcd 223 | type: DirectoryOrCreate 224 | - name: etcd-backup 225 | # hostPath is only one of the types of persistent volume. Suggest to setup external storage instead. 226 | hostPath: 227 | path: ${ETCD_SNAPSHOT} 228 | type: Directory 229 | *EOF* 230 | 231 | kubectl create -f ${MANIFEST} 232 | ---- 233 | -------------------------------------------------------------------------------- /adoc/admin-cluster-disaster-recovery.adoc: -------------------------------------------------------------------------------- 1 | 2 | 3 | = Cluster Disaster Recovery 4 | 5 | Etcd is a crucial component of Kubernetes - the *etcd cluster* stores the entire Kubernetes cluster state, which means critical configuration data, specifications, as well as the statuses of the running workloads. It also serves as the backend for service discovery. <> explains how to use Velero to backup, restore and migrate data. 
However, the Kubernetes cluster needs to be accessible for Velero to operate. And since the Kubernetes cluster can become inaccessible for many reasons, for example when all of its master nodes are lost, *it is important to periodically backup etcd cluster data*. 6 | 7 | include::admin-cluster-disaster-recovery-backup.adoc[leveloffset=+1] 8 | 9 | include::admin-cluster-disaster-recovery-restore-master-nodes.adoc[leveloffset=+1] 10 | -------------------------------------------------------------------------------- /adoc/admin-cluster-management.adoc: -------------------------------------------------------------------------------- 1 | //= Cluster Management 2 | 3 | Cluster management refers to several processes in the life cycle of a cluster and 4 | its individual nodes: bootstrapping, joining and removing nodes. 5 | For maximum automation and ease {productname} uses the `skuba` tool, 6 | which simplifies {kube} cluster creation and reconfiguration. 7 | 8 | == Prerequisites 9 | 10 | You must have the proper SSH keys for accessing the nodes set up and allow passwordless `sudo` 11 | on the nodes in order to perform many of these steps. If you have followed the standard 12 | deployment procedures this should already be the case. 13 | 14 | Please note: If you are using a different management workstation than the one you have 15 | used during the initial deployment, you might have to transfer the SSH identities 16 | from the original management workstation. 17 | 18 | == Bootstrap and Initial Configuration 19 | 20 | Bootstrapping the cluster is the initial process of starting up a minimal 21 | viable cluster and joining the first master node. Only the first master node needs to be bootstrapped, 22 | later nodes can simply be joined as described in <>. 23 | 24 | Before bootstrapping any nodes to the cluster, 25 | you need to create an initial cluster definition folder (initialize the cluster). 26 | This is done using `skuba cluster init` and its `--control-plane` flag. 27 | 28 | For a step by step guide on how to initialize the cluster, configure updates using `kured` 29 | and subsequently bootstrap nodes to it, refer to the _{productname} Deployment Guide_. 30 | 31 | [#adding-nodes] 32 | == Adding Nodes 33 | 34 | Once you have added the first master node to the cluster using `skuba node bootstrap`, 35 | use the `skuba node join` command to add more nodes. Joining master or worker nodes to 36 | an existing cluster should be done sequentially, meaning the nodes have to be added 37 | one after another and not more of them in parallel. 38 | 39 | [source,bash] 40 | skuba node join --role --user --sudo --target 41 | 42 | The mandatory flags for the join command are `--role`, `--user`, `--sudo` and `--target`. 43 | 44 | - `--role` serves to specify if the node is a *master* or *worker*. 45 | - `--sudo` is for running the command with superuser privileges, 46 | which is necessary for all node operations. 47 | - `` is the name of the user that exists on your SLES machine (default: `sles`). 48 | - `--target ` is the IP address or FQDN of the relevant machine. 49 | - `` is how you decide to name the node you are adding. 50 | 51 | [IMPORTANT] 52 | ==== 53 | New master nodes that you didn't initially include in your Terraform's configuration have 54 | to be manually added to your load balancer's configuration. 
55 | ==== 56 | 57 | To add a new *worker* node, you would run something like: 58 | 59 | [source,bash] 60 | skuba node join --role worker --user sles --sudo --target 10.86.2.164 worker1 61 | 62 | === Adding Nodes from Template 63 | 64 | If you are using a virtual machine template for creating new cluster nodes, 65 | you must make sure that *before* joining the cloned machine to the cluster it is updated to the same software versions 66 | than the other nodes in the cluster. 67 | 68 | Refer to <>. 69 | 70 | Nodes with mismatching package or container software 71 | versions might not be fully functional. 72 | 73 | [#removing-nodes] 74 | == Removing Nodes 75 | 76 | === Temporary Removal 77 | 78 | If you wish to remove a node temporarily, the recommended approach is to first drain the node. 79 | 80 | When you want to bring the node back, you only have to uncordon it. 81 | 82 | TIP: For instructions on how to perform these operations refer to <>. 83 | 84 | === Permanent Removal 85 | 86 | [IMPORTANT] 87 | ==== 88 | Nodes removed with this method cannot be added back to the cluster or any other 89 | skuba-initiated cluster. You must reinstall the entire node and then join it 90 | again to the cluster. 91 | ==== 92 | 93 | The `skuba node remove` command serves to *permanently* remove nodes. 94 | Running this command will work even if the target virtual machine is down, 95 | so it is the safest way to remove the node. 96 | 97 | [source,bash] 98 | ---- 99 | skuba node remove [flags] 100 | ---- 101 | 102 | [NOTE] 103 | ==== 104 | Per default, node removal has an unlimited timeout on waiting for the node to drain. 105 | If the node is unreachable it can not be drained and thus the removal will fail or get stuck indefinitely. 106 | You can specify a time after which removal will be performed without waiting for the node to 107 | drain with the flag `--drain-timeout `. 108 | 109 | For example, waiting for the node to drain for 1 minute and 5 seconds: 110 | ---- 111 | skuba node remove caasp-worker1 --drain-timeout 1m5s 112 | ---- 113 | 114 | For a list of supported time formats run `skuba node remove -h`. 115 | ==== 116 | 117 | [IMPORTANT] 118 | ==== 119 | After the removal of a master node, you have to manually delete its entries 120 | from your load balancer's configuration. 121 | ==== 122 | 123 | == Reconfiguring Nodes 124 | 125 | To reconfigure a node, for example to change the node's role from worker to master, 126 | you will need to use a combination of commands. 127 | 128 | . Run `skuba node remove `. 129 | . Reinstall the node from scratch. 130 | . Run `skuba node join --role --user --sudo --target `. 131 | 132 | [#node-operations] 133 | == Node Operations 134 | 135 | === Uncordon and Cordon 136 | 137 | These to commands respectively define if a node is marked as `schedulable` or `unschedulable`. 138 | This means that a node is allowed to or not allowed to receive any new workloads. 139 | This can be useful when troubleshooting a node. 140 | 141 | To mark a node as `unschedulable` run: 142 | 143 | [source,bash] 144 | kubectl cordon 145 | 146 | To mark a node as `schedulable` run: 147 | 148 | [source,bash] 149 | kubectl uncordon 150 | 151 | === Draining Nodes 152 | 153 | Draining a node consists of evicting all the running pods from the current node in order to perform maintenance. 154 | This is a mandatory step in order to ensure a proper functioning of the workloads. 155 | This is achieved using `kubectl`. 
156 | 157 | To drain a node run: 158 | 159 | [source,bash] 160 | kubectl drain 161 | 162 | This action will also implicitly cordon the node. 163 | Therefore once the maintenance is done, uncordon the node to set it back to schedulable. 164 | 165 | Refer to the official {kube} documentation for more information: 166 | {kubedoc}tasks/administer-cluster/safely-drain-node/#use-kubectl-drain-to-remove-a-node-from-service 167 | -------------------------------------------------------------------------------- /adoc/admin-configure-kubelet.adoc: -------------------------------------------------------------------------------- 1 | = Configuring kubelet 2 | 3 | [WARNING] 4 | ==== 5 | Modifying the file `/etc/sysconfig/kubelet` directly is not supported. 6 | 7 | The changes made to this file will not persist through an update/upgrade of the software. 8 | Please follow the instructions below to change the configuration for `kubelet` persistently. 9 | ==== 10 | 11 | [NOTE] 12 | ==== 13 | This procedure does not override the default configuration but amends the changes 14 | from the "drop-in" configuration. 15 | 16 | Please refer to: https://www.freedesktop.org/software/systemd/man/systemd.unit.html 17 | ==== 18 | 19 | If you wish to modify the configuration for `kubelet` you must use the "drop-in" 20 | configuration feature of systemd. The steps need to be performed on each cluster 21 | node whose `kubelet` you wish to reconfigure. 22 | 23 | . Create an appropriate `.conf` file (e.g. `resource-handling.conf`) in `/usr/lib/systemd/system/kubelet.service.d/` with your desired changes. 24 | . 25 | . Reload the service definitions 26 | + 27 | ---- 28 | sudo systemctl daemon-reload 29 | ---- 30 | . Restart kubelet 31 | + 32 | ---- 33 | sudo systemctl restart kubelet 34 | ---- 35 | -------------------------------------------------------------------------------- /adoc/admin-crio-proxy.adoc: -------------------------------------------------------------------------------- 1 | == Configuring HTTP/HTTPS Proxy for {crio} 2 | 3 | In some cases you must configure the container runtime to use a proxy to pull 4 | container images. 5 | 6 | The {crio} runtime uses the system-wide proxy configuration, defined at `/etc/sysconfig/proxy`. 7 | 8 | This file can be edited a number of ways. 9 | It can be pre-configured at build time via AutoYaST, as described in the 10 | https://documentation.suse.com/sles/15-SP2/single-html/SLES-autoyast/#Configuration-Network-Proxy[AutoYaST documentation]. 11 | On an existing system, the file can be edited via YaST by running `yast2 proxy`. 12 | 13 | If preferred, it can alternatively be edited manually as described in the SUSE https://www.suse.com/support/kb/doc/?id=7006845[Knowledge Base] 14 | article 15 | 16 | [NOTE] 17 | ==== 18 | {crio} and skuba both support four types of comma-separated entries in the `NO_PROXY` variable: 19 | 20 | * An exact IP address (`1.2.3.4`) 21 | * CIDR IP range (`1.2.3.4/16`) 22 | * DNS domain name (`eg.com` matches `www.eg.com` and `eg.com`) 23 | * Restricted DNS subdomain (`.eg.com` matches `www.eg.com` but not `eg.com`) 24 | 25 | All standard programs should ignore unsupported values in that variable and continue to work (albeit without the configured proxy) 26 | when encountering an unsupported value. 27 | ==== 28 | 29 | [TIP] 30 | Not all programs on all systems will respect CIDR ranges or restricted subdomains. 
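For illustration, a minimal `/etc/sysconfig/proxy` could look like the sketch below. The proxy host, port and `NO_PROXY` entries are placeholders and must be adapted to your environment:

[source,bash]
----
# /etc/sysconfig/proxy (example values only)
PROXY_ENABLED="yes"
HTTP_PROXY="http://proxy.example.com:3128"
HTTPS_PROXY="http://proxy.example.com:3128"
# mixes all four supported entry types: exact IP, CIDR range, domain, restricted subdomain
NO_PROXY="localhost, 127.0.0.1, 10.84.0.0/16, example.com, .internal.example.com"
----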
31 | 32 | After you have configured the system proxy for your environment, restart the container runtime with: 33 | 34 | [source,bash] 35 | ---- 36 | systemctl restart crio 37 | ---- 38 | -------------------------------------------------------------------------------- /adoc/admin-crio-registries.adoc: -------------------------------------------------------------------------------- 1 | [#config-crio-container-registry] 2 | == Configuring Container Registries for {crio} 3 | 4 | [IMPORTANT] 5 | ==== 6 | The configuration example in this text uses `VERSION 2` of the {crio} registries 7 | configuration syntax. It is not compatible with the `VERSION 1` syntax present 8 | in some upstream examples. 9 | 10 | Please refer to: https://raw.githubusercontent.com/containers/image/master/docs/containers-registries.conf.5.md 11 | ==== 12 | 13 | [TIP] 14 | ==== 15 | You can create and deploy a custom `registries.conf` during the initial bootstrap of the cluster with skuba. 16 | Create the file `/addons/containers/registries.conf` and apply your changes there. 17 | This file will be rolled out during node bootstrapping and upgrading. 18 | ==== 19 | 20 | Every registry-related configuration needs to be done in the link:https://github.com/toml-lang/toml[TOML] file 21 | `/etc/containers/registries.conf`. After any change of this file, CRI-O 22 | needs to be restarted. 23 | 24 | The configuration is a sequence of `\[[registry]]` entries. For example, a 25 | single registry entry within that configuration could be added like this: 26 | 27 | `/etc/containers/registries.conf` 28 | [source,toml] 29 | ---- 30 | [[registry]] 31 | blocked = false 32 | insecure = false 33 | location = "example.net/bar" 34 | prefix = "example.com/foo/images" 35 | mirror = [ 36 | { location = "example-mirror-0.local", insecure = false }, 37 | { location = "example-mirror-1.local", insecure = true, mirror-by-digest-only = true } 38 | ] 39 | 40 | [[registry]] 41 | blocked = false 42 | insecure = false 43 | location = "example.net/mymirror" 44 | prefix = "example.com/mirror/images" 45 | mirror = [ 46 | { location = "example-mirror-2.local", insecure = false, mirror-by-digest-only = true }, 47 | { location = "example-mirror-3.local", insecure = true } 48 | ] 49 | unqualified-search = false 50 | ---- 51 | 52 | Given an image name, a single `\[[registry]]` TOML table is chosen based on its 53 | `prefix` field. 54 | 55 | A prefix is mainly a user-specified image name and can have one of the 56 | following formats: 57 | 58 | - `host[:port]` 59 | - `host[:port]/namespace[/namespace…]` 60 | - `host[:port]/namespace[/namespace…]/repo` 61 | - `host[:port]/namespace[/namespace…]/repo[:tag|@digest]` 62 | 63 | The user-specified image name must start with the specified `prefix` (and 64 | continue with the appropriate separator) for a particular `\[[registry]]` TOML 65 | table to be considered. Only the TOML entry with the longest match is used. 66 | 67 | As a special case, the `prefix` field can be missing. If so, it defaults to the 68 | value of the `location` field. 69 | 70 | === Per-namespace Settings 71 | 72 | - `insecure` (`true` or `false`): By default, container runtimes require TLS 73 | when retrieving images from a registry. If `insecure` is set to `true`, 74 | unencrypted HTTP as well as TLS connections with untrusted certificates are 75 | allowed. 76 | 77 | - `blocked` (`true` or `false`): If `true`, pulling images with matching names 78 | is forbidden. 
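Whether a given pull is allowed therefore depends on the longest matching `prefix` and its `insecure`/`blocked` settings. One possible smoke test after editing the file is to restart {crio} and pull an image whose name starts with one of your configured prefixes; the image reference below is only a placeholder based on the example prefix above:

[source,bash]
----
# Pick up the new registries.conf, then test-pull an image matching a configured prefix
sudo systemctl restart crio
sudo crictl pull example.com/foo/images/busybox:latest
----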
79 | 80 | === Remapping and Mirroring Registries 81 | 82 | The user-specified image reference is, primarily, a "logical" image name, 83 | always used for naming the image. By default, the image reference also directly 84 | specifies the registry and repository to use, but the following options can be 85 | used to redirect the underlying accesses to different registry servers or 86 | locations. This can be used to support configurations with no access to the 87 | Internet without having to change Dockerfiles, or to add redundancy. 88 | 89 | ==== `location` 90 | 91 | Accepts the same format as the `prefix` field, and specifies the physical 92 | location of the `prefix`-rooted namespace. By default, this is equal to `prefix` 93 | (in which case `prefix` can be omitted and the `\[[registry]]` TOML table can 94 | just specify `location`). 95 | 96 | ===== Example 97 | 98 | [source,toml] 99 | ---- 100 | prefix = "example.com/foo" 101 | location = "internal-registry-for-example.net/bar" 102 | ---- 103 | 104 | Requests for the image `example.com/foo/myimage:latest` will actually work with 105 | the `internal-registry-for-example.net/bar/myimage:latest` image. 106 | 107 | ==== `mirror` 108 | 109 | An array of TOML tables specifying (possibly partial) mirrors for the 110 | `prefix`-rooted namespace. 111 | 112 | The mirrors are attempted in the specified order. The first one that can be 113 | contacted and contains the image will be used (and if none of the mirrors 114 | contains the image, the primary location specified by the `registry.location` 115 | field, or using the unmodified user-specified reference, is tried last). 116 | 117 | Each TOML table in the `mirror` array can contain the following fields, with 118 | the same semantics as if specified in the `\[[registry]]` TOML table directly: 119 | 120 | - `location` 121 | - `insecure` 122 | 123 | ==== `mirror-by-digest-only` 124 | 125 | Can be `true` or `false`. If `true`, mirrors will only be used during pulling 126 | if the image reference includes a digest. Referencing an image by digest 127 | ensures that the same one is always used (whereas referencing an image by a tag may 128 | cause different registries to return different images if the tag mapping is out 129 | of sync). 130 | 131 | Note that if this is `true`, images referenced by a tag will only use the primary 132 | registry, failing if that registry is not accessible. 133 | -------------------------------------------------------------------------------- /adoc/admin-custom-config.adoc: -------------------------------------------------------------------------------- 1 | [#configuration-management] 2 | = Configuration Management 3 | 4 | [NOTE] 5 | ==== 6 | If you use multiple management workstations to manage the {productname} cluster, make sure 7 | that your customizations to the config files are synced across all management workstations. 8 | 9 | Otherwise, if the set of customizations is outdated, the next run of `skuba addon upgrade` will potentially 10 | undo desired changes and possibly leave the cluster or some workloads in non-functional condition. 11 | ==== 12 | 13 | [#addon-kustomize] 14 | == Using Kustomize 15 | 16 | {productname} uses link:https://github.com/kubernetes-sigs/kustomize[Kustomize] 17 | to allow customization of included addons. 18 | 19 | Kustomize allows you to create YAML or JSON files without a template that will override 20 | the default configurations, leaving the default files intact. 
For information about 21 | the file format and allowed fields, refer to: https://kubernetes-sigs.github.io/kustomize/api-reference/glossary/#kustomization 22 | 23 | [NOTE] 24 | ==== 25 | The configuration options specified in the `kustomize` files will be "appended" to the 26 | default configuration. This, in effect, means that the default options will be effectively overwritten. 27 | In reality, the options will be parsed from both the default and custom configuration and only the latter applies. 28 | This can lead to some outputs for commands showing option/parameter flags that are overruled by the custom configuration. 29 | ==== 30 | 31 | [TIP] 32 | ==== 33 | Using YAML files has the advantage that these files can be directly applied to the cluster 34 | without converting them first. Using JSON format requires converting the files to YAML with `kustomize build`. 35 | ==== 36 | 37 | The configuration files should managed in a central location on the management workstation 38 | together with your cluster definition files that were created during the deployment of the cluster. 39 | 40 | == {crio} Customization 41 | 42 | === Upgrading {crio} Configuration for 4.5 43 | 44 | For {productname} 4.5, CRI-O needs a configuration format upgrade. 45 | For more information, refer to <>. 46 | 47 | If you have custom changes in your {crio} configuration from a release prior to 4.5, 48 | you should back up any files from the old configuration directory `/addons/cri` 49 | before executing the above command, since it will disrupt any previous changes. 50 | 51 | After backing up your changes, on the management workstation, run: 52 | 53 | ---- 54 | skuba cluster upgrade localconfig 55 | ---- 56 | 57 | During this process `skuba` replaces the existing configuration and moves it 58 | to some new files and subfolders in `/addons/cri`. 59 | 60 | [IMPORTANT] 61 | ==== 62 | Your changes made to the previous configuration file will be discarded. 63 | ==== 64 | 65 | The new configuration files will still use the same formats and the same keys. 66 | You must manually restore any custom values from your backup files to the new 67 | configuration. 68 | 69 | [NOTE] 70 | ==== 71 | Some new configuration options/values have been introduced with the {crio} version shipped with {productname} 4.5. 72 | 73 | For more information, refer to link:https://github.com/cri-o/cri-o#configuration[]. 74 | ==== 75 | 76 | === Configuring {crio} Registry 77 | 78 | Instead of modifying the `registries.conf` file on each individual node, 79 | you can place your modified version in `/addons/containers/registries.conf`. 80 | 81 | Upon node bootstrapping or upgrading this file will be distributed to all nodes automatically by `skuba`. 82 | 83 | This file does not conform to the `Kustomize` format or method. It is written in 84 | link:https://github.com/toml-lang/toml[TOML] format. 85 | 86 | For comprehensive information on how to create a custom {crio} registry configuration, 87 | refer to: <>. 88 | 89 | == Applying Configuration 90 | 91 | === During Cluster Bootstrap 92 | 93 | When you bootstrap a new cluster or join a new node with skuba, it will automatically 94 | deploy the contents of the `/addons/` folder to the respective 95 | configuration locations across the nodes. 96 | 97 | Configurations are also automatically distributed when you upgrade nodes or the cluster 98 | to a new version. 
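As a sketch of this workflow, a customized registries configuration placed in the cluster definition folder before bootstrapping is rolled out together with the node; the cluster definition folder name, target address and node name are placeholders:

[source,bash]
----
# Drop the customized registries.conf into the cluster definition folder,
# then bootstrap (or join) nodes as usual; skuba distributes the file automatically
cp registries.conf <CLUSTER_NAME>/addons/containers/registries.conf
cd <CLUSTER_NAME>
skuba node bootstrap --user sles --sudo --target <IP/FQDN> <MASTER_NODE_NAME>
----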
99 | 100 | === Existing Cluster 101 | 102 | If you have an existing cluster and wish to update and roll out new configuration 103 | customizations, please use `kubeadm apply` to apply any changed manifests have {kube} take care of the 104 | required steps to safely install new configurations across your cluster. 105 | 106 | [NOTE] 107 | ==== 108 | There currently is no automated way to re-apply customizations to the {crio} 109 | registries configuration. If you wish to update the configuration, you should 110 | add your desired changes to the cluster definition folder (`/addons/containers/`) 111 | and then manually distribute a copy of this file to all nodes. 112 | 113 | Afterwards, you must restart {crio} for the new configuration to take effect by (on each cluster node) running: 114 | 115 | ---- 116 | sudo systemctl daemon-reload 117 | sudo systemctl restart crio 118 | ---- 119 | ==== 120 | -------------------------------------------------------------------------------- /adoc/admin-flexvolume.adoc: -------------------------------------------------------------------------------- 1 | = FlexVolume Configuration 2 | 3 | FlexVolume drivers are external (out-of-tree) drivers usually provided by a specific vendor. 4 | They are executable files that are placed in a predefined directory in the cluster on both worker and master nodes. 5 | Pods interact with FlexVolume drivers through the `flexvolume` in-tree plugin. 6 | 7 | The vendor driver first has to be installed on each worker and master node in a Kubernetes cluster. 8 | On {productname} {productmajor}, the path to install the drivers is `/usr/lib/kubernetes/kubelet-plugins/volume/exec/`. 9 | 10 | If the drivers are deployed with `DaemonSet`, this will require changing 11 | the FlexVolume directory path, which is usually stored as an environment 12 | variable, for example for rook: 13 | 14 | [source,bash] 15 | FLEXVOLUME_DIR_PATH=/usr/lib/kubernetes/kubelet-plugins/volume/exec/ 16 | 17 | //For more information on DaemonSets, refer to cha.user.daemonset 18 | 19 | For a general guide to the FlexVolume configuration, see https://github.com/kubernetes/community/blob/master/contributors/devel/sig-storage/flexvolume.md 20 | -------------------------------------------------------------------------------- /adoc/admin-kubernetes-changes.adoc: -------------------------------------------------------------------------------- 1 | [#k8s-changes-117-118] 2 | == Changes from Kubernetes 1.17 to 1.18 3 | 4 | This documentation page lists all the behavioral changes and API deprecations that will happen from {kube} 1.17 to 1.18. 5 | You will require this information when migrating to {productname} 5.x shipping with {kube} 1.18. 6 | 7 | === API deprecations 8 | 9 | The following APIs have been deprecated: 10 | 11 | - All resources under *apps/v1beta1* and *apps/v1beta2*: please use *apps/v1* instead. 12 | - *daemonsets*, *deployments*, *replicasets* under *extensions/v1beta1*: please use *apps/v1* instead. 13 | - *networkpolicies* under *extensions/v1beta1*: please use *networking.k8s.io/v1* instead. 14 | - *podsecuritypolicies* resources under *extensions/v1beta1*: please use *policy/v1beta1* instead. 15 | 16 | === Behavioral changes 17 | 18 | In this section we will highlight some relevant changes that might interest you for the new {kube} version. 19 | 20 | ==== Core 21 | 22 | - link:https://github.com/kubernetes/enhancements/issues/853[#853] Configurable scale velocity for HPA. 
23 | - link:https://github.com/kubernetes/enhancements/issues/1393[#1393] Provide OIDC discovery for service account token issuer. 24 | - link:https://github.com/kubernetes/enhancements/issues/1513[#1513] CertificateSigningRequest API. 25 | 26 | ==== Scheduling 27 | 28 | - link:https://github.com/kubernetes/enhancements/issues/1451[#1451] Run multiple Scheduling Profiles [Alpha] 29 | - link:https://github.com/kubernetes/enhancements/issues/895[#895] Even pod spreading across failure domains [Beta] 30 | - link:https://github.com/kubernetes/enhancements/issues/1258[#1258] Add a configurable default Even Pod Spreading rule [Alpha] 31 | - link:https://github.com/kubernetes/enhancements/issues/166[#166] Taint Based Eviction [Stable] 32 | 33 | ==== Nodes 34 | 35 | - link:https://github.com/kubernetes/enhancements/issues/1539[#1539] Extending Hugepage Feature [Stable] 36 | - link:https://github.com/kubernetes/enhancements/issues/688[#688] Pod Overhead: account resources tied to the pod sandbox, but not specific containers [Beta] 37 | - link:https://github.com/kubernetes/enhancements/issues/693[#693] Node Topology Manager [Beta] 38 | - link:https://github.com/kubernetes/enhancements/issues/950[#950] Add pod-startup liveness-probe holdoff for slow-starting pods [Beta] 39 | 40 | ==== Networking 41 | 42 | - link:https://github.com/kubernetes/enhancements/issues/752[#752] EndpointSlice API [Beta] 43 | - link:https://github.com/kubernetes/enhancements/issues/508[#508] IPv6 support added [Beta] 44 | - link:https://github.com/kubernetes/enhancements/issues/1024[#1024] Graduate NodeLocal DNSCache to GA [Stable] 45 | - link:https://github.com/kubernetes/enhancements/issues/1453[#1453] Graduate Ingress to V1 [Beta] 46 | - link:https://github.com/kubernetes/enhancements/issues/1507[#1507] Adding AppProtocol to Services and Endpoints [Stable] 47 | 48 | ==== API 49 | 50 | - link:https://github.com/kubernetes/enhancements/issues/1040[#1040] Priority and Fairness for API Server Requests [Alpha] 51 | - link:https://github.com/kubernetes/enhancements/issues/1601[#1601] client-go signature refactor to standardize options and context handling [Stable] 52 | - link:https://github.com/kubernetes/enhancements/issues/576[#576] APIServer DryRun [Stable] 53 | - link:https://github.com/kubernetes/enhancements/issues/1281[#1281] API Server Network Proxy KEP to Beta [Beta] 54 | 55 | ==== Storage 56 | 57 | - link:https://github.com/kubernetes/enhancements/issues/695[#695] Skip Volume Ownership Change [Alpha] 58 | - link:https://github.com/kubernetes/enhancements/issues/1412[#1412] Immutable Secrets and ConfigMaps [Alpha] 59 | - link:https://github.com/kubernetes/enhancements/issues/1495[#1495] Generic data populators [Alpha] 60 | - link:https://github.com/kubernetes/enhancements/issues/770[#770] Skip attach for non-attachable CSI volumes [Stable] 61 | - link:https://github.com/kubernetes/enhancements/issues/351[#351] Raw block device using persistent volume source [Stable] 62 | - link:https://github.com/kubernetes/enhancements/issues/565[#565] CSI Block storage support [Stable] 63 | - link:https://github.com/kubernetes/enhancements/issues/603[#603] Pass Pod information in CSI calls [Stable] 64 | - link:https://github.com/kubernetes/enhancements/issues/989[#989] Extend allowed PVC DataSources [Stable] 65 | 66 | ==== Features 67 | 68 | - link:https://github.com/kubernetes/enhancements/issues/1441[#1441] kubectl debug [Alpha] 69 | - link:https://github.com/kubernetes/enhancements/issues/491[#491] kubectl diff [Stable] 70 | - 
link:https://github.com/kubernetes/enhancements/issues/670[#670] Support Out-of-Tree vSphere Cloud Provider [Stable] 71 | -------------------------------------------------------------------------------- /adoc/admin-logging-audit.adoc: -------------------------------------------------------------------------------- 1 | = Audit Log 2 | 3 | To track actions that have been performed on the cluster, you can enable the {kube} audit log during cluster bootstrap or on a running cluster. 4 | 5 | This allows the audit logs to be written on the {kube} master nodes at `/var/log/kube-apiserver/audit.log` or the given path. 6 | 7 | For more information on the audit log and its contents, see: https://kubernetes.io/docs/tasks/debug-application-cluster/audit/ 8 | 9 | == Limitations 10 | 11 | The {kube} audit log only collects and stores actions performed on the level of the cluster. This does not include any resulting actions of application services. 12 | 13 | == Enable Auditing During Cluster Bootstrap 14 | 15 | . Create audit policy file - `audit.yaml`. Here uses a simple policy for demonstration. 16 | + 17 | ==== 18 | apiVersion: audit.k8s.io/v1beta1 19 | kind: Policy 20 | rules: 21 | - level: Metadata // <1> 22 | ==== 23 | <1> The audit level of the event. This sample will log all requests at the Metadata level. 24 | For detailed information, refer to: https://kubernetes.io/docs/tasks/debug-application-cluster/audit/#audit-policy 25 | . Create audit policy file directory on all master nodes. 26 | + 27 | ---- 28 | sudo mkdir -p /etc/kubernetes/policies 29 | ---- 30 | 31 | . Copy audit policy file - `audit.yaml` to `/etc/kubernetes/policies/audit.yaml` on all master nodes. 32 | 33 | . Edit `kubeadm-init.conf` file in skuba init folder to add audit related configurations. 34 | + 35 | ---- 36 | vi /kubeadm-init.conf 37 | ---- 38 | + 39 | ==== 40 | ... 41 | apiServer: 42 | extraArgs: 43 | audit-log-path: /var/log/kube-apiserver/audit.log 44 | audit-policy-file: /etc/kubernetes/policies/audit.yaml // <1> 45 | audit-log-maxage: "30" // <2> 46 | audit-log-maxsize: "100" // <3> 47 | audit-log-maxbackup: "5" // <4> 48 | audit-log-format: json // <5> 49 | extraVolumes: 50 | - name: audit-policy 51 | hostPath: /etc/kubernetes/policies/audit.yaml // <6> 52 | mountPath: /etc/kubernetes/policies/audit.yaml // <7> 53 | readOnly: true 54 | pathType: File 55 | - name: audit-logs 56 | hostPath: /var/log/kube-apiserver // <8> 57 | mountPath: /var/log/kube-apiserver // <9> 58 | pathType: DirectoryOrCreate 59 | ... 60 | ==== 61 | <1> Path to the YAML file that defines the audit policy configuration. 62 | <2> The maximum number of days to retain old audit log files based on the timestamp encoded in their filename. (Default: 15) 63 | <3> The maximum size in megabytes of the audit log file before it gets rotated. (Default: 10) 64 | <4> The maximum number of old audit log files to retain. (Default: 20) 65 | <5> Format of saved audits. Known formats are "legacy", "json". "legacy" indicates 1-line text format for each event. "json" indicates structured json format. 66 | <6> The audit policy configuration file path from the host node's filesystem. 67 | <7> The audit policy configuration file path on the api-server pod. 68 | <8> The audit log file directory from the host node's filesystem. 69 | <9> The audit log file directory on the api-server pod. 70 | 71 | . Proceed with link:{docurl}html/caasp-deployment/bootstrap.html[Cluster Bootstrap]. 72 | 73 | . 
If everything is setup correctly, you should be able to see audit logs are written to `/var/log/kube-apiserver/audit.log`. 74 | 75 | == Enable Auditing On Running Cluster 76 | 77 | [NOTE] 78 | ==== 79 | The following steps take effect only on the updated master nodes. You need to repeat the following steps on every master node in the cluster. 80 | ==== 81 | 82 | . Create audit policy file - `audit.yaml`. Here uses a simple policy for demonstration. 83 | + 84 | ==== 85 | apiVersion: audit.k8s.io/v1beta1 86 | kind: Policy 87 | rules: 88 | - level: Metadata // <1> 89 | ==== 90 | <1> The audit level of the event. This sample will log all requests at the Metadata level. For detailed information, refer to: https://kubernetes.io/docs/tasks/debug-application-cluster/audit/#audit-policy 91 | 92 | . Create audit policy file directory on master node. 93 | + 94 | ---- 95 | sudo mkdir -p /etc/kubernetes/policies 96 | ---- 97 | 98 | . Copy audit policy file - `audit.yaml` to `/etc/kubernetes/policies/audit.yaml` on master node. 99 | 100 | . Edit `/etc/kubernetes/manifests/kube-apiserver.yaml`. 101 | + 102 | ==== 103 | ... 104 | spec: 105 | containers: 106 | - command: 107 | - kube-apiserver 108 | - --audit-log-path=/var/log/kube-apiserver/audit.log 109 | - --audit-policy-file=/etc/kubernetes/policies/audit.yaml // <1> 110 | - --audit-log-maxage=30 // <2> 111 | - --audit-log-maxsize=100 // <3> 112 | - --audit-log-maxbackup=5 // <4> 113 | - --audit-log-format=json // <5> 114 | ... 115 | volumeMounts: 116 | - mountPath: /etc/kubernetes/policies/audit.yaml // <6> 117 | name: audit-policy 118 | readOnly: true 119 | - mountPath: /var/log/kube-apiserver // <7> 120 | name: audit-logs 121 | ... 122 | volumes: 123 | - hostPath: 124 | path: /etc/kubernetes/policies/audit.yaml // <8> 125 | type: File 126 | name: audit-policy 127 | - hostPath: 128 | path: /var/log/kube-apiserver // <9> 129 | type: DirectoryOrCreate 130 | name: audit-logs 131 | ... 132 | ==== 133 | <1> Path to the YAML file that defines the audit policy configuration. 134 | <2> The maximum number of days to retain old audit log files based on the timestamp encoded in their filename. (Default: 15) 135 | <3> The maximum size in megabytes of the audit log file before it gets rotated. (Default: 10) 136 | <4> The maximum number of old audit log files to retain. (Default: 20) 137 | <5> Format of saved audits. Known formats are "legacy", "json". "legacy" indicates 1-line text format for each event. "json" indicates structured json format. 138 | <6> The audit policy configuration file path on the api-server pod. 139 | <7> The audit log file directory on the api-server pod. 140 | <8> The audit policy configuration file path from the host node's filesystem. 141 | <9> The audit log file directory from the host node's filesystem. 142 | 143 | . Restart kubelet. 144 | + 145 | ---- 146 | sudo systemctl restart kubelet 147 | ---- 148 | 149 | . If everything is set up correctly, you should be able to see audit logs being written to `/var/log/kube-apiserver/audit.log`. 150 | 151 | 152 | == Disable Auditing 153 | 154 | [NOTE] 155 | ==== 156 | The following steps take effect only on the updated master nodes. You need to repeat the following steps on every master node in the cluster. 157 | ==== 158 | 159 | . Remote access to the master node. 160 | ---- 161 | ssh sles@ 162 | ---- 163 | 164 | . Edit `/etc/kubernetes/manifests/kube-apiserver.yaml` and remove the following lines. 165 | + 166 | ==== 167 | ... 
168 | - --audit-log-path=/var/log/kube-apiserver/audit.log 169 | - --audit-policy-file=/etc/kubernetes/policies/audit.yaml 170 | - --audit-log-maxage=30 171 | - --audit-log-maxsize=100 172 | - --audit-log-maxbackup=5 173 | - --audit-log-format=json 174 | ... 175 | - mountPath: /etc/kubernetes/policies/audit.yaml 176 | name: audit-policy 177 | readOnly: true 178 | - mountPath: /var/log/kube-apiserver 179 | name: audit-logs 180 | ... 181 | - hostPath: 182 | path: /etc/kubernetes/policies/audit.yaml 183 | type: File 184 | name: audit-policy 185 | - hostPath: 186 | path: /var/log/kube-apiserver 187 | type: DirectoryOrCreate 188 | name: audit-logs 189 | ==== 190 | 191 | . Restart kubelet. 192 | + 193 | ---- 194 | sudo systemctl restart kubelet 195 | ---- 196 | -------------------------------------------------------------------------------- /adoc/admin-logging-skuba.adoc: -------------------------------------------------------------------------------- 1 | [#tee-logging] 2 | = Logging in skuba 3 | 4 | One important part of deploying and maintaining a product is to have reliable 5 | logs. Tools like `skuba` take the approach of printing the output to the 6 | standard output directly. This is not just common practice, but it also has the 7 | advantage that then the user has more flexibility on how to manage said output. 8 | 9 | Thus, whenever throughout this guide we write a `skuba` command, take into 10 | account that the output will be printed into the standard output. If you would 11 | also like to have the logs stored somewhere else for later inspection, you can 12 | use tools like `tee`. For example: 13 | 14 | [source,bash] 15 | ---- 16 | skuba node bootstrap --user sles --sudo --target | tee -skuba-node-bootstrap.log 17 | ---- 18 | 19 | Otherwise, you might want to use other tools to manage the logs for later 20 | inspection. The point being that this guide will never consider how to manage 21 | these logs because `skuba` itself does not. It's up to you to manage these logs 22 | in any way you find desirable. 23 | 24 | Moreover, `skuba` has also various levels of log verbosity. This is managed by 25 | the `-v, --verbosity` flag. This flag accepts an integer argument, ranging from 26 | 0 to 5, where a higher number means a higher level of verbosity. If you don't 27 | pass any arguments, then 0 is assumed. We recommend using the default argument, 28 | since it will already log warnings and errors, among other relevant output, 29 | whereas 5 can be a bit overwhelming. Thus, for the above example, we would 30 | recommend something like: 31 | 32 | [source,bash] 33 | ---- 34 | skuba node bootstrap -v --user sles --sudo --target | tee -skuba-node-bootstrap.log 35 | ---- 36 | 37 | Now the `-skuba-node-bootstrap.log` will have more useful information 38 | than without the `-v` flag. We *strongly* recommend using this flag in order to 39 | get as much useful information as possible from a single run. 40 | -------------------------------------------------------------------------------- /adoc/admin-logging.adoc: -------------------------------------------------------------------------------- 1 | == Introduction 2 | 3 | Logging is ubiquitous throughout {productname}. Some tools will only print their 4 | outputs to the currently running session shell and not create a "log file". 5 | 6 | If you need to retain the output of these files you can `tee` them into a separate file (refer to <>). 7 | 8 | Many other service components will produce log files or other log info streams. 
9 | You can collect, store and evaluate these logs via <> for 10 | use with the <>. 11 | 12 | 13 | 14 | [NOTE] 15 | ==== 16 | If you are looking for troubleshooting logs please refer to <>. 17 | ==== 18 | -------------------------------------------------------------------------------- /adoc/admin-migration.adoc: -------------------------------------------------------------------------------- 1 | [#caasp-migration] 2 | == Migration to {productname} 4.5 3 | 4 | .Sequential Upgrade Required 5 | [WARNING] 6 | ==== 7 | For a successful migration, make sure you are at the latest 4.2 version before migrating your cluster and management workstation to {productname} 4.5. 8 | 9 | For this, please follow the upgrade guide to update all your cluster nodes and management workstation to the latest base OS updates and {productname} updates. 10 | Refer to: link:{docurl}html/caasp-admin/_cluster_updates.html[] 11 | ==== 12 | 13 | === Updating the operating system 14 | . The node should be able to communicate with the servers for {scc} or {rmt}. 15 | Other migration scenarios are covered in the SLES upgrade guide. 16 | + 17 | [NOTE] 18 | ==== 19 | In order to reconnect your system to the registration server, run: 20 | ---- 21 | SUSEConnect -r SUSEConnect -p sle-module-containers/15.1/x86_64 -r 22 | ---- 23 | ==== 24 | . You also need the new `zypper migration` plugin. 25 | This plugin is used to migrate the node itself to the latest version of {productname}, such as updating the repositories to the new ones, and calling `zypper dup`. 26 | This plugin is provided by the `zypper-migration-plugin` package. 27 | Therefore, you need to install the `zypper-migration-plugin` package: 28 | + 29 | ---- 30 | zypper -n in zypper-migration-plugin 31 | ---- 32 | . Then, run the newly installed `zypper-migration` plugin (on the management node first, then on the rest of the nodes): 33 | + 34 | ---- 35 | zypper migration 36 | ---- 37 | + 38 | [NOTE] 39 | ==== 40 | If you want migration to progress non-interactive, you can add the flags: `--non-interactive --auto-agree-with-licenses` 41 | ==== 42 | . Check that all required repositories are enabled again and have the correct version. Run: 43 | + 44 | ---- 45 | zypper lr -uE 46 | ---- 47 | + 48 | Verify that all repositories on the following list are present and enabled: 49 | + 50 | [NOTE] 51 | ==== 52 | The actual Aliases might be different from the ones shown here if they were configured differently during the initial installation of {sle}. 53 | 54 | The URIs will have long UUID strings (`update?`,`product?`) attached to them. The UUIDs identify your personal licensed product or update repositories. 55 | These have been omitted from this output example. 
56 | ==== 57 | + 58 | [options="header",cols="2"] 59 | |=== 60 | |Alias |URI 61 | |Basesystem_Module_15_SP2_x86_64:SLE-Module-Basesystem15-SP2-Pool | https://updates.suse.com/SUSE/Products/SLE-Module-Basesystem/15-SP2/x86_64/ 62 | |Basesystem_Module_15_SP2_x86_64:SLE-Module-Basesystem15-SP2-Updates | https://updates.suse.com/SUSE/Updates/SLE-Module-Basesystem/15-SP2/x86_64/ 63 | |Containers_Module_15_SP2_x86_64:SLE-Module-Containers15-SP2-Pool | https://updates.suse.com/SUSE/Products/SLE-Module-Containers/15-SP2/x86_64/ 64 | |Containers_Module_15_SP2_x86_64:SLE-Module-Containers15-SP2-Updates | https://updates.suse.com/SUSE/Updates/SLE-Module-Containers/15-SP2/x86_64/ 65 | |Python_2_Module_15_SP2_x86_64:SLE-Module-Python2-15-SP2-Pool | https://updates.suse.com/SUSE/Products/SLE-Module-Python2/15-SP2/x86_64/ 66 | |Python_2_Module_15_SP2_x86_64:SLE-Module-Python2-15-SP2-Updates | https://updates.suse.com/SUSE/Updates/SLE-Module-Python2/15-SP2/x86_64/ 67 | |SUSE_CaaS_Platform_4.5_x86_64:SUSE-CAASP-4.5-Pool | https://updates.suse.com/SUSE/Products/SUSE-CAASP/4.5/x86_64/ 68 | |SUSE_CaaS_Platform_4.5_x86_64:SUSE-CAASP-4.5-Updates | https://updates.suse.com/SUSE/Updates/SUSE-CAASP/4.5/x86_64/ 69 | |SUSE_Linux_Enterprise_Server_15_SP2_x86_64:SLE-Product-SLES15-SP2-Pool | https://updates.suse.com/SUSE/Products/SLE-Product-SLES/15-SP2/x86_64/ 70 | |SUSE_Linux_Enterprise_Server_15_SP2_x86_64:SLE-Product-SLES15-SP2-Updates | https://updates.suse.com/SUSE/Updates/SLE-Product-SLES/15-SP2/x86_64/ 71 | |Server_Applications_Module_15_SP2_x86_64:SLE-Module-Server-Applications15-SP2-Pool | https://updates.suse.com/SUSE/Products/SLE-Module-Server-Applications/15-SP2/x86_64/ 72 | |Server_Applications_Module_15_SP2_x86_64:SLE-Module-Server-Applications15-SP2-Updates | https://updates.suse.com/SUSE/Updates/SLE-Module-Server-Applications/15-SP2/x86_64/ 73 | |=== 74 | . Check if `skuba` was indeed upgraded for 4.5: 75 | + 76 | ---- 77 | skuba version 78 | ---- 79 | + 80 | [IMPORTANT] 81 | ==== 82 | The version must be >= `skuba-2.1`. 83 | `skuba 2` corresponds to {productname} 4.5, while `skuba 1.0-1.4` corresponds to {productname} 4. 84 | ==== 85 | 86 | === Upgrade the cluster 87 | 88 | . And now run the skuba cluster upgrade commands as it's done below. 89 | + 90 | - First, check if there are any addons or components to upgrade before you upgrade the nodes: 91 | + 92 | ---- 93 | skuba cluster upgrade plan 94 | skuba addon upgrade plan 95 | skuba addon upgrade apply 96 | ---- 97 | - Then, check with `cluster status` if all nodes have the same {kube} version (which must be 1.17.x): 98 | + 99 | ---- 100 | skuba cluster status 101 | ---- 102 | + 103 | [NOTE] 104 | ==== 105 | If not all nodes are properly upgraded to the same Kubernetes version, then the ones with an older {kube} version must be upgraded before attempting a migration. 106 | Refer to the update documentation of the previous version to bring all nodes to the latest update state. 107 | ==== 108 | + 109 | - Once all nodes have the same {kube} version, you must upgrade the {crio} config: 110 | + 111 | ---- 112 | skuba cluster upgrade localconfig 113 | ---- 114 | - Run `skuba node upgrade`: 115 | + 116 | ---- 117 | skuba node upgrade apply --user sles --sudo --target 118 | ---- 119 | - Before repeating the same cycle with the rest of the nodes, **please make sure** that all the components of the kubernetes stack **are running** on the freshly upgraded node. 
120 | You can do this with the following command: 121 | + 122 | ---- 123 | kubectl get all -n kube-system 124 | ---- 125 | . Now repeat the above steps for all nodes to bring them to the upgraded state. 126 | . After upgrading all the nodes, make sure you run another addon upgrade across the cluster: 127 | + 128 | ---- 129 | skuba addon upgrade plan 130 | skuba addon upgrade apply 131 | ---- 132 | 133 | After following all these instructions you should be running {productname} 4.5. 134 | Refer to the link:https://www.suse.com/releasenotes/x86_64/SUSE-CAASP/4.5/[release notes] for further information on the new features that this release brings. 135 | Enjoy! 136 | -------------------------------------------------------------------------------- /adoc/admin-security-access.adoc: -------------------------------------------------------------------------------- 1 | = Access Control 2 | 3 | Users access the API using `kubectl`, client libraries, or by making REST requests. 4 | Both human users and {kube} service accounts can be authorized for API access. 5 | When a request reaches the API, it goes through several stages, that can be explained with the following three questions: 6 | 7 | . Authentication: *who are you?* This is accomplished via <> to validate the user's entity and respond to the corresponding user group after successful login. 8 | . Authorization: *what kind of access do you have?* This is accomplished via <> API, that is a set of permissions for the previously authenticated user. Permissions are purely additive (there are no "deny" rules). A role can be defined within a namespace with a Role, or cluster-wide with a ClusterRole. 9 | . Admission Control: *what are you trying to do?* This is accomplished via <>. They can modify (mutate) or validate (accept or reject) requests. 10 | 11 | Unlike authentication and authorization, if any admission controller rejects, then the request is immediately rejected. 12 | 13 | Users can access with a Web browser or command line to do the authentication or self-configure `kubectl` to access authorized resources. 14 | 15 | == Authentication and Authorization Flow 16 | 17 | Authentication is composed of: 18 | 19 | * *Dex* (https://github.com/dexidp/dex) is an identity provider service 20 | (idP) that uses OIDC (Open ID Connect: https://openid.net/connect/) 21 | to drive authentication for client applications. 22 | It acts as a portal to defer authentication to the provider through connected 23 | identity providers (connectors). 24 | * *Client*: 25 | . Web browser: *Gangway* (https://github.com/heptiolabs/gangway): 26 | a Web application that enables authentication flow for your {productname}. 27 | The user can log in, authorize access, download `kubeconfig`, or self-configure `kubectl`. 28 | . Command-line: `skuba auth login`, a CLI application that enables authentication flow for your {productname}. The user can log in, authorize access, and get `kubeconfig`. 29 | 30 | For authorization (Role-Based Access Control, RBAC), administrators can use `kubectl` to create corresponding 31 | `RoleBinding` or `ClusterRoleBinding` for a user or group to limit resource access. 32 | 33 | === Web Flow 34 | image::oidc_flow_web.png[] 35 | // Source: suse-rbac-oidc-flow-web.xml (open with http://draw.io/app) 36 | 37 | . The user requests access through Gangway. 38 | . Gangway redirects to Dex. 39 | . Dex redirects to a connected identity provider (connector). 40 | User login and a request to approve access are generated. 41 | . 
Dex continues with OIDC authentication flow on behalf of the user 42 | and creates/updates data to {kube} CRDs. 43 | . Dex redirects the user to Gangway. 44 | This redirect includes (ID/refresh) tokens. 45 | . Gangway returns a link to download `kubeconfig` or self-configures `kubectl` 46 | instructions to the user. 47 | + 48 | image::rbac-configure-kubectl.png[] 49 | 50 | . User downloads `kubeconf` or self-configures `kubectl`. 51 | . User uses `kubectl` to connect to the {kube} API server. 52 | . {kube} CRDs validate the {kube} API server request and return a response. 53 | . The `kubectl` connects to the authorized {kube} resources through the {kube} API server. 54 | 55 | === CLI Flow 56 | image::oidc_flow_cli.png[] 57 | // Source: suse-rbac-oidc-flow-cli.xml (open with http://draw.io/app) 58 | 59 | . User requests access through `skuba auth login` with the Dex server URL, 60 | username and password. 61 | . Dex uses received username and password to log in and approve the access 62 | request to the connected identity providers (connectors). 63 | . Dex continues with the OIDC authentication flow on behalf of the user and 64 | creates/updates data to the {kube} CRDs. 65 | . Dex returns the ID token and refreshes token to `skuba auth login`. 66 | . `skuba auth login` generates the kubeconfig file `kubeconf.txt`. 67 | . User uses `kubectl` to connect the {kube} API server. 68 | . {kube} CRDs validate the {kube} API server request and return a response. 69 | . The `kubectl` connects to the authorized {kube} resources through {kube} API server. 70 | -------------------------------------------------------------------------------- /adoc/admin-security-admission.adoc: -------------------------------------------------------------------------------- 1 | [#admission] 2 | = Admission Controllers 3 | 4 | == Introduction 5 | 6 | After user authentication and authorization, *admission* takes place to complete the access control for the {kube} API. 7 | As the final step in the access control process, admission enhances the security layer by mandating a reasonable security baseline across a specific namespace or the entire cluster. 8 | The built-in {psp} admission controller is perhaps the most prominent example of it. 9 | 10 | Apart from the security aspect, admission controllers can enforce custom policies to adhere to certain best-practices such as having good labels, annotation, resource limits, or other settings. 11 | It is worth noting that instead of only validating the request, admission controllers are also capable of "fixing" a request by mutating it, such as automatically adding resource limits if the user forgets to. 12 | 13 | The admission is controlled by admission controllers which may only be configured by the cluster administrator. The admission control process happens in *two phases*: 14 | 15 | . In the first phase, *mutating* admission controllers are run. They are empowered to automatically change the requested object to comply with certain cluster policies by making modifications to it if needed. 16 | . In the second phase, *validating* admission controllers are run. Based on the results of the previous mutating phase, an admission controller can either allow the request to proceed and reach `etcd` or deny it. 17 | 18 | [IMPORTANT] 19 | ==== 20 | If any of the controllers in either phase reject the request, the entire request is rejected immediately and an error is returned to the end-user. 
21 | ==== 22 | 23 | == Configured admission controllers 24 | 25 | [IMPORTANT] 26 | ==== 27 | Any modification of this list prior to the creation of the cluster will be overwritten by these default settings. 28 | 29 | The ability to add or remove individual admission controllers will be provided with one of the upcoming releases of {productname}. 30 | ==== 31 | 32 | The complete list of admission controllers can be found at {kubedoc}reference/access-authn-authz/admission-controllers/#what-does-each-admission-controller-do 33 | 34 | The default admission controllers enabled in {productname} are: 35 | 36 | . `NodeRestriction` 37 | . `PodSecurityPolicy` 38 | // . `NamespaceLifecycle` 39 | // . `LimitRanger` 40 | // . `ServiceAccount` 41 | // . `TaintNodesByCondition` 42 | // . `Priority` 43 | // . `DefaultTolerationSeconds` 44 | // . `DefaultStorageClass` 45 | // . `PersistentVolumeClaimResize` 46 | // . `MutatingAdmissionWebhook` 47 | // . `ValidatingAdmissionWebhook` 48 | // . `ResourceQuota` 49 | -------------------------------------------------------------------------------- /adoc/admin-security-firewall.adoc: -------------------------------------------------------------------------------- 1 | = Network Access Considerations 2 | 3 | It is good security practice not to expose the kubernetes API server on the public internet. 4 | Use network firewalls that only allow access from trusted subnets. 5 | -------------------------------------------------------------------------------- /adoc/admin-security-psp.adoc: -------------------------------------------------------------------------------- 1 | = Pod Security Policies 2 | 3 | [NOTE] 4 | ==== 5 | Please note that criteria for designing {psp} are not part of this document. 6 | ==== 7 | 8 | "Pod Security Policy" (stylized as `{psp}` and abbreviated "PSP") is a security 9 | measure implemented by {kube} to control which specifications a pod must meet 10 | to be allowed to run in the cluster. They control various aspects of execution of 11 | pods and interactions with other parts of the software infrastructure. 12 | 13 | You can find more general information about {psp} in the link:{kubedoc}concepts/policy/pod-security-policy/[Kubernetes Docs]. 14 | 15 | User access to the cluster is controlled via "Role Based Access Control (RBAC)". 16 | Each {psp} is associated with one or more users or 17 | service accounts so they are allowed to launch pods with the associated 18 | specifications. The policies are associated with users or service accounts via 19 | role bindings. 20 | 21 | [NOTE] 22 | ==== 23 | The default policies shipped with {productname} are a good start, but depending 24 | on security requirements, adjustments should be made or additional policies should be created. 25 | ==== 26 | 27 | == Default Policies 28 | 29 | {productname} {productmajor} currently ships with two default policies: 30 | 31 | * Privileged (full access everywhere) 32 | * Unprivileged (only very basic access) 33 | 34 | All pods running the containers for the basic {productname} software are 35 | deployed into the `kube-system` namespace and run with the "privileged" policy. 36 | 37 | All authenticated system users (`group system:authenticated`) and service accounts in kube-system (`system:serviceaccounts:kube-system`) 38 | have a RoleBinding (`suse:caasp:psp:privileged`) to run pods using the privileged policy in the kube-system namespace. 39 | 40 | Any other pods launched in any other namespace are, by default, deployed in 41 | unprivileged mode. 
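As a quick check (a minimal sketch; the binding name follows the defaults described above, the pod name is a placeholder, and the `kubernetes.io/psp` annotation is set by the {kube} admission controller), you can list the installed policies and see which policy a running pod was admitted under:

[source,bash]
----
# List the installed Pod Security Policies
kubectl get podsecuritypolicies

# Inspect the RoleBinding that grants the privileged policy in kube-system
kubectl -n kube-system get rolebinding suse:caasp:psp:privileged -o yaml

# Show which policy a running pod was validated against
kubectl -n kube-system describe pod <pod-name> | grep kubernetes.io/psp
----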
42 | 43 | [IMPORTANT] 44 | ==== 45 | You must configure RBAC rules and {psp} to provide proper functionality 46 | and security. 47 | ==== 48 | 49 | == Policy Definition 50 | 51 | The policy definitions are embedded in the link:https://github.com/SUSE/skuba/blob/master/pkg/skuba/actions/cluster/init/manifests.go[cluster bootstrap manifest (GitHub)]. 52 | 53 | During the bootstrap with `skuba`, the policy files will be stored on your 54 | workstation in the cluster definition folder under `addons/psp/base`. These policy files 55 | will be installed automatically for all cluster nodes. 56 | 57 | The file names of the files created are: 58 | 59 | * `podsecuritypolicy-unprivileged.yaml` 60 | + 61 | and 62 | * `podsecuritypolicy-privileged.yaml`. 63 | 64 | [#configure-psp] 65 | === Policy File Examples 66 | 67 | This is the unprivileged policy as a configuration file. You can use this 68 | as a basis to develop your own {psp} which should be saved as `custom-psp.yaml` 69 | `addons/psp/patches` directory. 70 | 71 | ---- 72 | apiVersion: policy/v1beta1 73 | kind: PodSecurityPolicy 74 | metadata: 75 | name: suse.caasp.psp.unprivileged 76 | annotations: 77 | apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default 78 | apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default 79 | seccomp.security.alpha.kubernetes.io/allowedProfileNames: runtime/default 80 | seccomp.security.alpha.kubernetes.io/defaultProfileName: runtime/default 81 | spec: 82 | # Privileged 83 | privileged: false 84 | # Volumes and File Systems 85 | volumes: 86 | # Kubernetes Pseudo Volume Types 87 | - configMap 88 | - secret 89 | - emptyDir 90 | - downwardAPI 91 | - projected 92 | - persistentVolumeClaim 93 | # Networked Storage 94 | - nfs 95 | - rbd 96 | - cephFS 97 | - glusterfs 98 | - fc 99 | - iscsi 100 | # Cloud Volumes 101 | - cinder 102 | - gcePersistentDisk 103 | - awsElasticBlockStore 104 | - azureDisk 105 | - azureFile 106 | - vsphereVolume 107 | allowedHostPaths: 108 | # Note: We don't allow hostPath volumes above, but set this to a path we 109 | # control anyway as a belt+braces protection. /dev/null may be a better 110 | # option, but the implications of pointing this towards a device are 111 | # unclear. 
112 | - pathPrefix: /opt/kubernetes-hostpath-volumes 113 | readOnlyRootFilesystem: false 114 | # Users and groups 115 | runAsUser: 116 | rule: RunAsAny 117 | supplementalGroups: 118 | rule: RunAsAny 119 | fsGroup: 120 | rule: RunAsAny 121 | # Privilege Escalation 122 | allowPrivilegeEscalation: false 123 | defaultAllowPrivilegeEscalation: false 124 | # Capabilities 125 | allowedCapabilities: [] 126 | defaultAddCapabilities: [] 127 | requiredDropCapabilities: [] 128 | # Host namespaces 129 | hostPID: false 130 | hostIPC: false 131 | hostNetwork: false 132 | hostPorts: 133 | - min: 0 134 | max: 65535 135 | # SELinux 136 | seLinux: 137 | # SELinux is unused in CaaSP 138 | rule: 'RunAsAny' 139 | --- 140 | apiVersion: rbac.authorization.k8s.io/v1 141 | kind: ClusterRole 142 | metadata: 143 | name: suse:caasp:psp:unprivileged 144 | rules: 145 | - apiGroups: ['extensions'] 146 | resources: ['podsecuritypolicies'] 147 | verbs: ['use'] 148 | resourceNames: ['suse.caasp.psp.unprivileged'] 149 | --- 150 | # Allow all users and serviceaccounts to use the unprivileged 151 | # PodSecurityPolicy 152 | apiVersion: rbac.authorization.k8s.io/v1 153 | kind: ClusterRoleBinding 154 | metadata: 155 | name: suse:caasp:psp:default 156 | roleRef: 157 | kind: ClusterRole 158 | name: suse:caasp:psp:unprivileged 159 | apiGroup: rbac.authorization.k8s.io 160 | subjects: 161 | - kind: Group 162 | apiGroup: rbac.authorization.k8s.io 163 | name: system:serviceaccounts 164 | - kind: Group 165 | apiGroup: rbac.authorization.k8s.io 166 | name: system:authenticated 167 | ---- 168 | 169 | == Creating a PodSecurityPolicy 170 | 171 | In order to properly secure and run your {kube} workloads you must configure 172 | RBAC rules for your desired users create a {psp} adequate for your respective 173 | workloads and then link the user accounts to the {psp} using (Cluster)RoleBinding. 174 | 175 | {kubedoc}concepts/policy/pod-security-policy/ 176 | -------------------------------------------------------------------------------- /adoc/admin-security-rbac-user-access.adoc: -------------------------------------------------------------------------------- 1 | [#sec-admin-security-rbac-apply] 2 | == User Access 3 | 4 | * Using the web browser: 5 | . Go to the login page at `+https://:32001+`. 6 | . Click "Sign In". 7 | . Choose a login method. 8 | . Enter login credentials. 9 | . Download `kubeconfig` or self-configure `kubectl` with the provided setup instructions. 10 | 11 | * Using the CLI: 12 | . Run `skuba auth login` with Dex server URL `+https://:32000+`, 13 | login username and password. 14 | . The kubeconfig `kubeconf.txt` is generated at your current directory. 15 | 16 | == Access {kube} Resources 17 | 18 | The user can now access resources in the authorized ``. 
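For example (a minimal sketch; `kubeconf.txt` is the file generated by the CLI flow above and the namespace is a placeholder), point `kubectl` at the downloaded kubeconfig and query a resource:

[source,bash]
----
# Use the downloaded kubeconfig for this session
export KUBECONFIG=$PWD/kubeconf.txt

# Query resources in a namespace the user is authorized for
kubectl -n <namespace> get pod
----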
19 | 20 | If the user has the proper permissions to access the resources, the output should look like this: 21 | 22 | ---- 23 | # kubectl -n get pod 24 | 25 | NAMESPACE NAME READY STATUS RESTARTS AGE 26 | kube-system dex-844dc9b8bb-w2zkm 1/1 Running 0 19d 27 | kube-system gangway-944dc9b8cb-w2zkm 1/1 Running 0 19d 28 | kube-system cilium-76glw 1/1 Running 0 27d 29 | kube-system cilium-fvgcv 1/1 Running 0 27d 30 | kube-system cilium-j5lpx 1/1 Running 0 27d 31 | kube-system cilium-operator-5d9cc4fbb7-g5plc 1/1 Running 0 34d 32 | kube-system cilium-vjf6p 1/1 Running 8 27d 33 | kube-system coredns-559fbd6bb4-2r982 1/1 Running 9 46d 34 | kube-system coredns-559fbd6bb4-89k2j 1/1 Running 9 46d 35 | kube-system etcd-my-master 1/1 Running 5 46d 36 | kube-system kube-apiserver- 1/1 Running 0 19d 37 | kube-system kube-controller-manager-my-master 1/1 Running 14 46d 38 | kube-system kube-proxy-62hls 1/1 Running 4 46d 39 | kube-system kube-proxy-fhswj 1/1 Running 0 46d 40 | kube-system kube-proxy-r4h42 1/1 Running 1 39d 41 | kube-system kube-proxy-xsdf4 1/1 Running 0 39d 42 | kube-system kube-scheduler-my-master 1/1 Running 13 46d 43 | ---- 44 | 45 | If the user does not have the right permissions to access a resource, 46 | they will receive a `Forbidden` message. 47 | 48 | ---- 49 | Error from server (Forbidden): pods is forbidden 50 | ---- 51 | 52 | == OIDC Tokens 53 | 54 | The kubeconfig file (`kubeconf.txt`) contains the OIDC tokens necessary to perform authentication and authorization in the cluster. 55 | OIDC tokens have an *expiration date* which means that they need to be refreshed after some time. 56 | 57 | [IMPORTANT] 58 | ==== 59 | If you use the same user in multiple `kubeconfig` files distributed among multiple machines, 60 | this can lead to issues. Due to the nature of access and refresh tokens (https://tools.ietf.org/html/rfc6749#page-10) only one of the machines will be fully able to refresh the token set at any given time. 61 | 62 | The user will be able to download multiple 'kubeconfig' files. However, the file with the same user is likely to be valid only for single access until expiration. 63 | 64 | Dex regards one session per user. The `id-token` and `refresh-token` are refreshed together. 65 | If a second user is trying to login to get a new `id-token`, Dex will invalidate the previous `id-token` and `refresh-token` for the first user. 66 | The first user is still able to continue using the old `id-token` until expiration. After expiration, the first user is not allowed to refresh the `id-token` due to the invalid `refresh-token`. 67 | Only the second user will have a valid `refresh-token` now. The first user will encounter an error like: `"msg="failed to rotate keys: keys already rotated by another server instance"`. 68 | 69 | If sharing the same `id-token` in many places, all of them can be used until expiration. 70 | The first user to refresh the `id-token` and `refresh token` will be able to continue accessing the cluster. 71 | All other users will encounter an error `Refresh token is invalid or has already been claimed by another client` because the `refresh-token` got updated by the first user. 72 | 73 | Please use separate users for each `kubeconfig` file to avoid this situation. 74 | Find out how to add more users in <>. 
You can also check information about the user and the respective OIDC tokens in the `kubeconfig` file under the `users` section: 76 | 77 | ---- 78 | users: 79 | - name: myuser 80 | user: 81 | auth-provider: 82 | config: 83 | client-id: oidc 84 | client-secret: 85 | id-token: 86 | idp-issuer-url: https://: 87 | refresh-token: 88 | name: oidc 89 | ---- 90 | ==== 91 | -------------------------------------------------------------------------------- /adoc/admin-storage-vsphere.adoc: -------------------------------------------------------------------------------- 1 | = vSphere Storage 2 | The vSphere cloud provider can be enabled with {productname} to allow Kubernetes pods to use VMware vSphere Virtual Machine Disk (VMDK) volumes as persistent storage. 3 | 4 | This chapter describes the two types of persistent volume usage: static and dynamic. 5 | 6 | Please refer to link:{docurl}html/caasp-deployment/bootstrap.html[Cluster Bootstrap] on how to set up a vSphere cloud provider enabled cluster. 7 | 8 | == Node Meta 9 | 10 | Extra node metadata is available when region and zone have been added to `vsphere.conf` before bootstrapping the cluster nodes. 11 | 12 | ==== 13 | [Labels] 14 | region = "" 15 | zone = "" 16 | ==== 17 | 18 | `Region` refers to the datacenter and `zone` refers to the cluster grouping of hosts within the datacenter. 19 | Adding region and zone makes {kube} create persistent volumes with zone and region labels. 20 | With such an environment, the {kube} pod scheduler becomes location-aware for the persistent volume. 21 | For more information refer to: https://vmware.github.io/vsphere-storage-for-kubernetes/documentation/zones.html. 22 | 23 | You can view the cloud provider associated node metadata with the following command: 24 | ---- 25 | kubectl get nodes -o jsonpath='{range .items[*]}{.metadata.name}{"\tregion: "}{.metadata.labels.failure-domain\.beta\.kubernetes\.io/region}{"\tzone: "}{.metadata.labels.failure-domain\.beta\.kubernetes\.io/zone}{"\n"}{end}' 26 | ... 27 | 010084072206 region: vcp-provo zone: vcp-cluster-jazz 28 | 010084073045 region: vcp-provo zone: vcp-cluster-jazz 29 | ---- 30 | 31 | == Static Persistent Volume 32 | 33 | . Create a new VMDK volume in the datastore. The VMDK volume used in the persistent volume must exist before the resource is created. 34 | + 35 | You can use `govc` to automate the task. 36 | + 37 | For installation instructions, refer to: https://github.com/vmware/govmomi/tree/master/govc. 38 | + 39 | ---- 40 | govc datastore.disk.create -dc -ds -size 41 | ---- 42 | The datacenter name in vCenter where Kubernetes nodes reside. 43 | + 44 | The datastore in vCenter where volume should be created. 45 | + 46 | The volume size to create, for example 1G. 47 | + 48 | The VMDK volume name, for example my-disk.vmdk, or -folder/my-disk.vmdk. 49 | 50 | . Create persistent volume - sample-static-pv.yaml. 51 | + 52 | ---- 53 | kubectl create -f sample-static-pv.yaml 54 | ---- 55 | + 56 | ==== 57 | apiVersion: v1 58 | kind: PersistentVolume 59 | metadata: 60 | name: sample-static-pv // <1> 61 | spec: 62 | capacity: 63 | storage: 1Gi // <2> 64 | accessModes: 65 | - ReadWriteOnce 66 | persistentVolumeReclaimPolicy: Delete // <3> 67 | vsphereVolume: 68 | volumePath: "[datastore] volume/path" // <4> 69 | fsType: ext4 // <5> 70 | ==== 71 | <1> The name of persistent volume resource. 72 | <2> The disk size available. 73 | <3> The policy for how persistent volume should be handled when it is released. 74 | <4> The path to VMDK volume. This path must exist. 75 | <5> The file system type to mount. 76 | 77 | .
Create persistent volume claim - sample-static-pvc.yaml. 78 | + 79 | ---- 80 | kubectl create -f sample-static-pvc.yaml 81 | ---- 82 | + 83 | ==== 84 | apiVersion: v1 85 | kind: PersistentVolumeClaim 86 | metadata: 87 | name: sample-static-pvc 88 | labels: 89 | app: sample 90 | spec: 91 | accessModes: 92 | - ReadWriteOnce 93 | resources: 94 | requests: 95 | storage: 1Gi // <1> 96 | ==== 97 | <1> The required volume size. 98 | 99 | . Create deployement - sample-static-deployment.yaml. 100 | + 101 | ---- 102 | kubectl create -f sample-static-deployment.yaml 103 | ---- 104 | + 105 | ==== 106 | apiVersion: apps/v1 107 | kind: Deployment 108 | metadata: 109 | name: sample-static-deployment 110 | labels: 111 | app: sample 112 | tier: sample 113 | spec: 114 | selector: 115 | matchLabels: 116 | app: sample 117 | tier: sample 118 | strategy: 119 | type: Recreate 120 | template: 121 | metadata: 122 | labels: 123 | app: sample 124 | tier: sample 125 | spec: 126 | containers: 127 | - image: busybox 128 | name: sample 129 | volumeMounts: 130 | - name: sample-volume 131 | mountPath: /data // <1> 132 | command: [ "sleep", "infinity" ] 133 | volumes: 134 | - name: sample-volume 135 | persistentVolumeClaim: 136 | claimName: sample-static-pvc // <2> 137 | ==== 138 | + 139 | <1> The volume mount path in deployed pod. 140 | <2> The requested persistent volume claim name. 141 | 142 | . Check persistent volume claim is bonded and pod is running. 143 | + 144 | ---- 145 | kubectl get pvc 146 | ... 147 | NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE 148 | sample-static-pvc Bound sample-static-pv 1Gi RWO 55s 149 | 150 | kubectl get pod 151 | ... 152 | NAME READY STATUS RESTARTS AGE 153 | sample-static-deployment-549dc77d76-pwdqw 1/1 Running 0 3m42s 154 | ---- 155 | 156 | == Dynamic Persistent Volume 157 | 158 | . Create storage class - sample-sc.yaml. 159 | + 160 | ---- 161 | kubectl create -f sample-sc.yaml 162 | ---- 163 | + 164 | ==== 165 | kind: StorageClass 166 | apiVersion: storage.k8s.io/v1 167 | metadata: 168 | name: sample-sc 169 | annotations: 170 | storageclass.kubernetes.io/is-default-class: "true" // <1> 171 | provisioner: kubernetes.io/vsphere-volume 172 | parameters: 173 | datastore: "datastore" // <2> 174 | ==== 175 | <1> Set as the default storage class. 176 | <2> The datastore name in vCenter where volume should be created. 177 | 178 | . Create persistent volume claim - sample-dynamic-pvc.yaml. 179 | + 180 | ---- 181 | kubectl create -f sample-dynamic-pvc.yaml 182 | ---- 183 | + 184 | ==== 185 | apiVersion: v1 186 | kind: PersistentVolumeClaim 187 | metadata: 188 | name: sample-dynamic-pvc 189 | annotations: 190 | volume.beta.kubernetes.io/storage-class: sample-sc // <1> 191 | labels: 192 | app: sample 193 | spec: 194 | accessModes: 195 | - ReadWriteOnce 196 | resources: 197 | requests: 198 | storage: 1Gi // <2> 199 | ==== 200 | <1> Annotate with storage class name to use the storage class created. 201 | <2> The required volume size. 202 | 203 | . 
Create deployment - sample-deployment.yaml 204 | + 205 | ---- 206 | kubectl create -f sample-deployment.yaml 207 | ---- 208 | + 209 | ==== 210 | apiVersion: apps/v1 211 | kind: Deployment 212 | metadata: 213 | name: sample-dynamic-deployment 214 | labels: 215 | app: sample 216 | tier: sample 217 | spec: 218 | selector: 219 | matchLabels: 220 | app: sample 221 | tier: sample 222 | strategy: 223 | type: Recreate 224 | template: 225 | metadata: 226 | labels: 227 | app: sample 228 | tier: sample 229 | spec: 230 | containers: 231 | - image: busybox 232 | name: sample 233 | volumeMounts: 234 | - name: sample-volume 235 | mountPath: /data // <1> 236 | command: [ "sleep", "infinity" ] 237 | volumes: 238 | - name: sample-volume 239 | persistentVolumeClaim: 240 | claimName: sample-dynamic-pvc // <2> 241 | ==== 242 | <1> The volume mount path in deployed pod. 243 | <2> The requested persistent volume claim name. 244 | 245 | . Check that the persistent volume claim is bound and the pod is running. 246 | + 247 | ---- 248 | kubectl get pvc 249 | ... 250 | NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE 251 | sample-dynamic-pvc Bound pvc-0ca694b5-0084-4e36-bef1-5b2354158d79 1Gi RWO sample-sc 70s 252 | 253 | kubectl get pod 254 | ... 255 | NAME READY STATUS RESTARTS AGE 256 | sample-dynamic-deployment-687765d5b5-67vnh 1/1 Running 0 20s 257 | ---- 258 | -------------------------------------------------------------------------------- /adoc/admin-troubleshooting-etcd.adoc: -------------------------------------------------------------------------------- 1 | [#troubleshooting-etcd] 2 | = ETCD Troubleshooting 3 | 4 | == Introduction 5 | 6 | This document describes how to debug an etcd cluster. 7 | 8 | The required etcd logs are part of the `supportconfig`, a utility that collects all the required information for debugging a problem. The rest of the document provides information on how you can obtain this information manually. 9 | 10 | == ETCD container 11 | 12 | ETCD is a distributed reliable key-value store for the most critical data of a distributed system. It runs **only on the master** nodes in the form of a container application.
For instance, in a cluster with 3 master nodes, it is expected 13 | to have 3 etcd instances as well: 14 | 15 | [source,bash] 16 | ---- 17 | kubectl get pods -n kube-system -l component=etcd 18 | NAME READY STATUS RESTARTS AGE 19 | etcd-vm072044.qa.prv.suse.net 1/1 Running 1 7d 20 | etcd-vm072050.qa.prv.suse.net 1/1 Running 1 7d 21 | etcd-vm073033.qa.prv.suse.net 1/1 Running 1 7d 22 | ---- 23 | 24 | The specific configuration which `etcd` is using to start, is the following: 25 | 26 | [source,bash] 27 | ---- 28 | etcd \ 29 | --advertise-client-urls=https://:2379 \ 30 | --cert-file=/etc/kubernetes/pki/etcd/server.crt \ 31 | --client-cert-auth=true --data-dir=/var/lib/etcd \ 32 | --initial-advertise-peer-urls=https://:2380 \ 33 | --initial-cluster=vm072050.qa.prv.suse.net=https://:2380 \ 34 | --key-file=/etc/kubernetes/pki/etcd/server.key \ 35 | --listen-client-urls=https://127.0.0.1:2379,https://:2379 \ 36 | --listen-peer-urls=https://:2380 \ 37 | --name=vm072050.qa.prv.suse.net \ 38 | --peer-cert-file=/etc/kubernetes/pki/etcd/peer.crt \ 39 | --peer-client-cert-auth=true \ 40 | --peer-key-file=/etc/kubernetes/pki/etcd/peer.key \ 41 | --peer-trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt \ 42 | --snapshot-count=10000 --trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt 43 | ---- 44 | 45 | [NOTE] 46 | ==== 47 | For more information related to ETCD, we **highly** recommend you to read https://etcd.io/docs/v3.4.0/faq/[ETCD FAQ] page. 48 | ==== 49 | 50 | == logging 51 | 52 | Since `etcd` is running in a container, that means it is not controlled by `systemd`, thus any commands related to that (e.g. `journalctl`) will fail, therefore you need to use container debugging approach instead. 53 | 54 | [NOTE] 55 | ==== 56 | To use the following commands, you need to connect (e.g. via SSH) to the master node where the etcd pod is running. 57 | ==== 58 | 59 | To see the `etcd` logs, connect to a {kube} master node and then run as root: 60 | [source,bash] 61 | ---- 62 | ssh sles@ 63 | sudo bash # connect as root 64 | etcdcontainer=$(crictl ps --label io.kubernetes.container.name=etcd --quiet) 65 | crictl logs -f $etcdcontainer 66 | ---- 67 | 68 | == etcdctl 69 | 70 | `etcdctl` is a command line client for `etcd`. The new version of {productname} is using the `v3` API. For that, you need to make sure to set environment variable `ETCDCTL_API=3` before using it. Apart from that, you need to provide the required keys and certificates for authentication and authorization, via `ETCDCTL_CACERT`, `ETCDCTL_CERT` and `ETCDCTL_KEY` environment variables. Last but not least, you need to also specify the endpoint via `ETCDCTL_ENDPOINTS` environment variable. 71 | 72 | 73 | * **Example** 74 | + 75 | To find out if your network and disk latency are fast enough, you can benchmark your node using the `etcdctl check perf` command. 
To do this, first connect to a {kube} master node: 76 | + 77 | [source,bash] 78 | ---- 79 | ssh sles@ 80 | sudo bash # login as root 81 | ---- 82 | + 83 | and then run as root: 84 | + 85 | [source,bash] 86 | ---- 87 | etcdcontainer=$(crictl ps --label io.kubernetes.container.name=etcd --quiet) 88 | crictl exec $etcdcontainer sh -c \ 89 | "ETCDCTL_ENDPOINTS='https://127.0.0.1:2379' \ 90 | ETCDCTL_CACERT='/etc/kubernetes/pki/etcd/ca.crt' \ 91 | ETCDCTL_CERT='/etc/kubernetes/pki/etcd/server.crt' \ 92 | ETCDCTL_KEY='/etc/kubernetes/pki/etcd/server.key' \ 93 | ETCDCTL_API=3 \ 94 | etcdctl check perf" 95 | ---- 96 | 97 | == curl as an alternative 98 | 99 | For most of the `etcdctl` commands, there is an alternative way to fetch the same information via `curl`. First you need to connect to the master node and then issue a `curl` command against the ETCD endpoint. Here's an example of the information which `supportconfig` is collecting: 100 | 101 | * Health check: 102 | [source,bash] 103 | ---- 104 | sudo curl -Ls --cacert /etc/kubernetes/pki/etcd/ca.crt \ 105 | --key /etc/kubernetes/pki/etcd/server.key \ 106 | --cert /etc/kubernetes/pki/etcd/server.crt https://localhost:2379/health 107 | ---- 108 | 109 | * Member list 110 | [source,bash] 111 | ---- 112 | sudo curl -Ls --cacert /etc/kubernetes/pki/etcd/ca.crt \ 113 | --key /etc/kubernetes/pki/etcd/server.key \ 114 | --cert /etc/kubernetes/pki/etcd/server.crt https://localhost:2379/v2/members 115 | ---- 116 | 117 | * Leader information 118 | [source,bash] 119 | ---- 120 | # available only from the master node where ETCD **leader** runs 121 | sudo curl -Ls --cacert /etc/kubernetes/pki/etcd/ca.crt \ 122 | --key /etc/kubernetes/pki/etcd/server.key \ 123 | --cert /etc/kubernetes/pki/etcd/server.crt https://localhost:2379/v2/stats/leader 124 | ---- 125 | 126 | * Current member information 127 | [source,bash] 128 | ---- 129 | sudo curl -Ls --cacert /etc/kubernetes/pki/etcd/ca.crt \ 130 | --key /etc/kubernetes/pki/etcd/server.key \ 131 | --cert /etc/kubernetes/pki/etcd/server.crt https://localhost:2379/v2/stats/self 132 | ---- 133 | 134 | * Statistics 135 | [source,bash] 136 | ---- 137 | sudo curl -Ls --cacert /etc/kubernetes/pki/etcd/ca.crt \ 138 | --key /etc/kubernetes/pki/etcd/server.key \ 139 | --cert /etc/kubernetes/pki/etcd/server.crt https://localhost:2379/v2/stats/store 140 | ---- 141 | 142 | * Metrics 143 | [source,bash] 144 | ---- 145 | sudo curl -Ls --cacert /etc/kubernetes/pki/etcd/ca.crt \ 146 | --key /etc/kubernetes/pki/etcd/server.key \ 147 | --cert /etc/kubernetes/pki/etcd/server.crt https://localhost:2379/metrics 148 | ---- 149 | -------------------------------------------------------------------------------- /adoc/admin-velero-backup.adoc: -------------------------------------------------------------------------------- 1 | == Backup 2 | 3 | * Annotate Persistent Volume (_optional_) 4 | + 5 | If the persistent volume is provided by a supported volume `snapshotter` provider, skip this procedure. 6 | + 7 | However, if we deploy the `restic` DaemonSet and want to back up the persistent volume with `restic`, we have to manually add the annotation `backup.velero.io/backup-volumes=,,...` to the pods which have mounted the volume. 8 | + 9 | For example, we deploy an Elasticsearch cluster and want to back up the Elasticsearch cluster's data.
Add the annotation to the Elasticsearch cluster pods: 10 | + 11 | [source,bash] 12 | ---- 13 | kubectl annotate pod/elasticsearch-master-0 backup.velero.io/backup-volumes=elasticsearch-master 14 | kubectl annotate pod/elasticsearch-master-1 backup.velero.io/backup-volumes=elasticsearch-master 15 | kubectl annotate pod/elasticsearch-master-2 backup.velero.io/backup-volumes=elasticsearch-master 16 | ---- 17 | + 18 | [NOTE] 19 | Velero currently does not provide a mechanism to detect persistent volume claims that are missing the `restic` backup annotation. 20 | To solve this, there is a community provided controller link:https://github.com/bitsbeats/velero-pvc-watcher[velero-pvc-watcher] which integrates Prometheus to generate alerts for volumes that are not in the backup or backup-exclusion annotation. 21 | 22 | * Manual Backup 23 | + 24 | [source,bash] 25 | ---- 26 | velero backup create 27 | ---- 28 | 29 | * Scheduled Backup 30 | + 31 | The schedule template is in cron notation, using UTC time. The schedule can also be expressed using `@every ` syntax. 32 | The duration can be specified using a combination of seconds (s), minutes (m), and hours (h), for example: `@every 2h30m`. 33 | + 34 | [source,bash] 35 | ---- 36 | # Create schedule template 37 | # Create a backup every 6 hours 38 | velero schedule create --schedule="0 */6 * * *" 39 | 40 | # Create a backup every 6 hours with the @every notation 41 | velero schedule create --schedule="@every 6h" 42 | 43 | # Create a daily backup of the web namespace 44 | velero schedule create --schedule="@every 24h" --include-namespaces web 45 | 46 | # Create a weekly backup, each living for 90 days (2160 hours) 47 | velero schedule create --schedule="@every 168h" --ttl 2160h0m0s 48 | ---- 49 | + 50 | [options="header"] 51 | |=== 52 | | Character Position | Character Period | Acceptable Values 53 | |1 |Minute |`0-59,*` 54 | |2 |Hour |`0-23,*` 55 | |3 |Day of Month |`1-31,*` 56 | |4 |Month |`1-12,*` 57 | |5 |Day of Week |`0-7,*` 58 | |=== 59 | + 60 | [NOTE] 61 | When creating multiple backups to different backup locations in close succession, you might hit object storage server API rate limits. Velero currently does not have a mechanism to retry backups when the rate limit is hit. Consider staggering the times at which multiple backups are created. 62 | 63 | * Optional Flags 64 | 65 | ** Granularity 66 | + 67 | Without passing extra flags to `velero backup create`, Velero will back up the whole {kube} cluster. 68 | 69 | *** Namespace 70 | + 71 | Pass flag `--include-namespaces` or `--exclude-namespaces` to specify which namespaces to include/exclude when backing up. 72 | + 73 | For example: 74 | + 75 | [source,bash] 76 | ---- 77 | # Create a backup including the nginx and default namespaces 78 | velero backup create backup-1 --include-namespaces nginx,default 79 | 80 | # Create a backup excluding the kube-system and default namespaces 81 | velero backup create backup-1 --exclude-namespaces kube-system,default 82 | ---- 83 | 84 | *** Resources 85 | + 86 | Pass flag `--include-resources` or `--exclude-resources` to specify which resources to include/exclude when backing up. 87 | + 88 | For example: 89 | + 90 | [source,bash] 91 | ---- 92 | # Create a backup including storageclass resource only 93 | velero backup create backup-1 --include-resources storageclasses 94 | ---- 95 | + 96 | [TIP] 97 | Use `kubectl api-resources` to list all API resources on the server.
98 | 99 | *** Label Selector 100 | + 101 | Pass `--selector` to only back up resources matching the label selector. 102 | + 103 | [source,bash] 104 | ---- 105 | # Create a backup for the elasticsearch cluster only 106 | velero backup create backup-1 --selector app=elasticsearch-master 107 | ---- 108 | 109 | ** Location 110 | + 111 | Pass `--storage-location` to specify where to store the backup. 112 | For example, assume we have two backup storage locations for high availability, called default and secondary respectively. 113 | + 114 | [source,bash] 115 | ---- 116 | # Create a backup to the default storage server 117 | velero backup create backup2default --storage-location default 118 | 119 | # Create a backup to the secondary storage server 120 | velero backup create backup2secondary --storage-location secondary 121 | ---- 122 | 123 | ** Garbage Collection 124 | + 125 | Pass `--ttl` to specify how long the backup should be kept. After the specified time the backup will be deleted. 126 | The default time for a backup before deletion is 720 hours (30 days). 127 | 128 | ** Exclude Specific Items from Backup 129 | + 130 | You can exclude individual items from being backed up, even if they match the resource/namespace/label selectors defined in the backup spec. To do this, label the item as follows: 131 | + 132 | [source,bash] 133 | ---- 134 | kubectl label -n / velero.io/exclude-from-backup=true 135 | ---- 136 | 137 | === Backup Troubleshooting 138 | 139 | * List Backups 140 | + 141 | [source,bash] 142 | ---- 143 | velero backup get 144 | ---- 145 | 146 | * Describe Backups 147 | + 148 | [source,bash] 149 | ---- 150 | velero backup describe 151 | ---- 152 | 153 | * Retrieve Backup Logs 154 | + 155 | [source,bash] 156 | ---- 157 | velero backup logs 158 | ---- 159 | -------------------------------------------------------------------------------- /adoc/admin-velero-disaster-recovery.adoc: -------------------------------------------------------------------------------- 1 | :toc: 2 | :toclevels: 5 3 | include::entities.adoc[] 4 | 5 | [#backup-and-restore-with-velero] 6 | = Backup and Restore with Velero 7 | 8 | link:https://velero.io/[Velero] is a solution for supporting {kube} cluster disaster recovery, 9 | data migration, and data protection by backing up {kube} cluster resources and persistent volumes to an externally supported storage backend, on demand or on a schedule. 10 | 11 | The major functions include: 12 | 13 | * Back up {kube} resources and persistent volumes for supported storage providers. 14 | * Restore {kube} resources and persistent volumes for supported storage providers. 15 | * When backing up persistent volumes without a supported storage provider, Velero leverages link:https://github.com/restic/restic[restic] as an agnostic solution to back up this sort of persistent volumes under some known limitations. 16 | 17 | Users can leverage these fundamental functions to achieve the following: 18 | 19 | * Back up all {kube} cluster resources, then restore if any {kube} resources are lost. 20 | * Back up selected {kube} resources, then restore if the selected {kube} resources are lost. 21 | * Back up selected {kube} resources and persistent volumes, then restore if the selected {kube} resources or data are lost. 22 | * Replicate or migrate a cluster for any purpose, for example replicating a production cluster to a development cluster for testing. 23 | 24 | Velero consists of the following components: 25 | 26 | * A Velero server that runs on your {kube} cluster.
27 | * A `restic` DaemonSet deployed on each worker node of your {kube} cluster (optional). 28 | * A command-line client that runs locally. 29 | 30 | == Limitations 31 | 32 | . Velero doesn't overwrite objects in-cluster if they already exist. 33 | . Velero supports a single set of credentials _per provider_. 34 | It's not yet possible to use different credentials for different object storage locations for the same provider. 35 | . Volume snapshots are limited by where your provider allows you to create snapshots. 36 | For example, AWS and Azure do not allow you to create a volume snapshot in a different region than where the volume is located. 37 | If you try to take a Velero backup using a volume snapshot location with a different region than where your cluster's volume is, the backup will fail. 38 | . It is not yet possible to send a single Velero backup to multiple backup storage locations simultaneously, or a single volume snapshot to multiple locations simultaneously. 39 | However, you can set up multiple manual or scheduled backups that differ only in their storage locations. 40 | . Cross-provider snapshots are not supported. If you have a cluster with more than one type of volume (e.g. NFS and Ceph), but you only have a volume snapshot location configured for NFS, then Velero will _only_ snapshot the NFS volumes. 41 | . `Restic` data is stored under a prefix/subdirectory of the main Velero bucket and will go into the bucket of the backup storage location selected by the user at backup creation time. 42 | . When performing cluster migration, the number of nodes in the new cluster should be equal to or greater than in the original cluster. 43 | 44 | For more information about storage and snapshot locations, refer to link:https://velero.io/docs/v1.4/locations/[Velero: Backup Storage Locations and Volume Snapshot Locations]. 45 | 46 | include::admin-velero-prereqs.adoc[] 47 | 48 | include::admin-velero-deployment.adoc[] 49 | 50 | == Operations 51 | 52 | include::admin-velero-backup.adoc[] 53 | 54 | include::admin-velero-restore.adoc[] 55 | 56 | include::admin-velero-usecase.adoc[] 57 | 58 | == Uninstall 59 | Remove the Velero server deployment and the `restic` DaemonSet if they exist. 60 | Then, delete the Velero custom resource definitions (CRDs).
61 | 62 | [source,bash] 63 | ---- 64 | helm uninstall velero -n <namespace> 65 | kubectl delete crds -l app.kubernetes.io/name=velero 66 | ---- 67 | -------------------------------------------------------------------------------- /adoc/admin-velero-restore.adoc: -------------------------------------------------------------------------------- 1 | == Restore 2 | 3 | * Manual Restore 4 | + 5 | [source,bash] 6 | ---- 7 | velero restore create --from-backup <backup-name> 8 | ---- 9 | + 10 | For example: 11 | + 12 | [source,bash] 13 | ---- 14 | # Create a restore named "restore-1" from backup "backup-1" 15 | velero restore create restore-1 --from-backup backup-1 16 | 17 | # Create a restore with a default name ("backup-1-<timestamp>") from backup "backup-1" 18 | velero restore create --from-backup backup-1 19 | ---- 20 | 21 | * Restore from a Schedule 22 | + 23 | [source,bash] 24 | ---- 25 | velero restore create --from-schedule <schedule-name> 26 | ---- 27 | + 28 | For example: 29 | + 30 | [source,bash] 31 | ---- 32 | # Create a restore from the latest successful backup triggered by schedule "schedule-1" 33 | velero restore create --from-schedule schedule-1 34 | 35 | # Create a restore from the latest successful OR partially-failed backup triggered by schedule "schedule-1" 36 | velero restore create --from-schedule schedule-1 --allow-partially-failed 37 | ---- 38 | 39 | * Optional Flags 40 | 41 | ** Granularity 42 | + 43 | Without passing extra flags to `velero restore create`, Velero will restore all resources from the backup or the schedule. 44 | 45 | *** Namespace 46 | + 47 | Pass flag `--include-namespaces` or `--exclude-namespaces` to `velero restore create` to specify which namespaces to include/exclude when restoring. 48 | + 49 | For example: 50 | + 51 | [source,bash] 52 | ---- 53 | # Create a restore including the nginx and default namespaces 54 | velero restore create --from-backup backup-1 --include-namespaces nginx,default 55 | 56 | # Create a restore excluding the kube-system and default namespaces 57 | velero restore create --from-backup backup-1 --exclude-namespaces kube-system,default 58 | ---- 59 | 60 | *** Resources 61 | + 62 | Pass flag `--include-resources` or `--exclude-resources` to `velero restore create` to specify which resources to include/exclude when restoring. 63 | + 64 | For example: 65 | + 66 | [source,bash] 67 | ---- 68 | # Create a restore for only persistentvolumeclaims and persistentvolumes within a backup 69 | velero restore create --from-backup backup-1 --include-resources persistentvolumeclaims,persistentvolumes 70 | ---- 71 | + 72 | [TIP] 73 | Use `kubectl api-resources` to list all API resources on the server. 74 | 75 | *** Label Selector 76 | + 77 | Pass `--selector` to only restore the resources matching the label selector.
78 | + 79 | For example: 80 | + 81 | [source,bash] 82 | ---- 83 | # Create a restore for only the elasticsearch cluster within a backup 84 | velero restore create --from-backup backup-1 --selector app=elasticsearch-master 85 | ---- 86 | 87 | === Restore Troubleshooting 88 | 89 | * Retrieve restores 90 | + 91 | [source,bash] 92 | ---- 93 | velero restore get 94 | ---- 95 | 96 | * Describe restores 97 | + 98 | [source,bash] 99 | ---- 100 | velero restore describe <restore-name> 101 | ---- 102 | 103 | * Retrieve restore logs 104 | + 105 | [source,bash] 106 | ---- 107 | velero restore logs <restore-name> 108 | ---- 109 | 110 | [NOTE] 111 | For troubleshooting Velero restores, refer to link:https://velero.io/docs/v1.4/debugging-restores/[Velero: Debugging Restores]. 112 | -------------------------------------------------------------------------------- /adoc/admin-velero-usecase.adoc: -------------------------------------------------------------------------------- 1 | == Use Cases 2 | 3 | === Disaster Recovery 4 | 5 | Use the scheduled backup function for periodic backups. When the {kube} cluster runs into an unexpected state, recover from the most recent scheduled backup. 6 | 7 | * Backup 8 | + 9 | Run the scheduled backup; this creates backup files named `<schedule-name>-<timestamp>`. 10 | + 11 | [source,bash] 12 | ---- 13 | velero schedule create <schedule-name> --schedule="@daily" 14 | ---- 15 | 16 | * Restore 17 | + 18 | When a disaster happens, make sure the Velero server and the `restic` DaemonSet (_optional_) exist. If not, reinstall them from the helm chart. 19 | 20 | . Update the backup storage location to _read-only_ mode (this prevents backup files from being created or deleted in the backup storage location during the restore process): 21 | + 22 | [source,bash] 23 | ---- 24 | kubectl patch backupstoragelocation <storage-location-name> \ 25 | --namespace <velero-namespace> \ 26 | --type merge \ 27 | --patch '{"spec":{"accessMode":"ReadOnly"}}' 28 | ---- 29 | 30 | . Create a restore from the most recent backup file: 31 | + 32 | [source,bash] 33 | ---- 34 | velero restore create --from-backup <schedule-name>-<timestamp> 35 | ---- 36 | 37 | . After the restore has finished, change the backup storage location back to read-write mode: 38 | + 39 | [source,bash] 40 | ---- 41 | kubectl patch backupstoragelocation <storage-location-name> \ 42 | --namespace <velero-namespace> \ 43 | --type merge \ 44 | --patch '{"spec":{"accessMode":"ReadWrite"}}' 45 | ---- 46 | 47 | === Cluster Migration 48 | 49 | You can migrate a {kube} cluster from `cluster 1` to `cluster 2`, as long as you point each cluster's Velero instance to the same external object storage location. 50 | 51 | [NOTE] 52 | ==== 53 | Velero does not support the migration of persistent volumes across public cloud providers. 54 | ==== 55 | 56 | . (At cluster 1) Back up the entire {kube} cluster manually: 57 | + 58 | [source,bash] 59 | ---- 60 | velero backup create <backup-name> 61 | ---- 62 | 63 | . (At cluster 2) Prepare a {kube} cluster deployed by skuba. 64 | 65 | . (At cluster 2) Helm install Velero and make sure the backup-location and snapshot-location point to the same location as cluster 1: 66 | + 67 | [source,bash] 68 | ---- 69 | velero backup-location get 70 | velero snapshot-location get 71 | ---- 72 | + 73 | [NOTE] 74 | The default sync interval is 1 minute. You can change the interval with the flag `--backup-sync-period` when creating a backup location. 75 | 76 | . (At cluster 2) Make sure the cluster 1 backup resources are synced to the external object storage server: 77 | + 78 | [source,bash] 79 | ---- 80 | velero backup get 81 | velero backup describe <backup-name> 82 | ---- 83 | 84 | .
(At cluster 2) Restore the cluster from the backup file: 85 | + 86 | [source,bash] 87 | ---- 88 | velero restore create --from-backup <backup-name> 89 | ---- 90 | 91 | . (At cluster 2) Verify the cluster is behaving correctly: 92 | + 93 | [source,bash] 94 | ---- 95 | velero restore get 96 | velero restore describe <restore-name> 97 | velero restore logs <restore-name> 98 | ---- 99 | 100 | . (At cluster 2) Since Velero doesn't overwrite objects in-cluster if they already exist, manually check all addon configurations after the cluster is restored: 101 | 102 | .. Check the dex configuration: 103 | + 104 | [source,bash] 105 | ---- 106 | # Download dex.yaml 107 | kubectl -n kube-system get configmap oidc-dex-config -o yaml > oidc-dex-config.yaml 108 | 109 | # Edit oidc-dex-config.yaml as desired 110 | vim oidc-dex-config.yaml 111 | 112 | # Apply the new oidc-dex-config.yaml 113 | kubectl apply -f oidc-dex-config.yaml --force 114 | 115 | # Restart the oidc-dex deployment 116 | kubectl rollout restart deployment/oidc-dex -n kube-system 117 | ---- 118 | 119 | .. Check the gangway configuration: 120 | + 121 | [source,bash] 122 | ---- 123 | # Download gangway.yaml 124 | kubectl -n kube-system get configmap oidc-gangway-config -o yaml > oidc-gangway-config.yaml 125 | 126 | # Edit oidc-gangway-config.yaml as desired 127 | vim oidc-gangway-config.yaml 128 | 129 | # Apply the new oidc-gangway-config.yaml 130 | kubectl apply -f oidc-gangway-config.yaml --force 131 | 132 | # Restart the oidc-gangway deployment 133 | kubectl rollout restart deployment/oidc-gangway -n kube-system 134 | ---- 135 | 136 | .. Check whether kured automatic reboots are enabled or disabled as desired: 137 | + 138 | [source,bash] 139 | ---- 140 | kubectl get daemonset kured -o yaml 141 | ---- 142 | 143 | .. Check that the PSP configuration is what you expect: 144 | + 145 | [source,bash] 146 | ---- 147 | kubectl get psp suse.caasp.psp.privileged -o yaml 148 | kubectl get clusterrole suse:caasp:psp:privileged -o yaml 149 | kubectl get rolebinding suse:caasp:psp:privileged -o yaml 150 | 151 | kubectl get psp suse.caasp.psp.unprivileged -o yaml 152 | kubectl get clusterrole suse:caasp:psp:unprivileged -o yaml 153 | kubectl get clusterrolebinding suse:caasp:psp:default -o yaml 154 | ---- 155 | -------------------------------------------------------------------------------- /adoc/architecture-updates.adoc: -------------------------------------------------------------------------------- 1 | === Upgrades of OS components (automated) 2 | 3 | By default, {productname} clusters automatically apply all the patches that are marked as 4 | non-interactive. These patches are safe to apply since they should not cause any side effects. 5 | 6 | However, some patches need the nodes to be rebooted in order to be activated. This is the 7 | case, for example, for kernel updates or some package updates (like glibc). 8 | 9 | The nodes of the cluster have some metadata that is kept up-to-date by {productname}. 10 | This metadata can be used by the cluster administrator to answer these questions: 11 | 12 | * Does the node need to be rebooted to make some updates active? 13 | * Does the node have non-interactive updates pending? 14 | * When was the last check performed? 15 | 16 | Cluster administrators can get a quick overview of each cluster node by using one of the 17 | following methods. 18 | 19 | UI mode: 20 | 21 | * Open a standard Kubernetes UI (e.g. the Kubernetes Dashboard). 22 | * Click on the node. 23 | * Look at the annotations associated with the node.
24 | 25 | Text mode: 26 | 27 | * Go to a machine with a working kubectl (meaning the user can connect to the cluster). 28 | * Ensure you have the caasp kubectl plugin installed. This is a simple statically linked binary 29 | that SUSE distributes alongside `skuba`. 30 | * Execute the `kubectl caasp cluster status` command (or `skuba cluster status`). 31 | 32 | The output of the command will look like this: 33 | 34 | [source,bash] 35 | ---- 36 | NAME      OS-IMAGE                               KERNEL-VERSION           KUBELET-VERSION   CONTAINER-RUNTIME   HAS-UPDATES   HAS-DISRUPTIVE-UPDATES   CAASP-RELEASE-VERSION 37 | master0   SUSE Linux Enterprise Server 15 SP2   4.12.14-197.29-default   v1.18.6           cri-o://1.18.2      no            no                       4.5.0 38 | master1   SUSE Linux Enterprise Server 15 SP2   4.12.14-197.29-default   v1.18.6           cri-o://1.18.2      no            no                       4.5.0 39 | master2   SUSE Linux Enterprise Server 15 SP2   4.12.14-197.29-default   v1.18.6           cri-o://1.18.2      no            no                       4.5.0 40 | worker0   SUSE Linux Enterprise Server 15 SP2   4.12.14-197.29-default   v1.18.6           cri-o://1.18.2      no            no                       4.5.0 41 | worker1   SUSE Linux Enterprise Server 15 SP2   4.12.14-197.29-default   v1.18.6           cri-o://1.18.2      no            no                       4.5.0 42 | worker2   SUSE Linux Enterprise Server 15 SP2   4.12.14-197.29-default   v1.18.6           cri-o://1.18.2      no            no                       4.5.0 43 | ---- 44 | 45 | ==== Node reboots 46 | 47 | Some updates require the node to be rebooted to make them active. 48 | 49 | The {productname} cluster is configured by default to take advantage of kured. This service looks for 50 | nodes that have to be rebooted and, before doing the actual reboot, takes care of draining the 51 | node. 52 | 53 | Kured reboots one node at a time. This ensures the remaining worker nodes won't be saturated when 54 | adopting the workloads of the node being rebooted, and that etcd will always stay 55 | healthy in the case of control plane nodes. 56 | 57 | Cluster administrators can integrate kured stats into a Prometheus instance and create alerts, 58 | charts, and other custom views. 59 | 60 | Cluster administrators can also fine-tune the kured deployment to prevent its agent from 61 | rebooting machines where special workloads are running. For example, it's possible to prevent kured 62 | from rebooting nodes running computational workloads until their pods are done. To achieve 63 | that, instruct cluster users to add special labels to the pods they don't want to see 64 | interrupted by a node reboot. 65 | 66 | ==== Interactive upgrades 67 | 68 | Some updates might cause damage to the running workloads. This document refers to these 69 | interactive updates as "disruptive upgrades". 70 | 71 | Cluster administrators don't have to worry about disruptive upgrades. {productname} will 72 | automatically apply them, making sure no disruption is caused to the cluster. 73 | That happens because nodes with disruptive upgrades are updated one at a time, similar to 74 | when nodes are automatically rebooted. Moreover, {productname} will take care of draining and 75 | cordoning the node before these updates are applied, and uncordoning it afterwards. 76 | 77 | Disruptive upgrades could take some time to be automatically applied due to their sensitive 78 | nature (nodes are updated one by one). Cluster operators can always see the status of all 79 | nodes by looking at the annotations of the kubernetes nodes (see previous section). 80 | 81 | By looking at node annotations a cluster administrator can answer the following questions: 82 | 83 | * Does the node have pending disruptive upgrades?
84 | * Are the disruptive upgrades being applied? 85 | 86 | === Upgrades of OS components (not automated) 87 | 88 | It's possible to disable automatic patch application completely if the user wants to inspect every 89 | patch that will be applied to the cluster, so they are in complete control of when the cluster is 90 | patched. 91 | By default {productname} clusters have some updates applied automatically. Nodes can also be 92 | rebooted in an automatic fashion under some circumstances. 93 | To prevent that from happening, it's possible to annotate the nodes that should not be automatically 94 | rebooted. Any user with rights to annotate nodes will be able to configure this behavior. 95 | Cluster administrators can use software like SUSE Manager to check the patching level of 96 | the underlying operating system of any node. 97 | When rebooting nodes, it's important to take some considerations into account: 98 | 99 | * Ensure nodes are drained (and thus cordoned) before they are rebooted, and uncordon the nodes once they 100 | are back. 101 | * Reboot master/etcd nodes one by one. Wait for the rebooted node to come back, and make 102 | sure etcd is in a healthy state before moving to the next etcd node (this can be done 103 | using etcdctl). 104 | * Do not reboot too many worker nodes at the same time, to avoid swamping the remaining 105 | ones with workloads. 106 | 107 | Cluster administrators have to follow the same steps whenever a node has an 108 | interactive (aka disruptive) upgrade pending. 109 | 110 | === Upgrades of the Kubernetes platform 111 | 112 | The cluster administrator can check whether there's a new Kubernetes version available 113 | distributed by SUSE, and in case there is, they can upgrade the cluster in a controlled way. 114 | 115 | * In order to find out if there's a new Kubernetes version available, run the following command 116 | on a machine that has access to the cluster definition folder: 117 | ** `skuba cluster upgrade plan` 118 | * If there's a new version available, it will be reported in the terminal. 119 | * In order to start the upgrade of the cluster, all commands should be executed from a 120 | machine that contains the cluster definition folder, so that an administrative kubeconfig file is available: 121 | *** This command will confirm the target version to which the cluster will be upgraded if 122 | the process is continued. 123 | ** It's necessary to upgrade all control plane nodes first, running: 124 | ** `skuba node upgrade apply --user sles --sudo --target ` 125 | *** This command has to be applied on all control plane nodes, one by one. 126 | ** It's necessary to upgrade all worker nodes last, running: 127 | ** `skuba node upgrade apply --user sles --sudo --target ` 128 | *** This command has to be applied on all worker nodes, one by one.
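Putting these steps together, a typical upgrade session might look like the following sketch (the `--target` values are illustrative node addresses; substitute the IP addresses or FQDNs of your own control plane and worker nodes):

[source,bash]
----
# Check whether a new Kubernetes version is available
skuba cluster upgrade plan

# Upgrade the control plane nodes first, one by one
skuba node upgrade apply --user sles --sudo --target 10.0.0.1

# Then upgrade the worker nodes, one by one
skuba node upgrade apply --user sles --sudo --target 10.0.0.4
----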
129 | -------------------------------------------------------------------------------- /adoc/attributes.adoc: -------------------------------------------------------------------------------- 1 | // Release type, set this to either 'public' or 'internal' 2 | :release_type: public 3 | :current_year: 2020 4 | 5 | // Product Versions 6 | 7 | //Counting upwards from 4, tied to SLE15 releases 8 | :productmajor: 4 9 | //Counting upwards from 0, tied to kubernetes releases 10 | :productminor: 5 11 | //Counting upwards from 0, tied to maintenance release 12 | :productpatch: 2 13 | :prerelease: 14 | :productversion: {productmajor}.{productminor}.{productpatch}{prerelease} 15 | :github_url: https://github.com/SUSE/doc-caasp 16 | 17 | // Component Versions 18 | :base_os_version: 15 SP2 19 | :vmware_version: 6.7 20 | :crio_version: 1.18.4 21 | :kube_version: 1.18.10 22 | :kubedoc: https://v1-18.docs.kubernetes.io/docs/ 23 | :cap_version: 1.5.2 24 | :cilium_release: 1.7 25 | :cilium_patch_version: 6 26 | :cilium_version: {cilium_release}.{cilium_patch_version} 27 | :cilium_docs_version: v{cilium_release} 28 | :envoy_version: 1.14.4 29 | :etcd_version: 3.4.13 30 | :skuba_version: 2.1.11 31 | :dex_version: 2.23.0 32 | :gangway_version: 3.1.0 33 | :metrics-server_version: 0.3.6 34 | :kured_version: 1.4.3 35 | :terraform_version: 0.12 36 | :haproxy_version: 1.8.7 37 | :helm3: 3.3.3 38 | 39 | // API versions 40 | 41 | :kubeadm_api_version: v1beta1 42 | 43 | // Media Locations 44 | 45 | ifeval::['{release_type}' != 'public'] 46 | :docurl: https://susedoc.github.io/doc-caasp/master/ 47 | endif::[] 48 | 49 | ifeval::['{release_type}' == 'public'] 50 | :docurl: https://documentation.suse.com/suse-caasp/{productmajor}.{productminor}/ 51 | endif::[] 52 | 53 | :caasp_repo_url: http://download.suse.de/ibs/SUSE:/SLE-15-SP2:/Update:/Products:/CASP40/standard/ 54 | :isofile: SLE-15-SP2-Full-x86_64-GM-Media1.iso 55 | :jeos_product_page_url: https://www.suse.com/download/sles/ 56 | :bugzilla_url: https://bugzilla.suse.com/buglist.cgi?bug_status=__open__&list_id=12463857&order=Importance&product=SUSE%20CaaS%20Platform%204&query_format=specific 57 | -------------------------------------------------------------------------------- /adoc/book_admin-docinfo.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | AdminGuide 4 | 5 | 6 | -------------------------------------------------------------------------------- /adoc/book_admin.adoc: -------------------------------------------------------------------------------- 1 | include::attributes.adoc[] 2 | include::entities.adoc[] 3 | 4 | = Administration Guide: This guide describes general and specialized administrative tasks for {productname} {productversion}. 
5 | Markus Napp; Nora Kořánová 6 | :sectnums: 7 | :doctype: book 8 | :toc: left 9 | :toclevels: 3 10 | :toc-title: Contents 11 | :icons: font 12 | :revdate: {docdate} 13 | :imagesdir: images/ 14 | :experimental: 15 | :docinfo: shared,private-head 16 | :release_type: public 17 | 18 | 19 | include::common_disclaimer.adoc[Disclaimer] 20 | 21 | include::common_copyright_quick.adoc[leveloffset=+1] 22 | 23 | 24 | == About This Guide 25 | 26 | include::common_intro_target_audience.adoc[Required Background,leveloffset=+2] 27 | 28 | include::common_intro_available_doc.adoc[Available Documentation,leveloffset=+2] 29 | 30 | include::common_intro_feedback.adoc[Feedback,leveloffset=+2] 31 | 32 | include::common_intro_typography.adoc[leveloffset=+2] 33 | 34 | 35 | == Cluster Management 36 | 37 | include::admin-cluster-management.adoc[Cluster management,leveloffset=+1] 38 | 39 | include::admin-shutdown-startup.adoc[Cluster Shutdown and Startup,leveloffset=+1] 40 | 41 | 42 | == Software Management 43 | 44 | include::admin-software-installation.adoc[Software Installation,leveloffset=+2] 45 | 46 | 47 | == Cluster Updates 48 | 49 | include::admin-updates.adoc[Cluster Updates,leveloffset=+1] 50 | 51 | == Upgrading {productname} 52 | 53 | include::admin-migration.adoc[Migration from {productname} 4.2, leveloffset=+1] 54 | 55 | 56 | == Security 57 | 58 | include::admin-security-firewall.adoc[Network Access Considerations,leveloffset=+2] 59 | 60 | include::admin-security-access.adoc[Access Concepts,leveloffset=+2] 61 | 62 | include::admin-security-deploy-ldap-server.adoc[Deploying LDAP Server, leveloffset=+2] 63 | 64 | include::admin-security-ldap-user-group-management.adoc[LDAP User Group Management, leveloffset=+2] 65 | 66 | include::admin-security-configure-authentication-connector.adoc[Configuring Authentication Connector, leveloffset=+2] 67 | 68 | include::admin-security-rbac-administration.adoc[RBAC Administration,leveloffset=+2] 69 | 70 | include::admin-security-rbac-user-access.adoc[RBAC User Access,leveloffset=+2] 71 | 72 | include::admin-security-admission.adoc[Admission Controller,leveloffset=+2] 73 | 74 | include::admin-security-psp.adoc[Pod Security Policies,leveloffset=+2] 75 | 76 | include::admin-security-nginx-ingress.adoc[Nginx Ingress Controller,leveloffset=+2] 77 | 78 | include::admin-security-certificates.adoc[Certificates,leveloffset=+2] 79 | 80 | 81 | == Logging 82 | 83 | include::admin-logging.adoc[Logging,leveloffset=+1] 84 | 85 | include::admin-logging-skuba.adoc[leveloffset=+2] 86 | 87 | include::admin-logging-audit.adoc[leveloffset=+2] 88 | 89 | include::admin-logging-centralized.adoc[leveloffset=+2] 90 | 91 | 92 | == Monitoring 93 | 94 | include::admin-monitoring-stack.adoc[Monitoring Stack, leveloffset=+2] 95 | 96 | include::admin-monitoring-health-checks.adoc[Health Checks, leveloffset=+2] 97 | 98 | include::admin-monitoring-horizontal-pod-autoscaler.adoc[Horizontal Pod Autoscaler, leveloffset=+2] 99 | 100 | include::admin-stratos-web-console.adoc[Stratos Web Console, leveloffset=+2] 101 | 102 | 103 | == Storage 104 | 105 | include::admin-storage-vsphere.adoc[leveloffset=+2] 106 | 107 | 108 | == Integration 109 | 110 | [NOTE] 111 | ==== 112 | Integration with external systems might require you to install additional packages to the base OS. 113 | Please refer to <>. 
114 | ==== 115 | 116 | // {ses} Integration 117 | 118 | include::admin-ses-integration.adoc[SES Integration, leveloffset=+2] 119 | 120 | // {cap} Integration 121 | 122 | include::admin-cap-integration.adoc[leveloffset=+2] 123 | 124 | 125 | == GPU-Dependent Workloads 126 | 127 | include::admin-gpus.adoc[GPUs, leveloffset=+2] 128 | 129 | // Disaster Recovery 130 | 131 | include::admin-cluster-disaster-recovery.adoc[Cluster Disaster Recovery, leveloffset=+1] 132 | 133 | include::admin-velero-disaster-recovery.adoc[Backup and Restore with Velero, leveloffset=+1] 134 | 135 | 136 | == Miscellaneous 137 | 138 | include::admin-crio-proxy.adoc[CRI-O Proxy,leveloffset=+1] 139 | 140 | include::admin-crio-registries.adoc[CRI-O Registry Configuration,leveloffset=+1] 141 | 142 | //include::admin-security-rbac.adoc[Role Based Access Control (RBAC),leveloffset=+1] 143 | include::admin-flexvolume.adoc[Flexvolume Configuration, leveloffset=+2] 144 | 145 | include::admin-configure-kubelet.adoc[Configure kubelet, leveloffset=+2] 146 | 147 | include::admin-kubernetes-changes.adoc[Kubernetes changes from 1.17 to 1.18,leveloffset=+1] 148 | 149 | 150 | == Troubleshooting 151 | 152 | include::admin-troubleshooting.adoc[Troubleshooting,leveloffset=+1] 153 | 154 | // Glossary 155 | //include::common_glossary.adoc[Glossary] 156 | 157 | // Change Log 158 | // include::common_changelog.adoc[Documentation Change Log] 159 | 160 | // Glossary for abbreviations and terms 161 | include::common_glossary.adoc[Glossary] 162 | 163 | //GNU Licenses 164 | include::common_legal.adoc[Legal] 165 | -------------------------------------------------------------------------------- /adoc/book_airgap.adoc: -------------------------------------------------------------------------------- 1 | include::attributes.adoc[] 2 | include::entities.adoc[] 3 | 4 | = Air gap Deployment Guide: This guide describes deployment for {productname} {productversion} in an air gap environment. 
5 | Markus Napp; Nora Kořánová 6 | :sectnums: 7 | :doctype: book 8 | :toc: left 9 | :toclevels: 3 10 | :toc-title: Contents 11 | :icons: font 12 | :revdate: {docdate} 13 | :imagesdir: images/ 14 | :experimental: 15 | :docinfo: shared,private-head 16 | 17 | include::common_disclaimer.adoc[Disclaimer] 18 | 19 | include::common_copyright_quick.adoc[leveloffset=+1] 20 | 21 | 22 | [preface] 23 | == About This Guide 24 | 25 | include::common_intro_target_audience.adoc[Required Background,leveloffset=+2] 26 | 27 | include::common_intro_available_doc.adoc[Available Documentation,leveloffset=+2] 28 | 29 | include::common_intro_feedback.adoc[Feedback,leveloffset=+2] 30 | 31 | include::common_intro_typography.adoc[leveloffset=+2] 32 | 33 | // Air gapped deployment contents 34 | 35 | include::deployment-airgap.adoc[Air gapped Deployment,leveloffset=+1] 36 | 37 | // Change Log 38 | // include::common_changelog.adoc[Documentation Change Log] 39 | 40 | // Glossary for abbreviations and terms 41 | include::common_glossary.adoc[Glossary] 42 | 43 | //GNU Licenses 44 | include::common_legal.adoc[Legal,leveloffset=0] 45 | -------------------------------------------------------------------------------- /adoc/book_architecture-docinfo.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | ArchitectureGuide 4 | 5 | 6 | -------------------------------------------------------------------------------- /adoc/book_architecture.adoc: -------------------------------------------------------------------------------- 1 | include::attributes.adoc[] 2 | include::entities.adoc[] 3 | 4 | = Architecture Description: This guide describes the architecture of {productname} {productversion}. 5 | :doctype: book 6 | :sectnums: 7 | :sectnumlevels: 4 8 | :toc: left 9 | :toclevels: 4 10 | :icons: font 11 | :revdate: 2019-05-06 12 | :imagesdir: images/ 13 | :experimental: 14 | :docinfo: shared,private-head 15 | 16 | include::common_disclaimer.adoc[Disclaimer] 17 | 18 | include::common_copyright_quick.adoc[leveloffset=+1] 19 | 20 | include::architecture-description.adoc[Architecture Description] 21 | 22 | // Change Log 23 | // include::common_changelog.adoc[Documentation Change Log] 24 | 25 | // Glossary for abbreviations and terms 26 | include::common_glossary.adoc[Glossary] 27 | 28 | //GNU Licenses 29 | include::common_legal.adoc[Legal] 30 | -------------------------------------------------------------------------------- /adoc/book_deployment-docinfo.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | DeploymentGuide 4 | 5 | 6 | -------------------------------------------------------------------------------- /adoc/book_deployment.adoc: -------------------------------------------------------------------------------- 1 | include::attributes.adoc[] 2 | include::entities.adoc[] 3 | 4 | = Deployment Guide: This guide describes deployment for {productname} {productversion}. 
5 | Markus Napp; Nora Kořánová 6 | :sectnums: 7 | :doctype: book 8 | :toc: left 9 | :toclevels: 3 10 | :toc-title: Contents 11 | :icons: font 12 | :revdate: {docdate} 13 | :imagesdir: images/ 14 | :experimental: 15 | :docinfo: shared,private-head 16 | 17 | include::common_disclaimer.adoc[Disclaimer] 18 | 19 | include::common_copyright_quick.adoc[leveloffset=+1] 20 | 21 | 22 | [preface] 23 | == About This Guide 24 | 25 | include::common_intro_target_audience.adoc[Required Background,leveloffset=+2] 26 | 27 | include::common_intro_available_doc.adoc[Available Documentation,leveloffset=+2] 28 | 29 | include::common_intro_feedback.adoc[Feedback,leveloffset=+2] 30 | 31 | include::common_intro_typography.adoc[leveloffset=+2] 32 | 33 | //System requirements 34 | 35 | include::deployment-sysreqs.adoc[System Requirements] 36 | 37 | == Deployment Scenarios 38 | 39 | include::deployment-default.adoc[Default Deployment,leveloffset=+2] 40 | 41 | === Air gap Environment 42 | 43 | For detailed instructions on how to prepare deployment in an air gapped environment, 44 | refer to: link:{docurl}html/caasp-airgap/index.html[]. 45 | 46 | == Deployment Instructions 47 | 48 | [IMPORTANT] 49 | ==== 50 | If you are installing over one of the previous milestones, you must remove the 51 | RPM repository. {productname} is now distributed as an extension for 52 | {sle} and no longer requires the separate repository. 53 | 54 | If you do not remove the repository before installation, there might be conflicts 55 | with the package dependencies that could render your installation nonfunctional. 56 | ==== 57 | 58 | [NOTE] 59 | ==== 60 | Due to a naming convention conflict, all versions of {productname} 4.x up to 4.5 will be released in the `4.0` module. 61 | Starting with 4.5 the product will be delivered in the `4.5` module. 62 | ==== 63 | 64 | include::deployment-preparation.adoc[Deployment Preparations, leveloffset=+1] 65 | 66 | include::deployment-openstack.adoc[SUSE OpenStack Cloud Instructions, leveloffset=+1] 67 | 68 | include::deployment-vmware.adoc[VMware Deployment Instructions, leveloffset=+1] 69 | 70 | include::deployment-bare-metal-or-kvm.adoc[Bare Metal or KVM Install, leveloffset=+1] 71 | 72 | include::deployment-sles.adoc[Existing SLES Installation, leveloffset=+1] 73 | 74 | include::deployment-aws.adoc[Amazon AWS Cloud Instructions, leveloffset=+1] 75 | 76 | include::deployment-bootstrap.adoc[Bootstrapping,leveloffset=0] 77 | 78 | include::deployment-cilium.adoc[Cilium,leveloffset=0] 79 | 80 | // Change Log 81 | // include::common_changelog.adoc[Documentation Change Log] 82 | 83 | // Glossary for abbreviations and terms 84 | include::common_glossary.adoc[Glossary] 85 | 86 | //GNU Licenses 87 | include::common_legal.adoc[Legal,leveloffset=0] 88 | -------------------------------------------------------------------------------- /adoc/book_quickstart-docinfo.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | QuickstartGuide 4 | 5 | 6 | -------------------------------------------------------------------------------- /adoc/book_quickstart.adoc: -------------------------------------------------------------------------------- 1 | include::attributes.adoc[] 2 | include::entities.adoc[] 3 | 4 | = QuickStart Guide: This guide describes quick deployment for {productname} {productversion}. 
5 | Markus Napp; Nora Kořánová 6 | :sectnums: 7 | :doctype: book 8 | :toc: left 9 | :toclevels: 3 10 | :toc-title: Contents 11 | :icons: font 12 | :revdate: {docdate} 13 | :imagesdir: images/ 14 | :experimental: 15 | :docinfo: shared,private-head 16 | 17 | include::common_disclaimer.adoc[Disclaimer] 18 | 19 | include::common_copyright_quick.adoc[leveloffset=+1] 20 | 21 | 22 | // System requirements 23 | include::deployment-sysreqs.adoc[System Requirements] 24 | 25 | include::deployment-preparation.adoc[Preparation] 26 | 27 | include::deployment-bare-metal-or-kvm.adoc[Deployment Instructions] 28 | 29 | include::deployment-bootstrap.adoc[Bootstrapping,leveloffset=+1] 30 | 31 | // Glossary for abbreviations and terms 32 | include::common_glossary.adoc[Glossary] 33 | 34 | //GNU Licenses 35 | include::common_legal.adoc[Legal] 36 | -------------------------------------------------------------------------------- /adoc/common_authors.adoc: -------------------------------------------------------------------------------- 1 | 2 | **Authors:** Markus Napp, Nora Kořánová 3 | -------------------------------------------------------------------------------- /adoc/common_copyright.adoc: -------------------------------------------------------------------------------- 1 | 2 | = Legal Notice 3 | :imagesdir: ./images 4 | 5 | 6 | Copyright (C) 7 | 2006 -- {current_year} 8 | SUSE LLC and contributors. 9 | All rights reserved. 10 | 11 | Permission is granted to copy, distribute and/or modify this document under the terms of the GNU Free Documentation License, Version 1.2 or (at your option) version 1.3; with the Invariant Section being this copyright notice and license. 12 | A copy of the license version 1.2 is included in the section entitled "`GNU Free Documentation License`" 13 | . 14 | 15 | For {suse} 16 | trademarks, see http://www.suse.com/company/legal/. 17 | All other third-party trademarks are the property of their respective owners. 18 | Trademark symbols ({reg} 19 | , {trade} 20 | , etc.) denote trademarks of {suse} 21 | and its affiliates. 22 | Asterisks (*) denote third-party trademarks. 23 | 24 | All information found in this book has been compiled with utmost attention to detail. 25 | However, this does not guarantee complete accuracy. 26 | Neither SUSE LLC, its affiliates, the authors, nor the translators shall be held liable for possible errors or the consequences thereof. 27 | -------------------------------------------------------------------------------- /adoc/common_copyright_gfdl.adoc: -------------------------------------------------------------------------------- 1 | 2 | = Legal Notice 3 | :imagesdir: ./images 4 | 5 | 6 | Copyright (C) 7 | 2006 -- {current_year} 8 | SUSE LLC and contributors. 9 | All rights reserved. 10 | 11 | Permission is granted to copy, distribute and/or modify this document under the terms of the GNU Free Documentation License, Version 1.2 or (at your option) version 1.3; with the Invariant Section being this copyright notice and license. 12 | A copy of the license version 1.2 is included in the section entitled "`GNU Free Documentation License`" 13 | . 14 | 15 | For {suse} 16 | trademarks, see http://www.suse.com/company/legal/. 17 | All other third-party trademarks are the property of their respective owners. 18 | Trademark symbols ({reg} 19 | , {trade} 20 | , etc.) denote trademarks of {suse} 21 | and its affiliates. 22 | Asterisks (*) denote third-party trademarks. 23 | 24 | All information found in this book has been compiled with utmost attention to detail. 
25 | However, this does not guarantee complete accuracy. 26 | Neither SUSE LLC, its affiliates, the authors nor the translators shall be held liable for possible errors or the consequences thereof. 27 | -------------------------------------------------------------------------------- /adoc/common_copyright_quick.adoc: -------------------------------------------------------------------------------- 1 | 2 | //= Legal Notice 3 | :imagesdir: ./images 4 | 5 | 6 | Copyright (C) 7 | 2006 -- {current_year} 8 | SUSE LLC and contributors. 9 | All rights reserved. 10 | 11 | Permission is granted to copy, distribute and/or modify this document under the terms of the GNU Free Documentation License, Version 1.2 or (at your option) version 1.3; with the Invariant Section being this copyright notice and license. 12 | A copy of the license version 1.2 is included in the section entitled "`GNU Free Documentation License`". 13 | 14 | For {suse} 15 | trademarks, see http://www.suse.com/company/legal/. 16 | All other third-party trademarks are the property of their respective owners. 17 | Trademark symbols ((R), (TM), etc.) denote trademarks of {suse} and its affiliates. 18 | Asterisks (*) denote third-party trademarks. 19 | 20 | All information found in this book has been compiled with utmost attention to detail. 21 | However, this does not guarantee complete accuracy. 22 | Neither SUSE LLC, its affiliates, the authors, nor the translators shall be held liable for possible errors or the consequences thereof. 23 | -------------------------------------------------------------------------------- /adoc/common_disclaimer.adoc: -------------------------------------------------------------------------------- 1 | ifeval::['{release_type}' != 'public'] 2 | [WARNING] 3 | ==== 4 | !!! This is an internal release and MUST NOT be distributed outside SUSE !!! 5 | ==== 6 | endif::[] 7 | 8 | [WARNING] 9 | ==== 10 | This document is a work in progress. 11 | 12 | The content in this document is subject to change without notice. 13 | ==== 14 | 15 | [NOTE] 16 | ==== 17 | This guide assumes a configured {sls} {base_os_version} environment. 18 | ==== 19 | -------------------------------------------------------------------------------- /adoc/common_glossary.adoc: -------------------------------------------------------------------------------- 1 | == Glossary 2 | 3 | [horizontal] 4 | AWS:: 5 | Amazon Web Services. A broadly adopted cloud platform run by Amazon. 6 | BPF:: 7 | Berkeley Packet Filter. Technology used by Cilium to filter network traffic at the level of packet processing in the kernel. 8 | CA:: 9 | Certificate or Certification Authority. An entity that issues digital certificates. 10 | CIDR:: 11 | Classless Inter-Domain Routing. Method for allocating IP addresses and IP routing. 12 | CNI:: 13 | Container Networking Interface. Creates a generic plugin-based networking solution for containers based on spec files in JSON format. 14 | CRD:: 15 | Custom Resource Definition. Functionality to define non-default resources for {kube} pods. 16 | FQDN:: 17 | Fully Qualified Domain Name. The complete domain name for a specific computer, or host, on the internet, consisting of two parts: the hostname and the domain name. 18 | GKE:: 19 | Google Kubernetes Engine. Manager for container orchestration built on {kube} by Google. Similar for example to Amazon Elastic Kubernetes Service (Amazon EKS) and Azure Kubernetes Service (AKS). 20 | HPA:: 21 | Horizontal Pod Autoscaler. 
Based on CPU usage, HPA controls the number of pods in a deployment/replica or stateful set or a replication controller. 22 | KVM:: 23 | Kernel-based Virtual Machine. Linux native virtualization tool that allows the kernel to function as a hypervisor. 24 | LDAP:: 25 | Lightweight Directory Access Protocol. A client/server protocol used to access and manage directory information. It reads and edits directories over IP networks and runs directly over TCP/IP using simple string formats for data transfer. 26 | OCI:: 27 | Open Containers Initiative. A project under the Linux Foundation with the goal of creating open industry standards around container formats and runtime. 28 | OIDC:: 29 | OpenID Connect. Identity layer on top of the OAuth 2.0 protocol. 30 | OLM:: 31 | Operator Lifecycle Manager. Open Source tool for managing operators in a {kube} cluster. 32 | POC:: 33 | Proof of Concept. Pioneering project directed at proving the feasibility of a design concept. 34 | PSP:: 35 | Pod Security Policy. PSPs are cluster-level resources that control security-sensitive aspects of pod specification. 36 | PVC:: 37 | Persistent Volume Claim. A request for storage by a user. 38 | RBAC:: 39 | Role-based Access Control. An approach to restrict authorized user access based on defined roles. 40 | RMT:: 41 | Repository Mirroring Tool. Successor of the SMT. Helps optimize the management of {sle} software updates and subscription entitlements. 42 | RPO:: 43 | Recovery Point Objective. Defines the interval of time that can occur between to backup points before normal business can no longer be resumed. 44 | RTO:: 45 | Recovery Time Objective. This defines the time (and typically service level from SLA) with which backup relevant incidents must be handled within. 46 | RSA:: 47 | Rivest-Shamir-Adleman. Asymmetric encryption technique that uses two different keys as public and private keys to perform the encryption and decryption. 48 | SLA:: 49 | Service Level Agreement. A contractual clause or set of clauses that determines the guaranteed handling of support or incidents by a software vendor or supplier. 50 | SMT:: 51 | SUSE Subscription Management Tool. Helps to manage software updates, maintain corporate firewall policy and meet regulatory compliance requirements in {sle} 11 and 12. Has been replaced by the RMT and {susemgr} in newer {sle} versions. 52 | STS:: 53 | StatefulSet. Manages the deployment and scaling of a set of Pods, and provides guarantees about the ordering and uniqueness of these Pods for a "stateful" application. 54 | SMTP:: 55 | Simple Mail Transfer Protocol. A communication protocol for electronic mail transmission. 56 | TOML:: 57 | Tom's Obvious, Minimal Language. Configuration file format used for configuring container registries for {crio}. 58 | VPA:: 59 | Vertical Pod Autoscaler. VPA automatically sets the values for resource requests and container limits based on usage. 60 | VPC:: 61 | Virtual Private Cloud. Division of a public cloud, which supports private cloud computing and thus offers more control over virtual networks and an isolated environment for sensitive workloads. 62 | 63 | // Define these 64 | -------------------------------------------------------------------------------- /adoc/common_intro_available_doc.adoc: -------------------------------------------------------------------------------- 1 | = Available Documentation 2 | :imagesdir: ./images 3 | 4 | (((help,SUSE manuals))) 5 | 6 | 7 | We provide HTML and PDF versions of our books in different languages. 
8 | Documentation for our products is available at https://documentation.suse.com/, where you can also find the latest updates and browse or download the documentation in various formats. 9 | 10 | The following documentation is available for this product: 11 | // 12 | // Architecture Guide:: 13 | // The {productname} Architecture Guide gives you a rough overview of the software architecture. 14 | // It is as of yet incomplete and will change infrequently. 15 | 16 | Deployment Guide:: 17 | The {productname} Deployment Guide gives you details about installation and configuration of {productname} 18 | along with a description of architecture and minimum system requirements. 19 | 20 | Quick Start Guide:: 21 | The {productname} 22 | Quick Start guides you through the installation of a minimum cluster in the fastest way possible. 23 | 24 | Admin Guide:: 25 | The {productname} 26 | Admin Guide discusses authorization, updating clusters and individual nodes, monitoring, logging, use of Helm, troubleshooting and integration with {ses} and {cap}. 27 | 28 | ifdef::backend-docbook[] 29 | [index] 30 | == Index 31 | // Generated automatically by the DocBook toolchain. 32 | endif::backend-docbook[] 33 | -------------------------------------------------------------------------------- /adoc/common_intro_feedback.adoc: -------------------------------------------------------------------------------- 1 | = Feedback 2 | :imagesdir: ./images 3 | 4 | 5 | Several feedback channels are available: 6 | 7 | Bugs and Enhancement Requests:: 8 | For services and support options available for your product, refer to http://www.suse.com/support/. 9 | + 10 | To report bugs for a product component, go to https://scc.suse.com/support/requests, log in, and click menu:Create New[]. 11 | 12 | User Comments:: 13 | We want to hear your comments about and suggestions for this manual and the other documentation included with this product. 14 | Use the User Comments feature at the bottom of each page in the online documentation or go to https://documentation.suse.com/, click Feedback at the bottom of the page and enter your comments in the Feedback Form. 15 | 16 | Mail:: 17 | For feedback on the documentation of this product, you can also send a mail to ``doc-team@suse.com``. 18 | Make sure to include the document title, the product version and the publication date of the documentation. 19 | To report errors or suggest enhancements, provide a concise description of the problem and refer to the respective section number and page (or URL). 20 | -------------------------------------------------------------------------------- /adoc/common_intro_target_audience.adoc: -------------------------------------------------------------------------------- 1 | = Required Background 2 | :imagesdir: ./images 3 | 4 | To keep the scope of these guidelines manageable, certain technical assumptions have been made. 5 | These documents are not aimed at beginners in {kube} usage and require that: 6 | 7 | * You have some computer experience and are familiar with common technical terms. 8 | * You are familiar with the documentation for your system and the network on which it runs. 9 | * You have a basic understanding of Linux systems. 10 | * You have an understanding of how to follow instructions aimed at experienced Linux administrators 11 | and can fill in gaps with your own research. 12 | * You understand how to plan, deploy and manage {kube} applications. 
13 | -------------------------------------------------------------------------------- /adoc/common_intro_typography.adoc: -------------------------------------------------------------------------------- 1 | include::attributes.adoc[] 2 | include::entities.adoc[] 3 | 4 | = Documentation Conventions 5 | :imagesdir: ./images 6 | :experimental: 7 | 8 | The following notices and typographical conventions are used in this documentation: 9 | 10 | * [path]``/etc/passwd`` : directory names and file names 11 | * ``: replace `` with the actual value 12 | * [var]``PATH``: the environment variable PATH 13 | * `ls`, [option]``--help``: commands, options, and parameters 14 | * [username]``user`` : users or groups 15 | * [package]#package name# : name of a package 16 | * kbd:[Alt], kbd:[Alt+F1] : a key to press or a key combination; keys are shown in uppercase as on a keyboard 17 | * menu:File[Save As] : menu items, buttons 18 | * _Dancing Penguins_ (Chapter __Penguins__, ↑Another Manual): This is a reference to a chapter in another manual. 19 | * Commands that must be run with {rootuser} privileges. Often you can also prefix these commands with the `sudo` command to run them as non-privileged user. 20 | + 21 | [source,bash,subs="attributes"] 22 | ---- 23 | sudo command 24 | ---- 25 | * Commands that can be run by non-privileged users. 26 | + 27 | [source,bash,subs="attributes"] 28 | ---- 29 | command 30 | ---- 31 | * Notices: 32 | + 33 | [WARNING] 34 | ==== 35 | Vital information you must be aware of before proceeding. 36 | Warns you about security issues, potential loss of data, damage to hardware, or physical hazards. 37 | ==== 38 | + 39 | [IMPORTANT] 40 | ==== 41 | Important information you should be aware of before proceeding. 42 | ==== 43 | + 44 | [NOTE] 45 | ==== 46 | Additional information, for example about differences in software versions. 47 | ==== 48 | + 49 | [TIP] 50 | ==== 51 | Helpful information, like a guideline or a piece of practical advice. 52 | ==== 53 | + 54 | -------------------------------------------------------------------------------- /adoc/common_tech_preview.adoc: -------------------------------------------------------------------------------- 1 | [IMPORTANT] 2 | ==== 3 | This feature is offered as a "tech preview". 4 | 5 | We release this as a tech-preview in order to get early feedback from our customers. 6 | Tech previews are largely untested, unsupported, and thus not ready for production use. 7 | 8 | That said, we strongly believe this technology is useful at this stage in order to make the right improvements based on your feedback. 9 | A fully supported, production-ready release is planned for a later point in time. 10 | ==== 11 | -------------------------------------------------------------------------------- /adoc/deployment-bare-metal-or-kvm.adoc: -------------------------------------------------------------------------------- 1 | include::entities.adoc[] 2 | 3 | [#deployment-bare-metal] 4 | == Deployment on Bare Metal or KVM 5 | 6 | .Preparation Required 7 | [NOTE] 8 | You must have completed <> to proceed. 9 | 10 | [NOTE] 11 | ==== 12 | If deploying on KVM virtual machines, you may use a tool such as `virt-manager` 13 | to configure the virtual machines and begin the {sls} {base_os_version} installation. 14 | ==== 15 | 16 | === Environment Description 17 | 18 | [NOTE] 19 | ==== 20 | You must have a load balancer configured as described in <>. 21 | ==== 22 | 23 | [NOTE] 24 | ==== 25 | The {ay} file found in `skuba` is a template. It has the base requirements. 
26 | This {ay} file should act as a guide and should be updated with your company's standards. 27 | ==== 28 | 29 | [NOTE] 30 | ==== 31 | To account for hardware/platform-specific setup criteria (legacy BIOS vs. (U)EFI, drive partitioning, networking, etc.), 32 | you must adjust the {ay} file to your needs according to the requirements. 33 | 34 | Refer to the official {ay} documentation for more information: link:https://documentation.suse.com/sles/15-SP2/single-html/SLES-autoyast/#book-autoyast[{ay} Guide]. 35 | ==== 36 | 37 | ==== Machine Configuration Prerequisites 38 | 39 | Deployment with {ay} will require a minimum *disk size of 40 GB*. 40 | That space is reserved for container images without any workloads (10 GB), 41 | for the root partition (30 GB) and the EFI system partition (200 MB). 42 | 43 | === {ay} Preparation 44 | 45 | . On the management machine, get an example {ay} file from `/usr/share/caasp/autoyast/bare-metal/autoyast.xml`, 46 | (which was installed earlier on as part of the management pattern (`sudo zypper in -t pattern SUSE-CaaSP-Management`). 47 | . Copy the file to a suitable location to modify it. Name the file `autoyast.xml`. 48 | . Modify the following places in the {ay} file (and any additional places as required by your specific configuration/environment): 49 | .. `` 50 | + 51 | Change the pre-filled value to your organization's NTP server. Provide multiple servers if possible by adding new `` subentries. 52 | .. `` 53 | + 54 | Adjust the timezone your nodes will be set to. Refer to: link:https://documentation.suse.com/sles/15-SP2/single-html/SLES-autoyast/#id-1.7.5.13.6[{sls} {ay} Guide: Country Settings] 55 | .. `sles` 56 | + 57 | Insert your authorized key in the placeholder field. 58 | .. `` 59 | + 60 | You can add additional users by creating new blocks in the configuration containing their data. 61 | + 62 | [NOTE] 63 | ==== 64 | If the users are configured to not have a password like in the example, ensure the system's `sudoers` file is updated. 65 | Without updating the sudoers file the user will only be able to perform basic operations that will prohibit many administrative tasks. 66 | 67 | The default {ay} file provides examples for a disabled `root` user and a `sles` user with authorized key SSH access. 68 | 69 | The password for root can be enabled by using the `passwd` command. 70 | ==== 71 | .. `` 72 | + 73 | Insert the email address and {productname} registration code in the placeholder fields. This activates {sls} {base_os_version}. 74 | .. `` 75 | + 76 | Insert the {productname} registration code in the placeholder field. This enables the {productname} extension module. 77 | Update the {ay} file with your registration keys and your company's best practices and hardware configurations. 78 | + 79 | [NOTE] 80 | ==== 81 | Your {productname} registration key can be used to both activate {sls} {base_os_version} and enable the extension. 82 | ==== 83 | 84 | + 85 | Refer to the official {ay} documentation for more information: link:https://documentation.suse.com/sles/15-SP2/single-html/SLES-autoyast/#book-autoyast[{ay} Guide]. 86 | . Host the {ay} files on a Web server reachable inside the network you are installing the cluster in. 87 | 88 | ==== Deploying with local {RMT} server 89 | 90 | In order to use a local {RMT} server for deployment of packages, you need to specify 91 | the server configuration in your {ay} file. 
To do so add the following section: 92 | 93 | [source,xml,subs="-macros"] 94 | ---- 95 | 96 | true 97 | true 98 | 99 | https://rmt.example.org // <1> 100 | https://rmt.example.org/rmt.crt // <2> 101 | SHA1 102 | 0C:A4:A1:06:AD:E2:A2:AA:D0:08:28:95:05:91:4C:07:AD:13:78:FE // <3> 103 | false 104 | 105 | 106 | sle-module-containers 107 | 15.2 108 | x86_64 109 | 110 | 111 | sle-module-public-cloud 112 | 15.2 113 | x86_64 114 | 115 | 116 | caasp 117 | 4.5 118 | x86_64 119 | 120 | 121 | 122 | ---- 123 | <1> Provide FQDN of the {RMT} server 124 | <2> Provide the location on the server where the certificate can be found 125 | <3> Provide the certificate fingerprint for the {rmt} server 126 | 127 | === Provisioning the Cluster Nodes 128 | 129 | Once the {ay} file is available in the network that the machines will be configured in, you can start deploying machines. 130 | 131 | The default production scenario consists of 6 nodes: 132 | 133 | * 1 load balancer 134 | * 3 masters 135 | * 2 workers 136 | 137 | Depending on the type of load balancer you wish to use, you need to deploy at least 5 machines to serve as cluster nodes and provide a load balancer from the environment. 138 | 139 | The load balancer must point at the machines that are assigned to be used as `master` nodes in the future cluster. 140 | 141 | [TIP] 142 | If you do not wish to use infrastructure load balancers, please deploy additional machines and refer to <>. 143 | 144 | Install {sls} {base_os_version} from your preferred medium and follow the steps for link:https://documentation.suse.com/sles/15-SP2/single-html/SLES-autoyast/#invoking-autoinst[Invoking the Auto-Installation Process] 145 | 146 | Provide `autoyast=https://[webserver/path/to/autoyast.xml]` during the {sls} {base_os_version} installation. 147 | 148 | ==== {sls} Installation 149 | 150 | [NOTE] 151 | ==== 152 | Use AutoYaST and make sure to use a staged frozen patchlevel via RMT/SUSE Manager to ensure a 100% reproducible setup. 153 | link:https://documentation.suse.com/sles/15-SP2/single-html/SLES-rmt/#cha-rmt-client[RMT Guide] 154 | ==== 155 | 156 | Once the machines have been installed using the {ay} file, you are now ready proceed with <>. 157 | 158 | === Container Runtime Proxy 159 | 160 | [IMPORTANT] 161 | ==== 162 | {crio} proxy settings must be adjusted on all nodes before joining the cluster! 163 | 164 | Please refer to: link:{docurl}html/caasp-admin/_miscellaneous.html#_configuring_httphttps_proxy_for_cri_o[] 165 | 166 | In some environments you must configure the container runtime to access the container registries through a proxy. 167 | In this case, please refer to: {docurl}html/caasp-admin/_miscellaneous.html#_configuring_httphttps_proxy_for_cri_o[{productname} Admin Guide: Configuring HTTP/HTTPS Proxy for CRI-O] 168 | ==== 169 | -------------------------------------------------------------------------------- /adoc/deployment-cilium.adoc: -------------------------------------------------------------------------------- 1 | == Cilium Network Policy Config Examples 2 | 3 | The following example allows all pods in the namespace in which the policy is created to communicate with kube-dns on port 53/UDP in the kube-system namespace. 4 | 5 | [NOTE] 6 | ==== 7 | Versions of {productname} after 4.1 are slated to include L7 policy management which will enable policies to be enforced on items like memcached verbs, gRPC methods, and Cassandra tables. 
8 | ====
9 |
10 | The default behavior of {kube} is that all pods can communicate with all other pods within a cluster, whether those pods are hosted by the same {kube} node or by different ones.
11 | This behavior is intentional, and aids greatly in the development process as the complexity of networking is effectively removed from both the developer and the operator.
12 |
13 | However, when a workload is deployed in a {kube} cluster in production, any number of reasons may arise that lead to the need to isolate some workloads from others.
14 | For example, if a Human Resources department is running workloads processing PII (Personally Identifiable Information), those workloads should not by default be accessible by any other workload in the cluster.
15 |
16 | Network policies are the mechanism provided by {kube} which allows a cloud operator to isolate workloads from each other in a variety of ways.
17 | For example, a policy could be defined which allows a database server workload to be accessed only by the web servers whose pages use the data in the database.
18 | Another policy could be defined which allows only web browsers outside the cluster to access the web server workloads in the cluster, and so on.
19 |
20 | To implement network policies, a network plugin must be correctly integrated into the cluster. {productname} incorporates Cilium as its supported network policy management plugin.
21 | Cilium leverages link:https://www.kernel.org/doc/html/latest/bpf/index.html[BPF (Berkeley Packet Filter)], where every bit of communication transits through a packet processing engine in the kernel.
22 | Other policy management plugins in the {kube} ecosystem leverage `iptables`.
23 |
24 | {suse} has supported `iptables` since its inception in the Linux world, but believes BPF brings sufficiently compelling advantages (fine-grained control, performance) over `iptables`.
25 | Not only does Cilium have performance benefits brought on by BPF, it also has benefits far higher in the network stack.
26 |
27 | The most typically used policies in {kube} cover L3 and L4 events in the network stack, allowing workloads to be protected by specifying IP addresses and TCP ports.
28 | To implement the earlier example of a dedicated web server accessing a critical secured database, an L3/L4 policy would be defined allowing a web server workload running at IP address `192.168.0.1` to access a MySQL database workload running at IP address `192.168.0.2` on TCP port `3306`.
29 |
30 | [source,yaml]
31 | ----
32 | apiVersion: "cilium.io/v2"
33 | kind: CiliumNetworkPolicy
34 | metadata:
35 |   name: "allow-to-kubedns"
36 | spec:
37 |   endpointSelector:
38 |     {}
39 |   egress:
40 |     - toEndpoints:
41 |         - matchLabels:
42 |             k8s:io.kubernetes.pod.namespace: kube-system
43 |             k8s-app: kube-dns
44 |       toPorts:
45 |         - ports:
46 |             - port: '53'
47 |               protocol: UDP
48 | ----
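To try out a policy like the one above, save it to a file and apply it with {kubectl}. The following is a minimal sketch; the file and namespace names are only examples, and listing the objects assumes the CiliumNetworkPolicy CRD is installed, which is the case when Cilium is the cluster's CNI plugin.

[source,bash]
----
# Apply the policy in the namespace whose pods it should cover.
kubectl apply -f allow-to-kubedns.yaml --namespace default

# Confirm the policy object was created.
kubectl get ciliumnetworkpolicies --namespace default

# Remove the policy again if it was only a test.
kubectl delete ciliumnetworkpolicy allow-to-kubedns --namespace default
----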
49 |
-------------------------------------------------------------------------------- /adoc/deployment-default.adoc: --------------------------------------------------------------------------------
1 | = Default Deployment Scenario
2 | // :doctype: book
3 | :sectnums:
4 | :toc: left
5 | :icons: font
6 | :experimental:
7 | // :imagesdir: images
8 |
9 | The default scenario consists of a {productname} cluster, an external load balancer, and a management workstation.
10 |
11 | The minimum viable failure-tolerant configuration for the cluster is 3 master nodes and 2 worker nodes.
12 | For more information, refer to <>.
13 |
14 | image::caasp_cluster_components.png[CaaSP Components,width=800,pdfwidth=80%]
15 | -------------------------------------------------------------------------------- /adoc/deployment-sles.adoc: --------------------------------------------------------------------------------
1 | == Deployment on Existing SLES Installation
2 |
3 | If you already have a running {sls} {base_os_version} installation, you can add {productname}
4 | to this installation using SUSEConnect. You also need to enable the "Containers" and "Public Cloud"
5 | modules because they contain some dependencies required by {productname}.
6 |
7 | === Requirements
8 |
9 | .Preparation Required
10 | [NOTE]
11 | You must have completed <> to proceed.
12 |
13 | ==== Dedicated Cluster Nodes
14 |
15 | [IMPORTANT]
16 | ====
17 | Adding a machine with an existing use case (e.g. a web server) as a cluster node is not supported!
18 | ====
19 |
20 | {productname} requires dedicated machines as cluster nodes.
21 |
22 | The instructions in this document are meant to add {productname} to an existing {sle}
23 | installation that has no other active use case.
24 |
25 | For example: You have installed a machine with {sle}, but it has not yet been commissioned to run
26 | a specific application, and you now decide to make it a {productname} cluster node.
27 |
28 |
29 | ==== Disabling Swap
30 |
31 | When using a pre-existing {sle} installation, `swap` will be enabled. You must disable `swap`
32 | on all cluster nodes before performing the cluster bootstrap.
33 |
34 | On all nodes that are meant to join the cluster, run:
35 | ----
36 | sudo swapoff -a
37 | ----
38 |
39 | Then modify `/etc/fstab` on each node to remove the `swap` entries.
40 |
41 | [IMPORTANT]
42 | ====
43 | It is recommended to reboot the machine to finalize these changes and to prevent accidental reactivation of
44 | `swap` during a later automated reboot of the machine.
45 | ====
46 |
47 | === Adding {productname} repositories
48 |
49 | Retrieve your {productname} registration code and run the following commands.
50 | Substitute `` for the code from <>.
51 |
52 | [source,bash]
53 | ----
54 | SUSEConnect -p sle-module-containers/15.2/x86_64
55 | SUSEConnect -p sle-module-public-cloud/15.2/x86_64
56 |
57 | SUSEConnect -p caasp/4.5/x86_64 -r
58 | ----
59 |
60 | Repeat all preparation steps for any cluster nodes you wish to join.
61 | You can then proceed with <>.
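Before you bootstrap the cluster, you can verify that the modules and the {productname} extension were activated on each node. A minimal check, assuming `SUSEConnect` is available on the node (the exact output wording may differ between versions):

[source,bash]
----
# List all activated modules and extensions for this system.
# The Containers and Public Cloud modules and the CaaS Platform
# extension should be listed as registered.
SUSEConnect --status-text
----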
62 | -------------------------------------------------------------------------------- /adoc/docinfo.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | https://github.com/SUSE/doc-caasp/issues/new 4 | SUSE CaaS Platform 5 | 6 | https://github.com/SUSE/doc-caasp/edit/master/adoc/ 7 | yes 8 | 9 | 10 | SUSE CaaS Platform 11 | 4.5.2 12 | -------------------------------------------------------------------------------- /adoc/entities.adoc: -------------------------------------------------------------------------------- 1 | //// 2 | Linux 3 | //// 4 | 5 | :grub: GRUB 2 6 | :grub-efi: GRUB 2 for EFI 7 | :gnome: GNOME 8 | 9 | :xgeneric: X Window System 10 | :xvendor: X.Org 11 | :gnomecc: {gnome} control center 12 | :nm: NetworkManager 13 | :fspot: Shotwell 14 | :yelp: Help 15 | 16 | //// 17 | Applications 18 | //// 19 | 20 | :aa: AppArmor 21 | :aareg: AppArmor(R) 22 | :cpufreq: CPUfreq 23 | :crio: CRI-O 24 | :gpg: GPG or GnuGP 25 | :helm: Helm 26 | :jeos: JeOS Just enough OS 27 | :kdump: Kdump 28 | :kexec: Kexec 29 | :kiwi: KIWI 30 | :kprobes: Kprobes 31 | :kube: Kubernetes 32 | :mariadb: Maria DB 33 | :musicplayer: Banshee 34 | :musicplayerreg: Banshee(TM) 35 | :mysql: MariaDB 36 | :nautilus: {gnome} Files 37 | :nvidia: NVIDIA 38 | :oprof: OProfile 39 | :ostack: OpenStack 40 | :pk: PolKit 41 | :postgresql: PostgreSQL 42 | :powertop: powerTOP 43 | :salt: Salt 44 | :selnx: SELinux 45 | :stap: SystemTap 46 | :sudo: sudo 47 | :tf: Terraform 48 | 49 | //// 50 | Company and Products 51 | //// 52 | 53 | :suse: SUSE 54 | :novell: NovellSpecial characters: 55 | :suselinux: {suse} Linux 56 | 57 | :cap: {suse} Cloud Application Platform 58 | :scf: {suse} Cloud Foundry 59 | :pkghub: {suse} Package Hub 60 | 61 | :obs: Open Build Service 62 | :obsa: OBS 63 | 64 | :scc: {suse} Customer Center 65 | :sccreg: {suse}(R) Customer Center 66 | :sccurl: https://scc.suse.com/ 67 | 68 | :yast: YaST 69 | :yastcc: _{yast} Control Center_ 70 | :ycc_runlevel: Services Manager 71 | :ycc_services_manager: Services Manager 72 | :ay: AutoYaST 73 | 74 | :susefirewall: SuSEFirewall2 75 | :susefirewallfiles: SuSEfirewall2 76 | :suseconnect: SUSEConnect 77 | 78 | //// 79 | Products 80 | //// 81 | 82 | :opensuse: openSUSE 83 | :opensusereg: openSUSE(R) 84 | :sle: SUSE Linux Enterprise 85 | :slea: SLE 86 | :slereg: SUSE(R) Linux Enterprise 87 | :slert: {sle} Real Time 88 | :slerta: {sle} RT 89 | :slerte: {slert} Extension 90 | :slertreg: {slereg} Real Time 91 | :slertereg: {slereg} Real Time Extension 92 | :sls: SUSE Linux Enterprise Server 93 | :slsa: SLES 94 | :slsreg: SUSE(R) Linux Enterprise Server 95 | :slemm: {sle} Maintenance Model 96 | :sled: SUSE Linux Enterprise Desktop 97 | :sleda: SLED 98 | :sledreg: SUSE(R) Linux Enterprise Desktop 99 | :sdk: SUSE Software Development Kit 100 | :slreg: SUSE(R) Linux 101 | :slepos: SUSE Linux Enterprise Point of Service 102 | :sleposreg: SUSE(R) Linux Enterprise Point of Service 103 | :hasi: High Availability Extension 104 | :hageo: GEO Clustering for {sle} {hasi} 105 | :susemgr: {suse} Manager 106 | :smtool: Subscription Management Tool 107 | :smt: SMT 108 | :rmt: Repository Mirroring Tool 109 | :RMT: {rmt} (RMT) 110 | :mos: MicroOS 111 | :soc: {suse} {ostack} Cloud 112 | :ses: {suse} Enterprise Storage 113 | 114 | //// 115 | Product-specific 116 | //// 117 | 118 | :productname: {suse} CaaS Platform 119 | :productnamereg: {suse}(R) CaaS Platform 120 | 121 | 122 | //// 123 | User/Group Names 124 | //// 125 | 126 | 127 | //// 128 | Use 
exampleuser in text 129 | Use exampleuser_plain in screen, command, option and prompt 130 | //// 131 | 132 | :exampleuserfull: Tux Linux 133 | :exampleuserid: tux 134 | :exampleuser: {exampleuserid} 135 | :exampleuser2full: Wilber Fox 136 | :exampleuser2id: wilber 137 | :exampleuser2: {exampleuser2id} 138 | :exampleuser3full: Suzanne Geeko 139 | :exampleuser3id: geeko 140 | :exampleuser3: exampleuser3id 141 | :examplegroup_plain: users 142 | :examplegroup: users 143 | :rootuser: root 144 | :ovs: Open vSwitch 145 | :netteam: Network Teaming 146 | 147 | // daemons 148 | :systemd: _systemd_ 149 | :crond: _cron_ 150 | :oprofd: _oprofile_ 151 | 152 | //// 153 | Virtualization 154 | //// 155 | 156 | :xenstore: XenStore 157 | :lxc: LXC 158 | :xen: Xen 159 | :xenreg: Xen(R) 160 | :kvm: KVM 161 | :vmware: VMware 162 | :vmhost: VM Host Server 163 | :vmguest: VM Guest 164 | :dom0: Dom0 165 | :lvmcache: _lvmcache_ 166 | :bcache: _bcache_ 167 | :libvirt: _libvirt_ 168 | :libvirtd: _libvirtd_ 169 | :vmm: Virtual Machine Manager 170 | :qemu: QEMU 171 | :qemusystemarch: qemu-system-_ARCH_ 172 | :pciback: PCI Pass-Through 173 | :usbback: USB Pass-Through 174 | :vgaback: VGA Pass-Through 175 | :lvs: Linux Virtual Server 176 | 177 | //// 178 | HA 179 | //// 180 | 181 | :ha: High Availability 182 | :ais: OpenAIS 183 | :stonith: STONITH 184 | 185 | //// 186 | Books 187 | //// 188 | 189 | :deploy: Deployment Guide 190 | :instquick: Installation Quick Start 191 | :admin: Administration Guide 192 | :user: User Guide 193 | :reference: Reference 194 | :startup: Start-Up 195 | 196 | //// 197 | Book Abstracts 198 | //// 199 | 200 | :abstract_installquick: Lists the system requirements and guides you step-by-step through the installation of {productname} from DVD, or from an ISO image. 
201 | 202 | //// 203 | Platforms 204 | //// 205 | 206 | :x86: x86 207 | :amd64: AMD64 208 | :zseries: z{nbsp}Systems 209 | :ipseries: POWER 210 | :ppc: POWER 211 | :power: POWER 212 | :intel64: Intel{nbsp}64 213 | :x86-64: {amd64}/{intel64} 214 | :arm: ARM 215 | :aarch64: AArch64 216 | :arm64: {arm}{nbsp}{aarch64} 217 | :powerlinux: PowerLinux 218 | :powerkvm: PowerKVM 219 | :mac: MAC 220 | :hwmac: hwtype.MAC 221 | :confmac: config.MAC 222 | :btrfs: Btrfs 223 | 224 | //// 225 | Miscellaneous 226 | //// 227 | :cncf: Cloud Native Computing Foundation (CNCF) 228 | :ptp: Precision Time Protocol 229 | :ntp: Network Time Protocol 230 | :altf2: kbd:[Alt+F2] 231 | :admin_node: administration node 232 | :Admin_node: Administration node 233 | :Admin_Node: Administration Node 234 | :master_node: master node 235 | :worker_node: worker node 236 | :Worker_node: Worker node 237 | :Worker_Node: Worker Node 238 | :cluster_node: cluster node 239 | :Cluster_node: Cluster node 240 | :Cluster_Node: Cluster Node 241 | :smaster: {salt} master 242 | :sminion: {salt} minion 243 | :dashboard: Velum 244 | :kmaster: {kube} master 245 | :kworker: {kube} worker 246 | :kubeconfig: `kubeconfig` 247 | :kubectl: `kubectl` 248 | :tupdate: `transactional-update` 249 | :caasp-cli: `caasp-cli` 250 | :skuba: `skuba` 251 | :psp: PodSecurityPolicy 252 | 253 | //// 254 | Prompts 255 | //// 256 | 257 | :prompt_root: root # 258 | :prompt_root_admin: root@admin # 259 | :prompt_root_master: root@master # 260 | :prompt_user: {exampleuserid} > 261 | :prompt_user2: {exampleuser2id} > 262 | :prompt_sudo: {exampleuserid} > sudo 263 | :prompt_bash: bash-4.3 # 264 | 265 | //// 266 | Integrations 267 | //// 268 | ifdef::env-github[] 269 | //Admonitions 270 | :tip-caption: :bulb: 271 | :note-caption: :information_source: 272 | :important-caption: :heavy_exclamation_mark: 273 | :caution-caption: :fire: 274 | :warning-caption: :warning: 275 | endif::[] 276 | -------------------------------------------------------------------------------- /adoc/images/airgap.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SUSE/doc-caasp/d093b564eaef29fe4de2d85121bfe239a8124e8a/adoc/images/airgap.png -------------------------------------------------------------------------------- /adoc/images/caasp_cluster_airgap_network.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SUSE/doc-caasp/d093b564eaef29fe4de2d85121bfe239a8124e8a/adoc/images/caasp_cluster_airgap_network.png -------------------------------------------------------------------------------- /adoc/images/caasp_cluster_components.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SUSE/doc-caasp/d093b564eaef29fe4de2d85121bfe239a8124e8a/adoc/images/caasp_cluster_components.png -------------------------------------------------------------------------------- /adoc/images/caasp_cluster_software.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SUSE/doc-caasp/d093b564eaef29fe4de2d85121bfe239a8124e8a/adoc/images/caasp_cluster_software.png -------------------------------------------------------------------------------- /adoc/images/deploy-loadbalancer-ip.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/SUSE/doc-caasp/d093b564eaef29fe4de2d85121bfe239a8124e8a/adoc/images/deploy-loadbalancer-ip.png -------------------------------------------------------------------------------- /adoc/images/logo_cilium.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SUSE/doc-caasp/d093b564eaef29fe4de2d85121bfe239a8124e8a/adoc/images/logo_cilium.png -------------------------------------------------------------------------------- /adoc/images/logo_crio.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /adoc/images/logo_dex.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SUSE/doc-caasp/d093b564eaef29fe4de2d85121bfe239a8124e8a/adoc/images/logo_dex.png -------------------------------------------------------------------------------- /adoc/images/logo_etcd.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | -------------------------------------------------------------------------------- /adoc/images/logo_kubernetes.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SUSE/doc-caasp/d093b564eaef29fe4de2d85121bfe239a8124e8a/adoc/images/logo_kubernetes.png -------------------------------------------------------------------------------- /adoc/images/logo_kured.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SUSE/doc-caasp/d093b564eaef29fe4de2d85121bfe239a8124e8a/adoc/images/logo_kured.png -------------------------------------------------------------------------------- /adoc/images/oidc_flow_cli.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SUSE/doc-caasp/d093b564eaef29fe4de2d85121bfe239a8124e8a/adoc/images/oidc_flow_cli.png -------------------------------------------------------------------------------- /adoc/images/oidc_flow_web.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SUSE/doc-caasp/d093b564eaef29fe4de2d85121bfe239a8124e8a/adoc/images/oidc_flow_web.png -------------------------------------------------------------------------------- /adoc/images/rbac-configure-kubectl.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SUSE/doc-caasp/d093b564eaef29fe4de2d85121bfe239a8124e8a/adoc/images/rbac-configure-kubectl.png -------------------------------------------------------------------------------- /adoc/images/sm_logo_cilium.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SUSE/doc-caasp/d093b564eaef29fe4de2d85121bfe239a8124e8a/adoc/images/sm_logo_cilium.png -------------------------------------------------------------------------------- /adoc/images/sm_logo_dex.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SUSE/doc-caasp/d093b564eaef29fe4de2d85121bfe239a8124e8a/adoc/images/sm_logo_dex.png -------------------------------------------------------------------------------- /adoc/images/sm_logo_kubernetes.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/SUSE/doc-caasp/d093b564eaef29fe4de2d85121bfe239a8124e8a/adoc/images/sm_logo_kubernetes.png -------------------------------------------------------------------------------- /adoc/images/sm_logo_kured.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SUSE/doc-caasp/d093b564eaef29fe4de2d85121bfe239a8124e8a/adoc/images/sm_logo_kured.png -------------------------------------------------------------------------------- /adoc/images/src/caasp_cluster_airgap.drawio: -------------------------------------------------------------------------------- 1 | 7V1rc6M2F/41mWk/2COJ+8fETnYzzabppm/b/bRDQLZ5F4MXcC799ZUAYUCyjW1uvmR6sWUhQOfoOVcdXUmj+funwFzMvvg2dq8QsN+vpPEVQlBG6Ir+A+yPpEU19KRhGjh22mnV8Oz8i9NGkLYuHRuHhY6R77uRsyg2Wr7nYSsqtJlB4L8Vu018t3jXhTnFXMOzZbp869+OHc3SVlWRVz98xs50xm4NVSP5ZW6y3umrhDPT9t9yTdLtlTQKfD9KPs3fR9ils8cmJrnubs2v2ZMF2IuqXDB5cB+WN2awNP56Xhiv4c879+tA1ZJhXk13mb7yaBlG/hwHpPXemwRmGAVLK1oGOH2N6INNTuAvPRvT4cGVdPM2cyL8vDAt+usbYQfSNovmLvkGyceJ47oj3/WD+Frp7u5G0ehlE9+L7sy541IOGfnLwIlv/YjJVN2YrjP1SLuLJ+Qdb1zzBbtPfuhEjk+bLfLqpLd084qDyCFUeyh1ePEj8i65DtfpgJFPH4+8mv8D554KxH/kF5+8hxPRR5LpV36u0+mn4+L3XFM6958wmcIo+CBd0l81ZSinTJOuBU1Xku9vK85SGLfMckwla2mjmXLzNBt9RW/yISX5DuSXePJ/MT3C03P6ogj87Qc/wsiMZxOBX54fbsn/IHls8PyEfq2VISbxX8cMUQOdIeAIbSgqR2hJERCaMODhhH6T3/5yxhY0v4X39z8f/nAeB/IAcrTCNgG69KsfRDN/6nume7tqvSlSc9XnwadzFdPw/ziKPlLUNpeRX6QwmcTg4x/yZQCGALGGb3TAITIk1jB+T2+RfPvIf3vCgUNmgNJ0vHEhhoRRLLyJ1dPFFpnBFEcbOuopsegEbSR4gF2yMl6LkqL+JaoIlmgYJWuCyNum1+Xd3dhQ1abXZbL8yosyW61tArUOCosXIolbvJomQmnUGEobHAtcIdWlE207r+TjlH4cEfoEPh3syTU9zHqQO+Y6HSFkb2GNOiBbKZFc40kuiQQzaozkUO0esWHWkkC2amj7QDbRH4LomurilNquGYaOxZrvHHd1a5vvRBpzXTayGf1RBBAHSAy1osSQ5LolRnwpmQ7zI9dh4TteFOZGfqINOTYmOmZRwYSwxIjJkCu2zJ5tf80CdcqnkFcs9LYVi6ps0jfFQuWkClX3W1MsDG0MNK1j6bFSH6Sa1AdIYWebAqFKrSoQUAUc6dpcpWCo5Bcp3GeBdixG8LsT/cPuQT5/W70W+bZ6EfqFvccBmKIdmehRJDQsCh8ItHqlj5izNZlDsbH/5rm+aVNN2LR+mFMcctxPFnNUZFYGNByiMMyYO7adrAscOv+aL/FQlMzp5JBxlZsrZcxQ6obcexqvoZ1V5uq6zOblzqFW5ihNH/8q74oUoRmRp0CCRcIm33ZjqBXHsC7+ZBIS1i5DWw08oembzaUcH6g/l9T9Gk/4IIxx7JoKPH3xvvpxZTslg9DO+4/ygKcEjnLWWTJc8RZbjLYtgtYMF4kvfOK8U/zeYD7nZbLne7iqOM5WSR/NucxWSzlWlwEngaEuDxXEC+F8e+1yWOV9Oc8fYYTnqauVohaPVe2Z6P2mqqEMZb0IRSqvWkFZTNgGbXV2u+5sIJjZPKmprmcK1xHpWAfoS1Wduz3Rl2S1ZKmXQndl9Uoteah27l+zJ2DNOuCjSfceYR7XdbwpNSH9SfRm0kAiGJB/c3oa9V2ajocDMmd3K6XtbjQjnBem/b8uvZUCQ2CHtk98ylZva7HztPU8BjyH63kDMISaoRaYBvVd0WPofxGnu4tT2dju80aiMEdzclTh9fYLFbc5m1StilKEmE3UkkYEOUr+WAaEWuupCHemIgA38BbtCLF38V89k5858RheGgJtVJjP0ZyfT+pUEwVDtJOjr6BvpuZnXtkEvVQ2YVVl0+iFrolK1rBUzhMr9zcO649Sf3fDuiaHMIKQOI4se+c4+BEiUebwYySTO0ciPmXBclxnOT+tiedEQPcTv3ZdVHVbkikRuC1zttvIny8IVsegs8aLudUKK3okmeeRd0ZWt9ZEnFOUbBuZpAZm0EuBP4n3OmZYWdDDmuKFSmlD5sIZhDh4pXNx+kiJZGVoFLMzod75mhX4hbvQ47pUx/aNFR+gxoGKahzzstSsxwmCqEX8kFniABsiedL0qvrdKWxCClYbsWhT501tGIDu7gxlH3s6w/rqUqFpzC/pvplXpTssQRwRw3BWs+XNKNj07Gol00XW9Y6nl7m42wXm/eZ5OwJKFREQ1Y6Ah/E47/cNCVKZx8nkEHEbRiCA/NaglnGEAclxpIwdqSdJrqqC6P1agAqvsCbRufUO+04icVVSWg4NwmUSt45kKw2lMbgDdcmBBMr5eKUxmgvLQQZeF+joAXQw27I30MGnnZ8vdDAYrQE6gCyV4oD1AAkaGqVx2wMSPrd39PV+8PuR6nlEqSvHarOU/+70PKB3AdZHCrpVt/rUHvk70L/IrSMr2Szq4mAwjzf8B/Uuqq79vJJcCsyIMkXbXWh8cpxN3uWkZp0LHXcfDuNziqamN30zP05r5pVS7Kn7XBSWTHgxApqTR8wvt90I6JdAMngsvLYsHNKM3pG7TKpbHLsRkAzpBzYOSsMFflpSRxoPjB3sBbakakn3RTJzo6aQodTjedDAEEpG7q9wE6LToyGrZ9G8BcEWSCmkZdo156G0ZUNwxUmA1rUBgXhnPJ3iwSLw309dyHav3mgdS9m8jM2J3NOSspVdbf1I+JTKGJHKjXUJnKX+RurQXtddlg/rr6ebtRrND0W864iikhW5xwn8wiChxpeVaxl+pE7g50h
hpGpNB+Yp6Imyjnhl3bXNRZY0WONywtBW8NaSMNxudkPVJFOty0vLLTS9+2A8MC5ifrc6LavaLN/YHRuo04L0imu6X/Y34r1ioTXD9tI9NT9weWdE935gvdvCfj30ix2w/IyKy4+puD1Zfzo6VbWpBVrK/cqF0nn/x9MyvuwzdulOdFbDoA/+zMqbn3k6bmTkmjySSjFlaZBO7aEpDBDoQ0kSDt2CC5LfhTOLGeNUzFDI6oN1JlIZuuemOHLck9NlypsNu4+uSryX5Z5ilyfYMsIVJYtBCwe3rzjBrlhEsTMyKKrZZjjL5N3avYNcyKXGCg0cInPrbjKJ1x157gV9z/n7lJ6DMpyHlomH8VWLwAnx0GHT0sIalTW5nLOlqohnFSgulKU1JSllfpkeB5dsOTRiO5ek6LyVS6wk9vk9da20sWW1iOWCUrWyAFCUpgBFFm1fvrBIlyxSqZ5xu0yC9mCSHNl5bZkjjzOPz4WKJze1pyBatY+d+ZQ8uuu80BegJKK1ylgNs+9x0TPPwuEwfJ1u5DPBjvc6SFbhPAPR8TPNUYzfLnK8mxKJ3FS5PN7ONybKfI7n0/Xot+tPt0c6y5KChqhYkE4XnKPU8iwLjkv7/fHP6/vH26/1zjN4gXirsbEBWBp0qQooY8CubUCZd2mfog0IEb+JQJK6tgOhyqMPN+8n4uGsPzAsV/V8sv0wPfF8yrxn61IvYp9NQJUL0CgNUVIR1Ws8zRw+CLQ+AiirvXKJCjaHs8zZtT1a2K9oPVR4N+v5bpmt7WiTuOS1VE+ICZY227eW4A6Z4n0Bjh4AB0su6Q1w8LHp8wWOdarCfsBRMoDr2WwvSeU4dXub7bOzBE5is325LpugFmfLhcP4pXiKBXGhDDj1GnWuXgsUqBOsRy+c+86LmwpKRRGzkow9w8tNxzocIwEEtmUPmJ/3zU0Dc2J6NdfL6+Xsd8/+oiLMpWk/72AlUCud69ZqwFLlHWLWMozI6JSV2XFTJ7Z+BKGF7tGLJS6dPSU6RzKVV6IuSJYnW5UjtdpFsQpRubOmGJQMbYhK+cxdyx6odlJjep1Lb+XEOyWXHtuqfyRnOOlaEVrYicbr9tyX+0Owub8E1E39m9mjDwUHdY+SunSk8bflS5wxfTKHt+9b9SfDg1o8lzJgObZsJ0wqEw50XRa1FXKjchp5g8eA8iYW1QkHL6ZLhUtQq3LYgy3qskI34YDVXzH21P12dZ3XO54jP6AaAX2O5JShlyA7WgoHrw6Z/a2qyVkleofpjLXgK4dFYUEespIGJG8AicPKw1U4DqkL5giSdz90N5GLJ9FBnIGtZYC/vzpBtDTd73PTmjnb42d1qMuQ2/1nqDzYiHiF1aZuQFvupH7xeQXAIahaAosdhtB1CSyuTt4WhVnbckFDGjBTXS7M2wfmlWs/KqEd5iXaXdl8a4V7mZEpKuP66NvUcgO/PD9//pVj8uO04Hge3LyqazHYJKCnbLmvhdZCFhrgfcNhOOPIfhSpI1K5HqLWtT0FBQX8n8zImvG21Je4mP8cx4fxXuysCtq0bZLVaIb4e/jhWbPA9wi4JM/QTrbazvyplcMMghgdy2Bux2EtKLJ/Yc9zZU+olH3PnfOnxLsPLtUT69ayYeVdbLUnSe+lZUMdlZL2NblQmJi7QjUOvQCCNmofZwuuUJbnlUAfaXsWHpR6nPp4LqKyi3rO4KAe9VzStAKN6wmnSHLZ05bt/mxBnWexm364JU4zAr0DYDLrqmPEREYZz1hFsnUAyF9hpNkVDTsm2Ao/E8fE3qFlbZ16t1f9RsRq29YUWs6QTxuykGIb4NerWtvo3NVF1A/wK9WR3oZ8atmFq6tXzeOezjtkM8Vv3fEX54Z4dfpmVc0oijflQMRjQ7OsZ2ZoZPuL29hRzKfTZGeygWt77ngcD52Vi8b8dxng4TKML2ov1LCHI4ZT1wDvyWYVrVryxFxSW5sXrSw2cWynQmYPnkOeMX7Frr8QJPFdMKeHmCOjMuYoeueY06sEoVPFHHismMPv9COY8/uCughuvanj4Qv2HAf2KFrZod899ghKFfMGGbh+um/ZLKN4FabbtboUFwoYgvxfMY1JEyQiQ1F2KWqMgAof2jZt2/dOrXKBoHRED6riCTbPn+Lsl88V7v5gD6gJTmh/enq4v34c1VtQugf7aYQFjdmmqzZI8Ca//eWMLWh+C+/vfz784TwO5AEfw2UpM7bzynJmHsbXT6TTKPFckclJu5A75nqdt/7CpdSY9vdJWAPfIFA+kKfFkxGEPCPaoc/xjO1bP+KlFeCpE8a3rcQ0lxq+WeZIKcNJrrzjroYCvkLCC85X+OEvsH8hYnUitlmFWUhEwa7JL3+Skb7G/xUkTP7vmchikORU8kbqhdTrSK0wUO6M1LxVkRJztKptM0om7iK7m5Dd0CjJ7syurF92k6+B70f5MB19uS++jWmP/wA= -------------------------------------------------------------------------------- /adoc/images/src/caasp_cluster_airgap_network.drawio: -------------------------------------------------------------------------------- 1 | 
7Vxdd5s4E/41Oae9sA/f4MvGSXZ7umm99fbs9mqPArLNG2x5QU7s/vpXAgkjJDCOMXZd+yKBQQgxM3pmNDPoxhzO17/FYDl7RAGMbgwtWN+YdzeGYWimTv5Ryiaj6JqjZZRpHAaMtiWMwx+QN2TUVRjARGiIEYpwuBSJPlosoI8FGohj9Co2m6BIfOoSTKFEGPsgkql/hwGeMaruDLYXfofhdMYe7RludmEOeGP2JskMBOi1QDLvb8xhjBDOjubrIYwo9zhfsvseKq7mA4vhAje54Z+7P9Hzt3EvuEtev//t3YLPD7hnmFk3LyBasTd+DAnbYkIbw/gF0oN3HxcYxgsQvWdvgjecPTFaLQJIn6DdmLevsxDD8RL49Oor0QhCm+F5RM50cgiicLogxxGckBHfRuAJRiOUhDhElOxD+hhygTwVh0QEf5QaPCGM0bzQ4APrECP6pAla4AcwDyOqa0O0isN09J8hYfqtzC3GQNoXXBdIjHu/QTSHON6QJuyqxQTJVNnkmvy61QvDZbRZQSV03hAwXZzmXW/FRQ6YxNTSS+wvn770JvYn9PrgfVx/+vyfu+zZtiQQGBD1ZacoxjM0RURw91vq7Zb6B6J8S0XzP4jxhs0+sMJIFBzhWbz5hwq5r2s2J3wnhJ7W16wBp9ytmSJkZ5vi2QjGIXlrKuCUGIBklioOfUCCQYw/0OlKCAu0gJz2EFKOpDfUipdeZMPXDdb7CGCqtukjDM2iA1mHOHsNw2an3/krkuPt8OkJH32CY/ScT34j1yXK6cqpyEgJGaYP6+afxTANxFOI60StVs0YRgCHL+JA2tczR0KJb8sAYEhRAqOYYihFHyei8/qJiMWZ0qOXEMjE0WyT0MlL7hhmoE1nd1mPyZzEavSQYIKjwDwMglTDY5iEP8BT2hWV4BKFC5zyxL69se849NwC/3maAtgQRShOn2tO0p9CoxqgiwQluVlig9kag6Ku1EztSiyi887lNmgjPLKxSrDOR5Q7Qs8DUwQ6q++ZYjdoMkmIupb1Kh9nI1WrnRD1Bul+3apBylVqb5OUWZ6yJuaG6ugmyRFt0qCpSbJbMElK+Q0k8Y2/je/pXAdgPKL/o1VCefqLC+6EclPijaVLEjmuK5GffE8Ncu5VVPgQgrtQK52tkdcIbG2tPHtGpZ2nJ2UnRfBL/AgkxHKVXBO9XYeAr3uKDkENUHbgEdQOszDPs0UCswqCHkURWa2lLt0MLGHKSbQKupjluUtQ6SvUe5VExAXfQDMeHgZ2OwDguiICGO5AggDdUECAdzQEMDtfTDiGLaKAcfBKogFwtzNP7YbzVNfUetDRPLWr7PFXOA2T9BFlB51TgvCFk94RTx2DcJHy8uOc+PvJe96MjKvQUtIhkCyzuMwkXFMpZd44jO9fYOaU6xkwZLERQZ6aqDjZQqw4H9PfESIJEvBIQDCZpEDAAW2+ntL4V3+e+AD207uWcZjAPlguI9I/fey/Seq+NlxLHAQuuqaL6KIIVfDpXsSW43kXHQUq9vAStig00F0RhUzPfQsOtYkuTlfool4Eel4p1mWUNCMbGLur/VWgLgcc+DJiRlyvJMWvJZ3PSIVg736H0TxvLC8Sr5gkYlInkKS5fVt0eU6OStYZo5Knl1ZIeraaOSUqub84KrmVqATAmPwbkdeboJhBz2JBk2ESNn0dPdKmwH9mbtQVnM4QnPRTg5PdETgVHSFHRJw3eUHVeZ3q+MleqZ0WAS3PKJ/3Ii5XaBl3VgmZlimzhtnUk/FmFKNg5ePtkg9kCRdtiAJ4hZ8S/AT/TpLTII5pyxEgSxEAOhrimLJ5G63S2+4AViTyJjGilo6nZ/YGKzHifxh0CXEkfQdudRUj4sUVO+Fl0BG6qL1g7aSyO1oeQF0C0FY2X8nKpuYkU4yTCdyQ5nmezeeTWZnJV2T4f6JkfofJe6vCXdim2C0e4eBr4ezs0Ny92Xd1od98zdJq5r5O+Qta9Yv4FiwptNO3iJD//GZdlOCoUrvsUm7ZMm3JrzAHXa5kZO8V+qtYLB66gy8hwdarxhQ1JmHs6UBp3AGtiRP0hofHCnpjK/xR61h2Si4l+fr4F+lpTP+W9GTrj1BZ78grV6tClaSOzX7LkdhPVggS+5UJ4aNVlypy/IrUIFuJPoIF0dS4YVLwAsXlnlpctmxvjx0w8qxS/t4zvHZjRm3UArfp4BsNPfzOAka1wxQ8fDKNIKALdiE5drW4gsUlyyDwBBK4V+z5ICxRRIIsW+GxeV16bN2XAtklJBnsCgT85EDSNDR0YiCRPw8qAIlcFHRFkxOjiWeUFn+nhhJDdiKTZ7SE6NJ8QkcTOX96/12RNpoR09+bqwP2l8b/kzvkPABW4v+lMd4+O72XjdaPzXJZ+83JT8l53Ts71stfbw2/fux9uTTOl3X+9FgjB1kD5D+nbx3nld4XLQTTlV2djoUgV61ebMSyxHzLOjnzK4vz8tDkZUmgjP5noP9ehcOj+bxqO4bLi/P8y4I4/VwwZQ9IYjpcBKXYiBA8KXC8xNeJ50PfV0niybMte2dURVjt+2hOKwIVGy1YewRcZNHsyFlx2oH5drP0EZ9d/jgviwNJBck7OzpeYbNaX2S3rVDY8RniVxQ/39AP8AnrwXmWeMQIAxbf6Q32Cu05dTDw5hoQs6LGh3Wv9XXX9gSh94xWlJJ3k/fq9D0GktnPEHs8XkWIJWdw+U5ChPouqwJ4X9SwnUolYg4DriJAMVJz5VOZG7G6rR1F2sO1KtWFKvYGcBSYVsaM9rIBcgiP779BxThaPUWhfxWjJEZPShIrRGmqSnyPJ0pP9gOOnNhx9yrNPeecjd3086eDt2ZIbyVcAJtCA2YVK3HfLSkb31SkyudwHLOuPTnIRtCqSbBlk9CkxnybdDo3t+PU20R5tRjU0/qGx+u4eLCqLTdDVJ+e6XTlVzgdpafPH4wOxRhJqvk3bnxF03AlcihY2U49WEnjcjoAK0deFmU+a0Rf9a8YLJIlUasbcRO8B/JXwDSNgVje6Iphiulcs1IyBrZYLd/OTnfW4Dj72qlf8i07YBSjIx1+HyVEaJqAoPxRzd6bdeaBJ/VnO7+OG+dI5fuDejeuvC1teRvgUnt7YNe1PxKS1mwgKjt9V6Csw5BaZ8/Ry9//HAaVxwdGW87bfAjmoRxh5NVWq3n0wceoUYFXA5mqa8AKOoZWOAoXcJhvML5FvNayEGpH8aDwg1vaAsrS5M+LzXaSEuR0u415phfb3eDN+/8D -------------------------------------------------------------------------------- /adoc/images/src/caasp_cluster_components.drawio: -------------------------------------------------------------------------------- 1 | 
7V1tk6K4Fv41XbX7QSsJ7x+77Xama3tm+07f2t35NEVLVO4guID9Mr/+JkAQSFRUIGjbtVujMQTIOXnOa06ulNHi7VNoL+dfAgd7Vwg4b1fK7RVCUEXoiv4HnPe0RbfMtGEWuk7Wad3w5P7CWSPIWleug6NSxzgIvNhdlhsnge/jSVxqs8MweC13mwZe+a5Le4a5hqeJ7fGtf7tOPM9adU1d//AZu7M5uzXUrfSXhc16Z68SzW0neC00KXdXyigMgjj9tHgbYY/OHpuY9Lrxhl/zJwuxH9e5YPrgPaxu7HBl/fW0tF6if8fet4FupMO82N4qe+XRKoqDBQ5J670/De0oDleTeBXi7DXidzY5YbDyHUyHB1fKzevcjfHT0p7QX18JO5C2ebzwyDdIPk5dzxsFXhAm1yrj8Y1m0MumgR+P7YXrUQ4ZBavQTW79FZOpurE9d+aTdg9PyTveePYz9h6DyI3dgDZPyKuT3srNCw5jl1DtodLhOYjJuxQ6XGcDxgF9PPJqwU9ceCqQ/JFfAvIebkwfSaVf+bnOpp+Oi98KTdncf8JkCuPwnXTJfjW0oZoxTbYWDFNLv7+uOUtj3DIvMJVqZI12xs2zfPQ1vcmHjOR7kF/hyf/F9glPL+iLIvB3EP6MYjuZTQR+e3q4I/9A8tjg6RH93ihDTJM/yQzRAJ0h4AhtaTpHaEUTEJow4PGEflVf/3JvJ9D+Ht3f//vwH/frQB1AjlbYIUCXfQ3CeB7MAt/27tatN2Vqrvs8BHSuEhr+D8fxe4ba9ioOyhQmkxi+/0O+DMAQINbwnQ44RJbCGm7fsluk396L3x5x6JIZoDS93boQI8IoE7yN1bPFFtvhDMdbOpoZsegEbSV4iD2yMl7KkqL5JaoJlmgUp2uCyNu21+V4fGvpetvrMl1+1UWZr9YugdoEpcULkcItXsMQoTRqDaUtjgWukO7RiXbcF/JxRj+OCH3CgA726Nk+Zj3IHQudThCyd7BGE5CtVUhu8CRXRIIZtUZyqMtHbJi3pJCtW8YhkE30hzC+pro4pbZnR5E7Yc1j11vf2uE7kcZCl61sRn8UAcQREkOvKTEUtWmJkVxKpsN+L3RYBq4fR4WRH2lDgY2JjllWMCGsMGI65Jot82c7XLNAUvkU8oqF2bViUZdN+qZY6JxUoep+Z4qFZdwCw5AsPdbqg9KQ+gAp7OxSIHSlUwUC6oAjXZerFAy14iKFhyxQyWIEv7nxP+we5PP39WuRb+sXoV/YexyBKcaJiR5NQcOy8IHAaFb6iDnbUDkUuw1efS+wHaoJ25Of9gxHHPeTxRyXmZUBDYcoDDMWruOk6wJH7i/7ORmKkjmbHDKudnOl3TKUuiH3niVraG+Vub4us325c6iVO0qzx78quiJFaEbkKVBgmbDpt/0Yas0xrEswnUaEtavQ1gBPGOZ2c6nAB/q/K+p+TSZ8ECU4dk0Fnrl8W/+4tp3SQWjnw0d5wDMCRwXrLB2ufIsdRtsOQWtHy9QXPnXfKH5vMZ+LMtkPfFxXHOerpI/mXG6rZRxrqoCTwNBUhxrihXCxvXE5rPO+nKf3KMaLzNVKUYvHqu5M9H5T1dKGqlmGIp1XraAqJmyLtjq7nTwbCOY2T2aqm7nCdUI61hH6Ul3nbk/0JVWvWOqV0F1VvdIrHqq9+zfsCdiwDvho0r1PmMfzXH9GTchgGr/aNJAIBuT/gp5GfZe26+OQzNl4rbSNR3PCeVHW/9vKXyswBHZo+zSgbPW6ETvPW89jwHO8njcAQ2hYeolpUN8VPYb+F3G6vzhVrd0+byQKc7QnRzVeb79QcZezSTfqKEWI2UQdaUSQo+TPVUiotZmKcG8qAnAD79CeEDtO/pqZ/NyJx/DSEmijwnyO9vx8ilRNFAzRXo6+kr6ZmZ9FZRP0UtmEdZVNqxe6JqpYw0o1T6za3zquP8r83S3rmhzCCELiOJ44e8fBTxCJcocfI5kqHYn4lIWJ67mrxXlNPCcC5E/8xnVR121JpkTgtizYbqNgsSRYnYDOBi/mTius7JFknkfeGVnfWhNxTlmybWWSBpjBrAT+FN7rmGNlSQ9rixdqpQ3ZS3cQ4fCFzsX5IyVStaFVzs6EpvQ1K/ALy9DjZKpjh8aKj1DjQE01jnlZGtbjBEHUMn6oLHGADZE+aXZV8+4UNiElq41YtJnzpjEMQOOxpR1iT+dYX18qtI35Fd0396rIwxLEETGK5g1b3oyCbc+uUTFdVNOUPL3Mxd0tMB82z7sRUKmJgKhxBDyOx3m/b0SQyj5NJoeI2zACAeS3BnWMIwxITiNl7EQ9SWpdFcTs1wLUeIU1jc5tdthLicTVSWk5NgiXS9wmkq0MlMXgjtQlBwqo5uNVxmgvLAcZeF2gowfQwWzL3kAHn3b+caGDwWgD0AFUpRIHbAZI0NCqjNsdkPC5vaNv94M/T1TPI0pdNVabp/zL0/OAKQOsTxR06271aTzyd6R/kVtHk3SzqIfDwSLZ8B82u6hk+3kVtRKYEWWKdrvQ+OQ4h7zLWc06FzqWHw7jc4pmtj97td/Pa+a1SuxJfi4KSya8GAHtySPml9ttBPRLIFk8Fl5PJjiiGb0jb5VWtzh1IyAdMggdHFaGC4OspI5yO7D2sBfYkmok3RepzI2aQYbWjOfBAEOoWIW/0k2ITo+GrJ5F+xYEWyCVkJbtNJyH0pUNwRUnAYZsAwLxzng6xYNlGLydu5CVr94YkqVsUcYWRO55SdnarrZ+JHwqVYzI5MamBM5KfytzaG/qrqrH9TezzVqt5oci3nVEUWkSe6cJ/MIgocGXlesYfhQp8HOiMFK3pgPzFPREWUe8su459jJPGmxwOWHoaHhnSRhuN7ulG4qtN+Wl5RaaKT8YD6yLmN+vTsu6Nst3dscW6rQgs+aa7pf9jXivWDSZY2flnZsfuLozQr4f2JRb2K+HfrEjlp9Vc/kxFbcn689E56o2dUBLtV+5UCbv/3hcJZd9xh7dic5qGPTBn1l78zNPx62M3JBHUiunLA2yqT02hQECc6gowqE7cEHyu3DmCWOcixkKWX0waSKVoXthimPXOztdprrZUH50VeG9LPcUu3zBlhGuKFkCWji8e8EpdiUiip2RQVHNsaN5Lu827h3kQi4NVmjgEJlbd9Npsu7Icy/pey7eZvQclOEimth4mFy1DN0ID102LR2sUdVQqzlbuo54VoHiQllGW5JS5ZfpaXDJjkMjdnNJhs47uWSSxj5/ZK6VLraslrFcUKpWFQCK1hagqKLtyxcWkckiteoZd8sk6AAmKZCd15Y58riL5FyoZHIzewqidfutu5iRR/fcZ/oClES0VhmrYfYjKXrmT3A0jF5mW/lMsOO9CZLVOM9AdPxMexTjt4uc7qZEIjd1Lo9X+sZElc/xfLwe/XH96e5EZ1nR0BCVC9KZgnOUOp5lwXFpf3797/X917tvzc4zeIZ4p7GxBVhadKkKKGNB2Tagyru0z9EGhIjfRKAosu1AqPPow837mXg4mw8Mq3U9n2w/TE88nyrv2brUizhkE1DtAjRaS5T
URPUazzOHDwKjjwDKaq9cooLt4Sxzdu2OFvYrWg813s36cbfMNna0SVLyWmkmxAQrm+07S3CHTPG+AEcPgIMll/QGOPjY9McFjk2qwmHAUTGAm9lsryjVOHV3m+3zswTOYrN9tS6boBZnx4XD+KV4jgVxoQo49RpJV68FCtQZ1qMXzr304qaCUlHErCRjz/Fq27EOp0gAgW3ZA+bnfXOz0J7afsP18no5+/LZX1SEuTLtHztYCfRa57p1GrDUeYfYZBXFZHTKyuy4qTNbP4LQgnz0YolLH54S0pFM55WoC5IVyVbnSK1uUaxGVO5DUwwqljFElXxm2bIH6lJqTG9y6a2deOfk0mNb9U/kDCfTKEMLO9F40577an8ItvdXgL6tfzt79KHgoO5RWpeONP6xek4yps/m8PZDq/7keNCI51IFLMeW7YTJZMKRrsuytkJuVE0jb/EYUN7Eojrh4Nn2qHAJG1UOe7BFXdXoJhyw/ivHnuRvVzd5veMpDkKqEdDnSE8Zeg7zo6Vw+OKS2d+pmnyoRO8om7EOfOWwLCzIQ9bSgNQtIHFcebgaxyHJYI4wffdjdxN5eBofxRl4sgrxjxc3jFe292NhT+bu7vhZE+oy5Hb/WToPNiJeYbWpW9CWpdQv/lgBcAjqlsBihyHILoHF1cnboTAbOy5oSQNmqsuFefvAvGrjRyV0w7xEu6uab51wLzMyRWVcvwYOtdzAb09Pn3/nmPw0LTieB7ev6kYMNgWYGVseaqF1kIUGeN9wFM05sp9E6ohSrYdoyLanoKCA/6MdT+a8LfUlKea/wMlhvBc7q4Y27dhkNdoR/hG9+5N5GPgEXNJn6CZbbW/+NKphBkGMjmUwd+OwFhTZv7DnR2VPqFV9z9L5U+HdB5fqiU1r2bD2LrbGk6QP0rKhiSpJ+4ZaKkzMXaFbx14AQRe1j/MFVyrL80Kgj7Q9CQ9KPU19vBBR2Uc9Z3DQjHquGEaJxs2EUxS16mnLd392oM6z2E0/3BLnGYHeAzCZdSUZMZFVxTNWkWwTAPJXWFl2RcuOCbbCP4hj4uDQsrFJvTuofiNitW0bCi3nyGcMWUixC/DrVa1t9NHVRdQP8KvUkd6FfHrVhWvqV+3jnsk7ZHPFb9PxFx8N8Zr0zeqGVRZv2pGIx4ZmWc/M0Mj3F3exo5hPp8nPZAPXzsL1OR76UC4a+9cqxMNVlFzUXajhAEcMp64B3pPNKlp15Im5pLa2L1pZbOLUToXMH7yAPLf4BXvBUpDEd8GcHmKOiqqYo5nSMadXCULnijnwVDGH3+lHMOfPJXUR3Pkz18cX7DkN7NGMqkNfPvYIShXzBhm4frzv2CyjeBVl27VkigsNDEHxr5zGZAgSkaEouxS1RkCND23bjhP451a5QFA6ogdV8QSb589x9qvnCss/2AMaghPaHx8f7q+/jpotKN2D/TTCgsZs01ULJCBfwyCIi24bKmi/BA6mPf4P -------------------------------------------------------------------------------- /adoc/images/src/caasp_cluster_software.drawio: -------------------------------------------------------------------------------- 1 | 7Vtbc9o6EP41zLQPYXzBBh4JTdrMNOcwZTo97Quj2AJ0kC1XFgH668/KloxvIYR7ctLptNZKlsTup29XK7lh94PlZ46i6T3zMW1Yhr9s2J8almW2LKsh/xr+KpW0jVYqmHDiq0ZrwZD8wUpoKOmc+DguNBSMUUGiotBjYYg9UZAhztmi2GzMaHHUCE1wRTD0EK1KfxBfTJXUdLvrii+YTKZq6I7VTisCpBurXxJPkc8WOZF907D7nDGRPgXLPqZSeVov6Xu3T9RmE+M4FNu88PPewuNfMzZEv3uD2e1g1qPelerlEdG5+sHD78MbkHzDExILvmpYLoXurx84PE3kk5b45FGLPvRZKBAJMTQy7gJQSfxRN4MJ5VoqVYiV1i+Ko9RsY7LEMNPriJFQYH7zCD9L2s4EWaY6Q46L4qlsmRSmIqC6keBshvuMMp50bRvJH6ih6AHTAYuJICyEOg/LEaDiEXNBwNZfSw0Ei3K1PUomUvrAhGABVCAlyPoZE0rzA1vjcddJ5x3J3xksJ3J1NIPYQ7iZvBVxEuMmiiIKQ8hhRzHmMKIluwNt3qKAULlm+mzOSaLYv/Ai02De7NqGMF28zIkUDD5jFuDEkoaqtfXiUmvSdFR5sUZ4y05F0xy2dTOk1tQk63kNO3hQyHsBCu2nUNifIg4okHCMpHlYHSA/fME0yBp/fIfYMxA7AcLa7QtDWMutwAL7QPSqyLiYsgkLEb1ZS685m4d+hgOPBcRTz+v2X5k0ZIKOf7EQK+XB0FywInZAmXz1j3y/6ejiz3zdp6XqPC2t8qUB5gQUIW2XCAtojQXgvid9nQQMRXEs55mKbwl9Hry7oSGGhh7epHPlrhGfYLGhnfKk0h4bscUxBRg/Fh3z4ZHSeUfKpSLFdA4NleRV0Aha5Rok/iHO9TyQgjW7OZ1ugd1cq9t0irFX6Q3bdja/AQ/pLNbAzX7OHliu+FXtPeMIhQWQu7/nLHGryJtNEjhfeSkAelKfk4cPlgMzBn6GqRil54/r97VT/sqQD5XgyFDoSZBk0WA69GsKBzOvXHbXqRPf2VejP3OOmxQ0NXpQehpNMETRyarkTCA1gd2X3vYu2ym5bNfWEM07bbt9QqftdM9NxSdkz4Oy/u5U3N6SijUQDkzFVa4twbJll9CWzlS9VQLcASi0XaHQH4zPUrsxmZm4BAbj6WLcd7NB8Vi8mL7SPcWIg984xcbCKcGhY1U4Sjc5CUd1a1zsBSDizD7tkXAxR3QUIG9KQjwaYyRAfgKAdMzdANJyjgQQ06ogRCc3EBrCfwPgxzHjKoMRhjKBWklxfBvcy6awyFRy7TVg7G3nOBy3XUDa2XMcztl3rq9is2luu9s8fIizH5FUd3P3KBYXFoqoGGJPStABzSWHInY5Mj13KGI5z8ciLyWEHUjgWMu2c54Nh+u2mqXY4Iktx0vTSJZbGqnzXBqp8kY2t6OmkdrVU8J4Nn9ASQroFv6FAhANraANVpIogqPi88tUEBDfT3GIY/IHPSRdGZq7Es061w3nk6ae6yxfleOJcfLn5VjcvKzKzJAdOqtZNvLnunWMcWU0wYZ2wYRXao3uiVrtHEq96g7YeBzjo+yP9bJ83wA9FZzG2APnNCptiU7goEy7xBaOWZPRq3NRdudYMUx1u3yPQtjSBDhBs8ytxCrt+TyZFNEQslSrOTsp0faks5gSgYcRSjzRAuxZcYgnN5pZZ7RWjdGso8UVbsVoPT8gYUb/32Opg5K59LqYB7TnCbbVUtyC0etXaw4UbC4orK9+dh/oFFbrlIxm1ebOa4zmHstouuP3Y8zT7SztbZPnjnmUWPbF55glrmmZ9sbws9ze7hgb27daG9s
fKVg1agKSyo01nYJTLLHdXbU3Ec5qYtg/nDWaptHt7he+Hj9Adc2KHc9HhFarnadCo9ltu+90eHA23M9zVmPU/re7q7/fKB08FevsQActxzYLhG9ePDnsnyE7HDmYHbdEDl3rwskBL4nIhXhQSmdu2Kq4nrcsrHKFw11Z2J5nrMsimmrq5Ncqimr2Um+DaazDMY2lb2nuSi2r2heOSDT2uYnmVYUMx07yt7tFR2UZp71V5FavFaUfMbzJha8/7zjAwre7HecgK//qZEvfrtr6TtouVMOdO1N+nstjlTQ50To5wc2N0iUyAFVjq7T40Y72tzi5/V8dopz7Fln5C7mz3yJzns5qSV0UoKKv4suKqziJB+QdflDJsnpPf8jGYgGqggZ3Ifh/StXpi8rt9+lc3THJH9Rk6bJ08K3zZW/xxMbqFE90dYItD5W61P/Rzmt0drnmxiHYEgaRP7+farZ603DAmT+XKVH9CbDGQ5/5+P3iYenioT8axyfAWMsq8pHVroKs7kxwh7tGUFx/k57GQOsv++2b/wA= -------------------------------------------------------------------------------- /adoc/images/vmware_extension.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SUSE/doc-caasp/d093b564eaef29fe4de2d85121bfe239a8124e8a/adoc/images/vmware_extension.png -------------------------------------------------------------------------------- /adoc/images/vmware_step1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SUSE/doc-caasp/d093b564eaef29fe4de2d85121bfe239a8124e8a/adoc/images/vmware_step1.png -------------------------------------------------------------------------------- /adoc/images/vmware_step10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SUSE/doc-caasp/d093b564eaef29fe4de2d85121bfe239a8124e8a/adoc/images/vmware_step10.png -------------------------------------------------------------------------------- /adoc/images/vmware_step11.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SUSE/doc-caasp/d093b564eaef29fe4de2d85121bfe239a8124e8a/adoc/images/vmware_step11.png -------------------------------------------------------------------------------- /adoc/images/vmware_step12.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SUSE/doc-caasp/d093b564eaef29fe4de2d85121bfe239a8124e8a/adoc/images/vmware_step12.png -------------------------------------------------------------------------------- /adoc/images/vmware_step13.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SUSE/doc-caasp/d093b564eaef29fe4de2d85121bfe239a8124e8a/adoc/images/vmware_step13.png -------------------------------------------------------------------------------- /adoc/images/vmware_step14.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SUSE/doc-caasp/d093b564eaef29fe4de2d85121bfe239a8124e8a/adoc/images/vmware_step14.png -------------------------------------------------------------------------------- /adoc/images/vmware_step15.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SUSE/doc-caasp/d093b564eaef29fe4de2d85121bfe239a8124e8a/adoc/images/vmware_step15.png -------------------------------------------------------------------------------- /adoc/images/vmware_step16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SUSE/doc-caasp/d093b564eaef29fe4de2d85121bfe239a8124e8a/adoc/images/vmware_step16.png -------------------------------------------------------------------------------- /adoc/images/vmware_step17.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/SUSE/doc-caasp/d093b564eaef29fe4de2d85121bfe239a8124e8a/adoc/images/vmware_step17.png -------------------------------------------------------------------------------- /adoc/images/vmware_step2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SUSE/doc-caasp/d093b564eaef29fe4de2d85121bfe239a8124e8a/adoc/images/vmware_step2.png -------------------------------------------------------------------------------- /adoc/images/vmware_step3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SUSE/doc-caasp/d093b564eaef29fe4de2d85121bfe239a8124e8a/adoc/images/vmware_step3.png -------------------------------------------------------------------------------- /adoc/images/vmware_step4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SUSE/doc-caasp/d093b564eaef29fe4de2d85121bfe239a8124e8a/adoc/images/vmware_step4.png -------------------------------------------------------------------------------- /adoc/images/vmware_step5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SUSE/doc-caasp/d093b564eaef29fe4de2d85121bfe239a8124e8a/adoc/images/vmware_step5.png -------------------------------------------------------------------------------- /adoc/images/vmware_step6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SUSE/doc-caasp/d093b564eaef29fe4de2d85121bfe239a8124e8a/adoc/images/vmware_step6.png -------------------------------------------------------------------------------- /adoc/images/vmware_step6b.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SUSE/doc-caasp/d093b564eaef29fe4de2d85121bfe239a8124e8a/adoc/images/vmware_step6b.png -------------------------------------------------------------------------------- /adoc/images/vmware_step7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SUSE/doc-caasp/d093b564eaef29fe4de2d85121bfe239a8124e8a/adoc/images/vmware_step7.png -------------------------------------------------------------------------------- /adoc/images/vmware_step8.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SUSE/doc-caasp/d093b564eaef29fe4de2d85121bfe239a8124e8a/adoc/images/vmware_step8.png -------------------------------------------------------------------------------- /adoc/images/vmware_step9.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SUSE/doc-caasp/d093b564eaef29fe4de2d85121bfe239a8124e8a/adoc/images/vmware_step9.png -------------------------------------------------------------------------------- /adoc/network-decl.adoc: -------------------------------------------------------------------------------- 1 | // ...................................................................... 2 | // General Entities 3 | // ...................................................................... 
4 | 5 | // domains 6 | :exampledomain: example.com 7 | :exampledomain1: example.net 8 | :exampledomain2: example.org 9 | 10 | // subnets 11 | :subnetI: 192.168.1 12 | :subnetImask: {subnetI}.0/24 13 | :subnetII: 192.168.2 14 | :subnetIImask: {subnetII}.0/24 15 | :subnetIII: 192.168.3 16 | :subnetIIImask: {subnetIII}.0/24 17 | :subnetIV: 192.168.4 18 | :subnetIVmask: {subnetIV}.0/24 19 | :subnetV: 192.168.5 20 | :subnetVmask: {subnetV}.0/30 21 | :subnetnat: 192.168.100 22 | :subnetnatmask: {subnetnat}.0/24 23 | 24 | :subnetv6: 2002:c0a8 25 | 26 | // Broadcast addresses 27 | :subnetIbc: {subnetI}.255 28 | :subnetIIbc: {subnetII}.255 29 | :subnetIIIbc: {subnetIII}.255 30 | :subnetIVbc: {subnetIV}.255 31 | :subnetVbc: {subnetV}.252 32 | 33 | // Netmask for C class network 34 | :subnetmask: 255.255.255.0 35 | 36 | // DNS names (server) 37 | :smbname: smb.{exampledomain} 38 | :nfsname: nfs.{exampledomain} 39 | :iscsiname: iscsi.{exampledomain} 40 | :proxyname: www-proxy.{exampledomain} 41 | :ldapname: ldap.{exampledomain} 42 | :nisname: nis.{exampledomain} 43 | :slpname: slp.{exampledomain} 44 | :ntpname: ntp.{exampledomain} 45 | :tftpname: tftp.{exampledomain} 46 | :pxename: pxe.{exampledomain} 47 | :kdcname: kdc.{exampledomain} 48 | :dnsname: dns.{exampledomain} 49 | :dhcpname: dhcp.{exampledomain} 50 | :cupsname: cups.{exampledomain} 51 | 52 | // DNS names (infrastructure) 53 | :wwwname: www.{exampledomain} 54 | :wwwname1: www.{exampledomain1} 55 | :wwwname2: www.{exampledomain2} 56 | :ftpname: ftp.{exampledomain} 57 | :mailname: mail.{exampledomain} 58 | :routerextname: routerext.{exampledomain} 59 | :routerintname: routerint.{exampledomain} 60 | :fwextname: fwext.{exampledomain} 61 | :fwintname: fwint.{exampledomain} 62 | :gatename: gate.{exampledomain} 63 | 64 | // DNS names (clients) 65 | :wsIname: jupiter.{exampledomain} 66 | :wsIIname: venus.{exampledomain} 67 | :wsIIIname: saturn.{exampledomain} 68 | :wsIVname: mercury.{exampledomain} 69 | 70 | // names (clients) 71 | :wsI: jupiter 72 | :wsII: venus 73 | :wsIII: saturn 74 | :wsIV: mercury 75 | 76 | // names (Xen) 77 | :xenhost name: earth.{exampledomain} 78 | :xenhost: earth 79 | :xenhostip: {subnetI}.20 80 | :xenguestname: alice.{exampledomain} 81 | :xenguest: alice 82 | :xenguestip: {subnetI}.21 83 | :xennatguest: dolly 84 | :xennatip: {subnetnat}.1 85 | 86 | 87 | // MAC addresses 88 | :wsImac: 00:30:6E:08:EC:80 89 | :wsIImac: 00:00:1C:B5:A4:32 90 | :wsIIImac: 00:17:31:CA:A3:4A 91 | :wsIVmac: 00:16:35:AF:94:4B 92 | 93 | // IP addresses (server) 94 | :smbip: {subnetI}.110 95 | :smbipv6: {subnetv6}:16e:: 96 | :nfsip: {subnetI}.110 97 | :nfsipv6: {subnetv6}:16e:: 98 | :iscsiip: {subnetI}.111 99 | :iscsiipv6: {subnetv6}:16f:: 100 | :proxyip: {subnetI}.112 101 | :proxyipv6: {subnetv6}170:: 102 | :ldapip: {subnetI}.113 103 | :ldapipv6: {subnetv6}:171:: 104 | :nisip: {subnetI}.113 105 | :nisipv6: {subnetv6}:171:: 106 | :ntpip: {subnetI}.116 107 | :ntpipv6: {subnetv6}:174:: 108 | :tftpip: {subnetI}.115 109 | :tftpipv6: {subnetv6}:173:: 110 | :pxeip: {subnetI}.115 111 | :pxeipv6: {subnetv6}:173:: 112 | :kdcip: {subnetI}.114 113 | :kdcipv6: {subnetv6}:172:: 114 | :dnsip: {subnetI}.116 115 | :dnsipv6: {subnetv6}:174:: 116 | :dnsip117: {subnetI}.117 117 | :dnsip118: {subnetI}.118 118 | :vpnip: {subnetI}.120 119 | 120 | 121 | :slpip: {subnetII}.254 122 | :slpipv6: {subnetv6}:2fe:: 123 | :dhcpip: {subnetII}.254 124 | :dhcpipv6: {subnetv6}:2fe:: 125 | :cupsip: {subnetII}.253 126 | :cupsipv6: {subnetv6}:2fd:: 127 | 128 | // IP addresses 
(infrastructure) 129 | :routerintipI: {subnetI}.1 130 | :routerintipIv6: {subnetv6}:101:: 131 | :fwintipI: {subnetI}.1 132 | :fwintipIv6: {subnetv6}:101:: 133 | 134 | :routerintipII: {subnetII}.1 135 | :routerintipIIv6: {subnetv6}:201:: 136 | :fwintipII: {subnetII}.1 137 | :fwintipIIv6: {subnetv6}:201:: 138 | 139 | :wwwip: {subnetIII}.100 140 | :wwwipv6: {subnetv6}:364:: 141 | :wwwip1: {subnetIII}.101 142 | :wwwip1v6: {subnetv6}:365:: 143 | :wwwip2: {subnetIII}.102 144 | :wwwip2v6: {subnetv6}:366:: 145 | :ftpip: {subnetIII}.105 146 | :ftpipv6: {subnetv6}:369:: 147 | :mailip: {subnetIII}.108 148 | :mailipv6: {subnetv6}:36c:: 149 | :routerextipIII: {subnetIII}.2 150 | :routerextipIIIv6: {subnetv6}:302:: 151 | :fwextipIII: {subnetIII}.2 152 | :fwextipIIIv6: {subnetv6}:302:: 153 | 154 | :routerintipIV: {subnetIV}.1 155 | :routerintipIVv6: {subnetv6}:401:: 156 | :routerextipIV: {subnetIV}.2 157 | :routerextipIVv6: {subnetv6}:402:: 158 | :fwextipIV: {subnetIV}.2 159 | :fwextipIVv6: {subnetv6}:402:: 160 | :fwintipIV: {subnetIV}.1 161 | :fwintipIVv6: {subnetv6}:401:: 162 | 163 | :routerextipV: {subnetV}.2 164 | :routerextipVv6: {subnetv6}:502:: 165 | :fwextipV: {subnetV}.2 166 | :fwextipVv6: {subnetv6}:502:: 167 | :gateip: {subnetV}.1 168 | :gateipv6: {subnetv6}:501:: 169 | 170 | // IP addresses (clients) 171 | :wsIip: {subnetII}.100 172 | :wsIipv6: {subnetv6}:264:: 173 | :wsIIip: {subnetII}.101 174 | :wsIIipv6: {subnetv6}:265:: 175 | :wsIIIip: {subnetII}.102 176 | :wsIIIipv6: {subnetv6}:266:: 177 | :wsIVip: {subnetII}.103 178 | :wsIVipv6: {subnetv6}:267:: 179 | 180 | :vpnclient1: {subnetII}.110 181 | :vpnclient2: {subnetII}.111 182 | -------------------------------------------------------------------------------- /adoc/quick-deployment.adoc: -------------------------------------------------------------------------------- 1 | include::entities.adoc[] 2 | 3 | = {productname} {productversion} QuickStart Guide 4 | :doctype: book 5 | :sectnums: 6 | :toc: left 7 | :toclevels: 2 8 | :icons: font 9 | :experimental: true 10 | :revdate: 2019-03-29 11 | :imagesdir: images/ 12 | 13 | [WARNING] 14 | ==== 15 | This is an internal release and MUST NOT be distributed outside SUSE 16 | ==== 17 | 18 | [IMPORTANT] 19 | ==== 20 | This is a very early pre-release of the software. You will encounter bugs 21 | and incomplete features. Please do not use for any production deployments. 22 | ==== 23 | 24 | [NOTE] 25 | ==== 26 | This guide assumes a configured SUSE {slea} {base_os_version} workstation environment. 27 | ==== 28 | 29 | == Purpose of this document 30 | 31 | This guide describes the deployment for {productname} {productversion}. 
32 | 33 | // System requirements 34 | include::deployment-sysreqs.adoc[System Requirements] 35 | 36 | include::deployment-openstack.adoc[ECP Instructions] 37 | 38 | include::deployment-bootstrap.adoc[Bootstrapping,leveloffset=+1] 39 | -------------------------------------------------------------------------------- /adoc/suse-rbac-oidc-flow-cli.xml: -------------------------------------------------------------------------------- 1 | 5Vzdk6I4EP9rrNp9GIskfPk4o7N3Uzt7Z511dTv3chU1o9wgsSCOun/9JhCE8KE4orLiwxRpQhP649fdoZkO6i82v/l4Of9Gp8TtQG266aBBB0LbMvlfQdhKAkIRYeY704gEEsLI+UEkUZPUlTMlgTKRUeoyZ6kSJ9TzyIQpNOz7dK1Oe6WuetclnpEcYTTBbp76jzNlc/kUhpbQfyfObB7fGWjyzALHkyUhmOMpXadI6LGD+j6lLDpabPrEFbKL5RJd96Xk7G5hPvFYlQv+W6x7+I8Xzfk2mOvQG/1rroZ3kkvAtvEDkyl/fjmkPpvTGfWw+5hQHyYr/50IpoAPfLrypuFI46PkgmdKl3LK/4SxrdQsXjHKSXO2cOVZsnHY99Txi2DVNeRosJGcw8E2NRgS31kQRvyY5jF/+z25WAxfYr5ikLAKRzGvgGGf3QtL4QSPeiSmfXFcd3fFNJ4xcXEQOJOIKKeIW0RyFMIrVU8sa7ryJ2SPTnRp5tifEbZnHuztrIh7H6FcGv6WX+gTFzPnXV0Iln4w281LTIUfSGs5wnJgxPcduyt5p0/m55w1MbJhqr59Ejg/8DicIGS7pI7HwsUZDx1jwCnYdWaeEDWXnFDvwzvxmcNd8l6eWDjTaWiJLh4T9wFP3mahGfapS/3wvug1/O30IjiQTRFgyJUkbprWWLnL5KUuud9pXU1HRsRrq9yxsl4k86GQTIpzvELJ9a6ncqCvrwG3l6xed0v8uKr1nKr/DrhisrqO1fQstDKkgcMcKtQ1pozRRYEemYCItG1wiFwKZovNTAST7hhzV+sGC8cl21JF5lysVDdQVyXYiyW6TpB9h9/zNKrHxNq9yGgM/mpdaKQgGOwF4JpAM4XZ1lGgXSPaWhXR1joRbIudGmac2rIylhatS16VGBuXLN6mpkkU3XMfFZJsLZMsZOdbe+fzg2gFteKMmQ8pRitCirEXtrQu4CFFxX5YT0jJcAXWpUKKlVN1//mpA01XqHbMY4s5E0efgrfVGIsVrThAQ82lM8fLm0TAfPq2y8+hAovCQLA/kfgHhH2s5w4joyUOcWHNI41qTjljeaUei6/X9xnAEaFIU/3eNPVrhyL7A6GoxuiTDj67eHM4+CShJR1/QDZMXjaDB72KQQXCs0QVPZPo6KBaVKnLu3t5IIetAHJ7r9Pz2gCZmUAcQ3DdSG5cCslj02pACqtuIcD9ewjVUtg4P00npzHry+Sn1bEEnYglJ0UPAPI+j1rh80Arlnvi9BBlsum4oj7R6VFP5XqxDQEAG+PzmuLzR7h8eeKw83pwpNenkgwJKEmGcS1UOLVsPQ0UUFsTAVCS2KVAAZ5nlxDpKiiYFwMFqzGgoCYC1oGXCSVB3jrk7pXhpJmlx3XTBTuPDFY7kKEEkVPIsKsRTi0DgQoFl6sJ8gXgn0+DfsH+zoBsbmE7B/XUgtu4/puFeAUpHdyv2Jw/EPcFRv0CZdAl8Z4H98PP+XO3oCNL1RHSrq+jfNHUch3ZGR2B6+sI7tMRaZ+KMlCHGgB1+TJjxL1HdPZAEy+EkLxxsAyfP6etr3ZwCxHINJoXgfJ9A38RmcBC7VYFb2vo6oJvaiX2sULsqDqsER1csGoRFhXrV2vhKijC2tHDBQ8WYQDEm5zSr+XooyXZ+YsumC+6vq7GXDzuDaBsNutoAMqi5rz4UjfBD7z4Ktve/jX3u1DVbllw1W5ZVPB6rB37Xejg6zFNL36PdepOuMoVqtefD4cRbCgufHAf/LZhAV41A0MFL8jsdsACPJiBGXF+WlOrzAVcP1/x3g+fOkIobdl3aEAjI2pQU/2vU/4WfVNVIyBXxePztNmDTJu9bSCVRbT+szVEooLWdtAOnD/Y265basfCiYV2cUNkRt1nDAHN2Xs7KvurCUWiAJYyLS38nVJ21ghCF+uaKkGhXvajmmooVND4l/lqCOgqo5q+GkJm5qWCfIDSdWXmW7JD8axfDaGCXUy9Hdi6fxeTV9a2fb2UmQ+Tj9uj6cl/CECPPwE= -------------------------------------------------------------------------------- /adoc/suse-rbac-oidc-flow-web.xml: -------------------------------------------------------------------------------- 1 | 
7VzLcqM4FP0aV6UXSSHxMF4mcTrT1ZmZ1KT6kSW2ZUwaIxfIsd1fPwgkQAhixzwMTmeRsi4gQPeeo6sjiYF6u9ze+9Zq8TeeIXcAldl2oI4HEI6UUfifGnaxYTjSYoPtO7PYBFLDk/MbMaPCrGtnhgLhRIKxS5yVaJxiz0NTItgs38cb8bQ5dsW7riwbSYanqeXK1h/OjCxiq6krqf0v5NgLfmegsCMTa/rL9vHaY/fzsIfiI0uLV8NODRbWDG8yJvVuoN76GJP413J7i1zaqrzF4us+lxxNHtlHHjnkgoX7oiyCF+/bavnP3A7uvj/cTy915qeA7HhboFnYNKyIfbLANvYs9y613kzX/iuitYKwEL18VFLCUnrBA8YrdsoLImTHnG6tCQ5NC7J02VG0dcjPzO9nWtWVzkrjLas5KuwyhUfkO0tEkM9tHvF3P9OLafGZ10sLaVVRidcVEMsn1zSIQkPkP2b77LhucsWMnzF1rSBwprGRnUJvIbuDeSjAa3+K3vABD3jLtxF5y1cwPpE6KHMH5u17hMPW8HfhCT5yLeK8irFtMYjYyXlprIQ/WLi8I3SAYsY1v1rumt3rYvhJiieCtkT0uI8C57c1iU6grbvCjkeix9NvBvo4tFiuY3u0scPGpA6+eUU+cUK8XrMDS2c2i2LRtSbIvUlQeItd7Ef3VefRX6Fn3oQCvRXaFtEOe2QBv0LDs6sulStFU/X42p1Q08GuYZU/0qbJ1MzJhNV6ORJrwPN5EMZQ3rXJI1bwtmF0himULjBFygNZsgBdIQtgqN1iC4krvgWhQ/IhxXH+QGH9iAOHOJjifYIJwcsCIiA0drJBEnazK1rZcmvTVOVqYoUOuAqWjot2pc0uYb4U21ATETjiiNykeUOSHSyyOQM31t+0YNgdaCZwfM4cKYZmTR1vGZqVPWg+HoPDQmQdDsoKGCzuFsBI7GxMRRWriJ+MXdVA5wAKUgHY31QgQVT1XEC5AmEuIHbasJ5cIFcrf+bmc4Gh5OwfaDKAhkudOwlJ3bDpr4t7y7M31k6Og4D4+Fcy0oICAdGosPwpYxpAg2KzcAh6WlkRKDchq4sxJEXIHHuEX6+9hfTDaR/kEi/99LSvHpOQNUT0CbfvJ/ryvEnokRpNosxD+RrUnUMVQ1nLJRUayAVN0/zN9Zssf6v95W8OjVqGcqoBxd6VE2/d/K23xd8akDzbDdEHvj2WOyxh5NmgkAuyqhtJBw9mk46NyBRdBr3WX9BrZZ36MaCH6jAHeq0W0KsjsdbW9Bueg3YA8++Rb94FeSBC/vSILx4ong7wXOHMAF7vMeDVOgEPmxFsVU0EvNEW4AHoEOLfoQodi+YuiLMHZwJdE2dBATP0OP9PYr8O/UZTTLHTVs1aqMHMTeWorVEDBMYfd5eP96CZ42y1Yu7XAtnDrg7phnum50qGa8OzovuOsT2EMvxHPYZ/qZx7VCKYyD1VRT0gskhr8o48F/Pvl/FtgT4/Rttz0ObVUee0eVlOvV6TRfiKIRQI9gt8gVfIexhfP36Sj52Di4aii1Tl5C7iEvsfH3Ef5RJSFZzeR/KqkYyP0MdzUY7p1NMzXZJ6py56CtFD19tCw1rSRvImwSp6f8lbX83gHDogQ+9cB1QwoP8PsYQWKufa8Mm6l9M1vDaS2rX1gVi6IAo2vL6RKfLpgIoP8LK3P2iNQGk07F8RycW09uX44sQfGvklgrnUP34laTZfXtZliJK00fqyAJncL4z+DhQ5NutQBaGRV/YBG1VXnTHg9fB5CE2soUlZUJFc2w0R6TgN6V0S0ik2fvANHfsFJH1UN3VV6uRg0XyB2V9iSCK/FgUJgByEKxJD88jngZhx59f1JHxr9wySxPygqQNJIjC6I9eLqzH2rMAqm5zthVyfsOhevjW7xbaKLFX1Wq83yjB83E674iVVVdMwsVbYXhLWoWUbAjUcOZN3XszAE5+uUAMoWMMJlP5yA6x1Kh/o0BTJoeLS7RZm8nkoZjXvxy8D+rIfRT81DO30GVqH9lR+hLHwCVW8ioFSsPUR9JeAk8CvZeujNhRFspokMnHnTG6va4O5mdIhUkjzsf3iej0zAsfTQtwlZaJSif5aIoyujeYK9tr1eK90Aoo6CEMxK66tbeNDKPKkyA39AlXB1yz6l4rl9zk3KpaFxfTzV7F/0q+LqXf/Aw== -------------------------------------------------------------------------------- /make_release_package.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #Fail script on error 3 | set -e 4 | 5 | #Get version number from attributes file 6 | MAJOR=$(cat adoc/attributes.adoc| grep :productmajor:|awk '{ print $2}') 7 | MINOR=$(cat adoc/attributes.adoc| grep :productminor:|awk '{ print $2}') 8 | PATCH=$(cat adoc/attributes.adoc| grep :productpatch:|awk '{ print $2}') 9 | 10 | #Remove outdated package artifacts if present 11 | rm -rf $PWD/build/pack/ 12 | 13 | #Generate output 14 | daps -d DC-caasp-admin html --single 15 | daps -d DC-caasp-admin pdf 16 | daps -d DC-caasp-deployment html --single 17 | daps -d DC-caasp-deployment pdf 18 | daps -d DC-caasp-airgap html --single 19 | daps -d DC-caasp-airgap pdf 20 | # daps -d DC-caasp-quickstart html --single 21 | # daps -d DC-caasp-quickstart pdf 22 | 23 | #Create package directory 24 | mkdir $PWD/build/pack/ 25 | 26 | #Move HTML content to package dir 27 | mv $PWD/build/caasp-admin/single-html/caasp-admin $PWD/build/pack/ 28 | mv $PWD/build/caasp-deployment/single-html/caasp-deployment $PWD/build/pack/ 29 | mv $PWD/build/caasp-airgap/single-html/caasp-airgap $PWD/build/pack/ 30 | # mv $PWD/build/caasp-quickstart/single-html/caasp-quickstart 
$PWD/build/pack/ 31 | 32 | #Move and rename PDFs to package dir 33 | mv $PWD/build/caasp-admin/caasp-admin_color_en.pdf $PWD/build/pack/SUSE-CaaSP-$MAJOR.$MINOR.$PATCH-Admin-Guide.pdf 34 | mv $PWD/build/caasp-deployment/caasp-deployment_color_en.pdf $PWD/build/pack/SUSE-CaaSP-$MAJOR.$MINOR.$PATCH-Deployment-Guide.pdf 35 | mv $PWD/build/caasp-airgap/caasp-airgap_color_en.pdf $PWD/build/pack/SUSE-CaaSP-$MAJOR.$MINOR.$PATCH-Airgap-Deployment-Guide.pdf 36 | # mv $PWD/build/caasp-quickstart/caasp-quickstart_color_en.pdf $PWD/build/pack/SUSE-CaaSP-$MAJOR.$MINOR.$PATCH-Quickstart-Guide.pdf 37 | 38 | #Compress contents of package dir 39 | cd $PWD/build/pack/ 40 | tar czvf SUSE-CaaSP-$MAJOR.$MINOR.$PATCH-Docs.tar.gz * 41 | --------------------------------------------------------------------------------
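A usage sketch for the packaging script above; it assumes the `daps` toolchain is installed and that the script is run from the repository root (neither is checked by the script itself):

[source,bash]
----
# Build all guides and assemble the release tarball.
./make_release_package.sh

# The single-page HTML directories, the renamed PDFs, and the tarball
# end up in build/pack/:
ls build/pack/
----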