├── .gitignore
├── images
│   └── src
│       └── png
│           ├── stratos.png
│           ├── stratos-profile.png
│           ├── stratos-edit-profile.png
│           ├── stratos-app-metrics-tab.png
│           ├── stratos-kubernetes-view.png
│           ├── stratos-app-instances-metrics.png
│           ├── stratos-kubernetes-node-metrics.png
│           └── stratos-kubernetes-view-caasp.png
├── xml
│   ├── schema.xml
│   ├── appendix.xml
│   ├── authors.xml
│   ├── common_legal.xml
│   ├── cap_depl_private_registry.xml
│   ├── cap_depl_terraform.xml
│   ├── common_intro_target_audience_i.xml
│   ├── MAIN_cap_guides.xml
│   ├── cap_intro.xml
│   ├── common_intro_available_doc_i.xml
│   ├── common_intro_making_i.xml
│   ├── cap_depl_admin_notes.xml
│   ├── common_copyright_gfdl.xml
│   ├── cap_admin_uaa_ui.xml
│   ├── common_intro_feedback_i.xml
│   ├── cap_kube_requirements.xml
│   ├── cap_admin_configuration_changes.xml
│   ├── cap_admin_secret_rotation.xml
│   ├── cap_admin_passwords.xml
│   ├── common_intro_typografie_i.xml
│   ├── book_cap_guides.xml
│   ├── app_cf_operator_values_yaml.xml
│   ├── cap_depl_air_gap_registry.xml
│   ├── cap_admin_create_admin.xml
│   ├── cap_admin_credhub.xml
│   ├── cap_admin_app_domains.xml
│   ├── cap_admin_upgrade.xml
│   ├── common_intro_support_statement_i.xml
│   ├── cap_depl_eirini.xml
│   ├── cap_admin_memory_limits.xml
│   ├── cap_admin_nproc_limits.xml
│   ├── cap_user_cf_cli.xml
│   ├── network-decl.ent
│   ├── cap_admin_ccdb_key_rotation.xml
│   ├── cap_depl_eks.xml
│   ├── cap_troubleshooting.xml
│   ├── cap_admin_app_autoscaler.xml
│   ├── cap_depl_aks.xml
│   ├── cap_depl_gke.xml
│   ├── cap_admin_logging.xml
│   └── entity-decl.ent
├── DC-cap-guides
├── .editorconfig
├── .github
│   └── workflows
│       └── docbook.yml
└── README.adoc
/.gitignore:
--------------------------------------------------------------------------------
1 | build/
2 | # emacs temp/autosave
3 | \#*#
4 | .#*
5 | *~
6 |
--------------------------------------------------------------------------------
/images/src/png/stratos.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SUSE/doc-cap/HEAD/images/src/png/stratos.png
--------------------------------------------------------------------------------
/images/src/png/stratos-profile.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SUSE/doc-cap/HEAD/images/src/png/stratos-profile.png
--------------------------------------------------------------------------------
/images/src/png/stratos-edit-profile.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SUSE/doc-cap/HEAD/images/src/png/stratos-edit-profile.png
--------------------------------------------------------------------------------
/images/src/png/stratos-app-metrics-tab.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SUSE/doc-cap/HEAD/images/src/png/stratos-app-metrics-tab.png
--------------------------------------------------------------------------------
/images/src/png/stratos-kubernetes-view.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SUSE/doc-cap/HEAD/images/src/png/stratos-kubernetes-view.png
--------------------------------------------------------------------------------
/images/src/png/stratos-app-instances-metrics.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SUSE/doc-cap/HEAD/images/src/png/stratos-app-instances-metrics.png
--------------------------------------------------------------------------------
/images/src/png/stratos-kubernetes-node-metrics.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SUSE/doc-cap/HEAD/images/src/png/stratos-kubernetes-node-metrics.png
--------------------------------------------------------------------------------
/images/src/png/stratos-kubernetes-view-caasp.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SUSE/doc-cap/HEAD/images/src/png/stratos-kubernetes-view-caasp.png
--------------------------------------------------------------------------------
/xml/schema.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
8 |
9 |
--------------------------------------------------------------------------------
/DC-cap-guides:
--------------------------------------------------------------------------------
1 | ## ----------------------------
2 | ## Doc Config File for SUSE CAP
3 | ## Deployment Guide
4 | ## ----------------------------
5 |
6 | ## Basics
7 | MAIN="MAIN_cap_guides.xml"
8 | ROOTID=book-cap-guides
9 |
10 | ## Profiling
11 | #PROFOS="sles"
12 | #PROFARCH="x86_64;zseries;power;aarch64"
13 | ROLE="admin;worker;plain"
14 |
15 | ## stylesheet location
16 | STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2021-ns"
17 | FALLBACK_STYLEROOT="/usr/share/xml/docbook/stylesheet/suse-ns"
18 |
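## A minimal usage sketch (assuming the daps package described in the
## README is installed). STYLEROOT above can also be overridden per
## build without editing this file, using the standard daps option:
##
##   daps -d DC-cap-guides --styleroot /usr/share/xml/docbook/stylesheet/suse-ns html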
--------------------------------------------------------------------------------
/xml/appendix.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | %entities;
6 | ]>
7 |
10 | Appendix
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
--------------------------------------------------------------------------------
/.editorconfig:
--------------------------------------------------------------------------------
1 | root = true
2 |
3 | [*.xml]
4 | end_of_line = lf
5 | insert_final_newline = false
6 | charset = utf-8
7 | indent_style = space
8 | indent_size = 1
9 | max_line_length = 80
10 |
--------------------------------------------------------------------------------
/xml/authors.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | %entities;
6 | ]>
7 |
10 | Carla Schroder
11 |
12 | Billy Tat
13 |
14 | Claudia-Amelia Marin
15 |
16 | Lukas Kucharczyk
17 |
18 |
19 |
--------------------------------------------------------------------------------
/xml/common_legal.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | %entities;
6 | ]>
7 |
8 |
12 | GNU Licenses
13 |
14 |
15 |
16 | yes
17 |
18 |
19 |
20 | This appendix contains the GNU Free Documentation License version 1.2.
21 |
22 |
23 |
24 |
--------------------------------------------------------------------------------
/xml/cap_depl_private_registry.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | %entities;
6 | ]>
7 |
11 |
12 | &suse; Private Registry
13 |
14 |
15 | yes
16 |
17 |
18 | &readmefirst;
19 |
20 | &productname; offers &suse; Private Registry as an Open Container Initiative
21 | (OCI) registry solution to store, replicate, manage, and secure OCI images and
22 | artifacts. Operators who opt to use &suse; Private Registry can follow the
23 | configuration and installation instructions from
24 | .
25 |
26 |
27 |
--------------------------------------------------------------------------------
/xml/cap_depl_terraform.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | %entities;
6 | ]>
7 |
11 |
12 | Deploying &productname; Using Terraform
13 |
14 |
15 | yes
16 |
17 |
18 | &readmefirst;
19 |
20 | In addition to the manual deployment methods mentioned earlier in this guide,
21 | operators have the option to deploy &productname; on AWS, Azure, or GCP using
22 | Terraform. The Terraform scripts will deploy the entirety of &productname;,
23 | including &kubecf;, &operator;, Stratos, and Stratos Metrics. Operators can
24 | deploy using Terraform by following the instructions from
25 | .
26 |
27 |
28 |
--------------------------------------------------------------------------------
/xml/common_intro_target_audience_i.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | %entities;
6 | ]>
7 |
8 |
13 | Required Background
14 |
15 |
16 |
17 |
18 | yes
19 |
20 |
21 |
22 |
23 | To keep the scope of these guidelines manageable, certain technical
24 | assumptions have been made:
25 |
26 |
27 |
28 |
29 |
30 | You have some computer experience and are familiar with common technical
31 | terms.
32 |
33 |
34 |
35 |
36 | You are familiar with the documentation for your system and the network on
37 | which it runs.
38 |
39 |
40 |
41 |
42 | You have a basic understanding of Linux systems.
43 |
44 |
45 |
46 |
47 |
--------------------------------------------------------------------------------
/xml/MAIN_cap_guides.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 |
8 | %entities;
9 | ]>
10 |
14 |
15 |
16 | &productname; &productnumber; Documentation
17 |
18 |
19 | btat@suse.com
20 | editing
21 |
22 |
23 |
24 |
25 | CAP
26 |
27 | https://github.com/SUSE/doc-cap/issues/new
28 | bug,low priority
29 | btat
30 |
31 |
32 |
33 |
34 |
35 |
36 |
--------------------------------------------------------------------------------
/xml/cap_intro.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | %entities;
6 | ]>
7 |
11 | About This Guide
12 |
13 |
14 |
15 | yes
16 |
17 |
18 |
19 | &productname; is a software platform for cloud-native applications based on
20 | Cloud Foundry Application Runtime (&operator;, &kubecf;, and Stratos) with
21 | additional supporting components.
22 |
23 |
24 | &cap; is designed to run on any &kube; cluster. This guide describes how
25 | to deploy it on:
26 |
27 |
28 | &deployment-platforms;
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
--------------------------------------------------------------------------------
/xml/common_intro_available_doc_i.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | %entities;
6 | ]>
7 |
8 |
12 | Available Documentation
13 |
14 |
15 |
16 |
17 |
18 |
19 | no
20 |
21 |
22 |
23 |
24 | We provide HTML and PDF versions of our books in different languages.
25 | Documentation for our products is available at
26 | , where you can also
28 | find the latest updates and browse or download the documentation in various
29 | formats.
30 |
31 |
32 |
33 | The following documentation is available for this product:
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 | The &productname; guide is a comprehensive document providing deployment,
42 | administration, and user instructions, as well as architecture and minimum
43 | system requirements.
44 |
45 |
46 |
47 |
48 |
49 |
--------------------------------------------------------------------------------
/xml/common_intro_making_i.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | %entities;
6 | ]>
7 |
8 |
9 |
13 | About the Making of This Documentation
14 |
15 |
16 |
17 |
18 | yes
19 |
20 |
21 |
22 |
23 | This documentation is written in
24 | GeekoDoc,
25 | a subset of
26 | DocBook 5.
29 | The XML source files were validated by jing (see
30 | ), processed by
31 | xsltproc, and converted into XSL-FO using a customized
32 | version of Norman Walsh's stylesheets. The final PDF is formatted through FOP
33 | from
34 | Apache
36 | Software Foundation. The open source tools and the environment used to
37 | build this documentation are provided by the DocBook Authoring and Publishing
38 | Suite (DAPS). The project's home page can be found at
39 | .
40 |
41 |
42 |
43 | The XML source code of this documentation can be found at
44 | .
45 |
46 |
47 |
--------------------------------------------------------------------------------
/xml/cap_depl_admin_notes.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | %entities;
6 | ]>
7 |
11 |
12 | Deployment and Administration Notes
13 |
14 |
15 | yes
16 |
17 |
18 |
20 |
21 | Important things to know before deploying &productname;.
22 |
23 |
24 | Important Changes
25 |
26 | Schedulers such as Diego and Eirini, and stacks such as
27 | cflinuxfs3 or sle15, have different
28 | memory requirements for applications. Not every combination is tested, so
29 | there is no universal memory setting for &cap;. Because memory use depends
30 | on the application deployed, it is up to the user to adjust the setting
31 | for their application.
32 |
33 |
34 |
35 | Status of Pods during Deployment
36 | &deployment-pod-status;
37 |
38 |
39 | Length of Release Names
40 | &release-name-length;
41 |
42 |
43 | Releases and Associated Versions
44 |
45 | &kubecf-operator-versions;
46 |
47 |
48 | The supported upgrade method is to install all upgrades, in order. Skipping
49 | releases is not supported. This table matches the &helm; chart versions to
50 | each release, as well as other version-related information.
51 |
52 | &releases-table;
53 |
54 |
55 |
--------------------------------------------------------------------------------
/xml/common_copyright_gfdl.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | %entities;
6 | ]>
7 |
8 |
14 |
18 |
19 |
20 | yes
21 |
22 |
23 |
24 | Copyright © 2006–
25 |
26 | &suse; LLC and contributors. All rights reserved.
27 |
28 |
29 | Permission is granted to copy, distribute and/or modify this document under
30 | the terms of the GNU Free Documentation License, Version 1.2 or (at your
31 | option) version 1.3; with the Invariant Section being this copyright notice
32 | and license. A copy of the license version 1.2 is included in the section
33 | entitled GNU Free Documentation License
.
34 |
35 |
36 | For &suse; trademarks, see
37 | . All other
38 | third-party trademarks are the property of their respective owners. Trademark
39 | symbols (®, ™ etc.) denote trademarks of &suse; and its affiliates.
40 | Asterisks (*) denote third-party trademarks.
41 |
42 |
43 | All information found in this book has been compiled with utmost attention to
44 | detail. However, this does not guarantee complete accuracy. Neither &suse; LLC,
45 | its affiliates, the authors nor the translators shall be held liable for
46 | possible errors or the consequences thereof.
47 |
48 |
49 |
--------------------------------------------------------------------------------
/xml/cap_admin_uaa_ui.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | %entities;
6 | ]>
7 |
11 |
12 | Accessing the UAA User Interface
13 |
14 |
15 | yes
16 |
17 |
18 |
19 | After UAA is deployed successfully, users will not be able to log in to the
20 | UAA user interface (UI) with the admin user and the
21 | UAA_ADMIN_CLIENT_SECRET credentials. This user is only an
22 | OAuth client that is authorized to call UAA REST APIs; to log in to the UI,
23 | create a separate user in the UAA server by using the UAAC utility.
24 |
25 |
26 | Prerequisites
27 |
28 |
29 | The following prerequisites are required to access the UAA UI.
30 |
31 |
32 |
33 |
34 | &cfcli-prereq;
35 |
36 |
37 | &uaac-prereq;
38 |
39 |
40 |
41 | UAA has been successfully deployed.
42 |
43 |
44 |
45 |
46 |
47 | Procedure
48 |
49 | &uaac-target;
50 | &uaac-authenticate;
51 |
52 |
53 | Create a new user.
54 |
55 | &prompt.user;uaac user add NEW-USER -p PASSWORD --emails NEW-USER-EMAIL
56 |
57 |
58 |
59 | Go to the UAA UI at
60 | , replacing example.com with your domain.
61 |
62 |
63 |
64 |
65 | Log in using the newly created user. Use the username and password as
66 | the credentials.
67 |
68 |
69 |
70 |
71 |
72 |
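The &uaac-target; and &uaac-authenticate; steps above resolve to shared
snippets; as a hedged end-to-end sketch (the uaa.example.com domain and the
default admin client are assumptions), the UAAC session looks like:

&prompt.user;uaac target --skip-ssl-validation https://uaa.example.com
&prompt.user;uaac token client get admin --secret UAA_ADMIN_CLIENT_SECRET
&prompt.user;uaac user add NEW-USER -p PASSWORD --emails NEW-USER-EMAIL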
--------------------------------------------------------------------------------
/xml/common_intro_feedback_i.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | %entities;
6 | ]>
7 |
8 |
9 |
10 | Feedback
11 |
12 |
13 |
14 | yes
15 |
16 |
17 |
18 |
19 | Several feedback channels are available:
20 |
21 |
22 |
23 |
24 | Bugs and Enhancement Requests
25 |
26 |
27 | For services and support options available for your product, refer to
28 | .
29 |
30 |
31 |
32 | To report bugs for a product component, go to
33 | , log in, and
34 | click Create New.
35 |
36 |
37 |
38 |
39 | User Comments
40 |
41 |
42 | We want to hear your comments about and suggestions for this manual and
43 | the other documentation included with this product. Use the User Comments
44 | feature at the bottom of each page in the online documentation or go to
45 | and
46 | enter your comments there.
47 |
48 |
49 |
50 |
51 | Mail
52 |
53 |
54 | For feedback on the documentation of this product, you can also send a
55 | mail to doc-team@suse.com. Make sure to include the
56 | document title, the product version and the publication date of the
57 | documentation. To report errors or suggest enhancements, provide a concise
58 | description of the problem and refer to the respective section number and
59 | page (or URL).
60 |
61 |
62 |
63 |
64 |
65 |
--------------------------------------------------------------------------------
/xml/cap_kube_requirements.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | %entities;
6 | ]>
7 |
11 |
12 | Other &kube; Systems
13 |
14 |
15 | yes
16 |
17 |
18 |
19 | &kube; Requirements
20 |
21 |
22 | &productname; is designed to run on any &kube; system that meets the
23 | following requirements:
24 |
25 |
26 |
27 |
28 |
29 | &kube; API version of at least &min_kube;
30 |
31 |
32 |
33 |
34 | &kernel-prereq;
35 |
36 |
37 |
38 | The container runtime storage driver should
39 | not be aufs.
40 |
41 |
42 |
43 |
44 | Presence of a storage class for &productname; to use
45 |
46 |
47 |
48 |
49 | kubectl can authenticate with the apiserver
50 |
51 |
52 |
53 |
54 | kube-dns or core-dns should be
55 | running and ready
56 |
57 |
58 |
59 |
60 | ntp, systemd-timesyncd,
61 | or chrony must be installed and active
62 |
63 |
64 |
65 |
66 | The container runtime must be configured to allow privileged containers
67 |
68 |
69 |
70 |
71 | Privileged containers must be enabled in kube-apiserver.
72 | See
73 | kube-apiserver.
74 |
75 |
76 |
77 |
78 | For &kube; deployments prior to version 1.15, privileged containers must
79 | be enabled in kubelet
80 |
81 |
82 |
83 |
84 | The TasksMax property of the
85 | containerd service definition must be set to infinity
86 |
87 |
88 |
89 |
90 |
91 |
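Several of these requirements can be spot-checked from a shell before
deploying. A short sketch using standard kubectl and systemd tooling
(not an exhaustive readiness check):

&prompt.user;kubectl version
&prompt.user;kubectl get storageclass
&prompt.user;kubectl get pods --namespace kube-system
&prompt.user;systemctl show containerd --property TasksMax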
--------------------------------------------------------------------------------
/xml/cap_admin_configuration_changes.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | %entities;
6 | ]>
7 |
11 |
12 | Configuration Changes
13 |
14 |
15 | yes
16 |
17 |
18 |
19 | After the initial deployment of &cap;, any changes made to your &helm; chart
20 | values, whether through your &values-filename; file
21 | or directly using &helm;'s --set flag, are applied using
22 | the helm upgrade command.
23 |
24 |
25 | Do Not Make Changes to Pod Counts During a Version Upgrade
26 |
27 | The helm upgrade command can be used to apply
28 | configuration changes as well as perform version upgrades to &cap;. A change
29 | to the pod count configuration should not be applied simultaneously with a
30 | version upgrade. Make sizing changes separately, either before or after
31 | a version upgrade.
32 |
33 |
34 |
35 | Configuration Change Example
36 |
37 |
38 | Consider an example where you want to enable the App-AutoScaler.
39 |
40 |
41 |
42 | The entry below is added to your &values-filename; file, with
43 | enabled set to true.
44 |
45 |
46 | features:
47 | autoscaler:
48 | enabled: true
49 |
50 |
51 |
52 | The change is then applied with the helm upgrade command. This
53 | example assumes the suse/kubecf &helm; chart deployed was
54 | named kubecf.
55 |
56 |
57 | &prompt.user;helm upgrade kubecf suse/kubecf \
58 | --namespace kubecf \
59 | --values &values-file; \
60 | --version &kubecf_chart;
61 |
62 |
63 |
64 | When all pods are in a READY state, the configuration
65 | change will also be reflected. Assuming the chart was deployed to the
66 | kubecf namespace, progress can be monitored with:
67 |
68 |
69 | &prompt.user;watch --color 'kubectl get pods --namespace kubecf'
70 |
71 |
72 | Other Examples
73 |
74 |
75 | The following are other examples of using helm upgrade to
76 | make configuration changes:
77 |
78 |
79 |
80 |
81 |
82 | Secrets rotation (see )
83 |
84 |
85 |
86 |
87 | Enabling additional services (see
88 | )
89 |
90 |
91 |
92 |
93 |
94 |
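Before or after any such helm upgrade, the values a
release is currently running with can be printed back for confirmation. A
small sketch, assuming the kubecf release name used above:

&prompt.user;helm get values kubecf --namespace kubecf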
--------------------------------------------------------------------------------
/.github/workflows/docbook.yml:
--------------------------------------------------------------------------------
1 | name: Validate/build docs
2 |
3 | on:
4 | push:
5 | paths:
6 | - 'DC-*'
7 | - 'xml/**'
8 | - 'adoc/**'
9 | - 'images/src/**'
10 | - '.github/workflows/**'
11 | - '**/DC-*'
12 | - '**/xml/**'
13 | - '**/adoc/**'
14 | - '**/images/src/**'
15 |
16 | jobs:
17 | select-dc-files:
18 | runs-on: ubuntu-latest
19 | outputs:
20 | validate-list: ${{ steps.select-dc-validate.outputs.dc-list }}
21 | build-list: ${{ steps.select-dc-build.outputs.dc-list }}
22 | allow-build: ${{ steps.select-dc-build.outputs.allow-build }}
23 | relevant-branches: ${{ steps.select-dc-build.outputs.relevant-branches }}
24 | steps:
25 | - uses: actions/checkout@v2
26 |
27 | - name: Checking basic soundness of DC files
28 | uses: openSUSE/doc-ci@gha-select-dcs
29 | with:
30 | mode: soundness
31 |
32 | - name: Selecting DC files to validate
33 | id: select-dc-validate
34 | uses: openSUSE/doc-ci@gha-select-dcs
35 | with:
36 | mode: list-validate
37 |
38 | - name: Selecting DC files to build
39 | id: select-dc-build
40 | uses: openSUSE/doc-ci@gha-select-dcs
41 | with:
42 | mode: list-build
43 | original-org: SUSE
44 |
45 | validate:
46 | runs-on: ubuntu-latest
47 | needs: select-dc-files
48 | strategy:
49 | # don't cancel all validation runners when one of them fails, we want full results
50 | fail-fast: false
51 | matrix:
52 | dc-files: ${{ fromJson(needs.select-dc-files.outputs.validate-list) }}
53 | steps:
54 | - uses: actions/checkout@v2
55 | - name: Validating DC file(s) ${{ matrix.dc-files }}
56 | uses: openSUSE/doc-ci@gha-validate
57 | with:
58 | dc-files: ${{ matrix.dc-files }}
59 | validate-ids: false
60 | xml-schema: docbook51
61 |
62 |
63 | build-html:
64 | runs-on: ubuntu-latest
65 | needs: [select-dc-files, validate]
66 | if: ${{ needs.select-dc-files.outputs.allow-build == 'true' }}
67 | outputs:
68 | artifact-name: ${{ steps.build-dc.outputs.artifact-name }}
69 | artifact-dir: ${{ steps.build-dc.outputs.artifact-dir }}
70 | strategy:
71 | matrix:
72 | dc-files: ${{ fromJson(needs.select-dc-files.outputs.build-list) }}
73 | steps:
74 | - uses: actions/checkout@v2
75 | - name: Building DC file(s) ${{ matrix.dc-files }}
76 | id: build-dc
77 | uses: openSUSE/doc-ci@gha-build
78 | with:
79 | dc-files: ${{ matrix.dc-files }}
80 | - name: Uploading builds as artifact
81 | uses: actions/upload-artifact@v2
82 | with:
83 | name: ${{ steps.build-dc.outputs.artifact-name }}
84 | path: ${{ steps.build-dc.outputs.artifact-dir }}/*
85 | retention-days: 3
86 |
87 |
88 | publish:
89 | runs-on: ubuntu-latest
90 | if: ${{ success() }}
91 | needs: [select-dc-files, build-html]
92 | continue-on-error: true
93 | steps:
94 | - name: Downloading all build artifacts
95 | uses: actions/download-artifact@v2
96 | with:
97 | path: artifact-dir
98 | - name: Publishing builds on susedoc.github.io
99 | uses: openSUSE/doc-ci@gha-publish
100 | env:
101 | DEPLOY_KEY: ${{ secrets.DEPLOY_KEY_CAP }}
102 | with:
103 | artifact-path: artifact-dir
104 | relevant-dirs: ${{ needs.select-dc-files.outputs.relevant-branches }}
105 |
--------------------------------------------------------------------------------
/README.adoc:
--------------------------------------------------------------------------------
1 | = SUSE Cloud Application Platform (CAP) Documentation
2 |
3 | image:https://travis-ci.org/SUSE/doc-cap.svg?branch=develop["Build Status", link="https://travis-ci.org/SUSE/doc-cap"]
4 |
5 | This is the source for the official SUSE Cloud Application Platform (CAP) Documentation
6 |
7 | Released versions of the documentation will be published at
8 | https://documentation.suse.com/ once available.
9 |
10 | == Branches
11 |
12 | ***On Jan 29, 2020, we changed to a new branching model. We have switched the default branch from `develop` to `master`.***
13 |
14 | * *Use the master branch* as the basis of your commits and new feature branches.
15 |
16 | * The *develop branch has been deleted* on the server. Do not push to the `develop` branch.
17 |   Your changes may get lost in the medium term and never make it to the proper branch.
18 |
19 | === How to Update Your Local Repository
20 |
21 | If you *created* a local clone or GitHub fork of this repo *before Jan 29, 2020*, do the following:
22 |
23 | 1. Make sure that your `master` and `develop` branches do not contain any important changes.
24 | If there are changes on either branch, export them using `git format-patch` or put them on a
25 | different branch.
26 |
27 | 1. Go to the master branch: `git checkout master`.
29 |
30 | 1. To pull the latest changes from the remote repository and to delete references to branches
31 |    that do not exist anymore on the server, run the following command: `git pull --prune`.
33 |
34 | 1. Delete your local develop branch: `git branch -D develop`.
35 |
36 | 1. To check for stale local branches, run: `git branch -v`.
37 | For any branches marked as `[gone]`, check if you still need them. If not, delete them:
38 | `git branch -D BRANCHNAME`
39 |
40 |
41 | .Overview of important branches
42 | [options="header"]
43 | |================================================
44 | | Name | Purpose
45 | | `master` | doc development (latest development version)
46 | | `maintenance/*` | maintenance for released versions
47 | |================================================
48 |
49 | == Contributing
50 |
51 | Thank you for contributing to this repo. When creating a pull request, please follow the guidelines below:
52 |
53 | . If you want to contribute to the most recent release, create your pull request against the `master` branch (not `develop`). The `master` branch is protected.
54 |
55 | . If you want to contribute to a previous release, please create your pull request against the respective `maintenance/*` branch. These branches are also protected.
56 |
57 | . Make sure all validation (Travis CI) checks pass.
58 |
59 | . For your pull request to be reviewed, please tag the relevant subject matter expert(s) from the development team (if applicable) and members of the documentation team.
60 |
61 | . Implement the required changes. If you have any questions, ping a documentation team member in #susedoc on RocketChat.
62 |
63 | . For help on style and structure, refer to the https://doc.opensuse.org/products/opensuse/Styleguide/opensuse_documentation_styleguide_sd/[Documentation Styleguide].
64 |
65 |
66 | == Editing DocBook
67 |
68 | To contribute to the documentation, you need to write DocBook.
69 |
70 | * You can learn about DocBook syntax at http://docbook.org/tdg5/en/html.
71 | * SUSE documents are generally built with DAPS (package `daps`) and the
72 | SUSE XSL Stylesheets (package `suse-xsl-stylesheets`).
73 |
74 | * Install the documentation environment with the following command:
75 | +
76 | [source]
77 | ----
78 | sudo /sbin/OneClickInstallUI https://gitlab.nue.suse.com/susedoc/doc-ymp/raw/master/Documentation.ymp
79 | ----
80 |
81 | * Basic daps usage:
82 | ** `$ daps -d DC- validate`: Make sure what you have written is
83 | well-formed XML and valid DocBook 5
84 | ** `$ daps -d DC- pdf`: Build a PDF document
85 | ** `$ daps -d DC- html`: Build multi-page HTML document
86 | ** `$ daps -d DC- optipng`: Always optimize new PNG images
87 | ** Learn more at https://opensuse.github.io/daps
88 |
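For example, with the doc config shipped in this repo:

[source]
----
daps -d DC-cap-guides validate
daps -d DC-cap-guides html
----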
--------------------------------------------------------------------------------
/xml/cap_admin_secret_rotation.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | %entities;
6 | ]>
7 |
11 |
12 | Rotating Automatically Generated Secrets
13 |
14 |
15 | yes
16 |
17 |
18 |
19 | &cap; uses a number of automatically generated secrets (passwords and
20 | certificates), provided by &operator;, for internal use. This removes
21 | the burden from human operators while allowing for secure communication.
22 |
23 | From time to time, operators may wish to change such secrets, either manually
24 | or on a schedule. This is called rotating a secret.
25 |
26 |
27 | Finding Secrets
28 |
29 | Retrieve the list of all secrets maintained by &kubecf;:
30 |
31 |
32 | &prompt.user;kubectl get quarkssecret --namespace kubecf
33 |
34 |
35 | To see information about a specific secret, for example the NATS
36 | password:
37 |
38 |
39 | &prompt.user;kubectl get quarkssecret --namespace kubecf kubecf.var-nats-password --output yaml
40 |
41 |
42 | Note that each quarkssecret has a corresponding regular &kube; secret
43 | that it controls:
44 |
45 |
46 | &prompt.user;kubectl get secret --namespace kubecf
47 | &prompt.user;kubectl get secret --namespace kubecf kubecf.var-nats-password --output yaml
48 |
49 |
50 |
51 | Rotating Specific Secrets
52 |
53 | To rotate a secret, for example kubecf.var-nats-password:
54 |
55 |
56 |
57 | Create a YAML file for a ConfigMap of the form:
58 |
59 | ---
60 | apiVersion: v1
61 | kind: ConfigMap
62 | metadata:
63 | name: rotate-kubecf.var-nats-password
64 | labels:
65 | quarks.cloudfoundry.org/secret-rotation: "true"
66 | data:
67 | secrets: '["kubecf.var-nats-password"]'
68 |
69 |
70 | The name of the ConfigMap can be anything allowed by &kube; syntax but
71 | we recommend using a name derived from the name of the secret itself.
72 |
73 |
74 | Also, the example above rotates only a single secret but
75 | the data.secrets key accepts an array of secret
76 | names, allowing simultaneous rotation of many secrets.
77 |
78 |
79 |
80 | Apply the ConfigMap:
81 |
82 | &prompt.user;kubectl apply --namespace kubecf -f /path/to/your/yaml/file
83 |
84 |
85 | The result can be seen in the &operator;'s log.
86 |
87 |
88 |
89 | After the rotation is complete, that is after secrets have been
90 | changed and all affected pods have been restarted, delete the config
91 | map again:
92 |
93 |
94 | &prompt.user;kubectl delete --namespace kubecf -f /path/to/your/yaml/file
95 |
96 |
97 |
98 |
99 |
100 |
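To verify a rotation took effect, the controlled secret's value can be
compared before and after. A sketch for the NATS example; the data key name
password is an assumption and may differ per secret:

&prompt.user;kubectl get secret --namespace kubecf kubecf.var-nats-password \
--output jsonpath='{.data.password}' | base64 --decode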
--------------------------------------------------------------------------------
/xml/cap_admin_passwords.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | %entities;
6 | ]>
7 |
11 |
12 | Managing Passwords
13 |
14 |
15 | yes
16 |
17 |
18 |
19 | The various components of &productname; authenticate to each other using
20 | passwords that are automatically managed by the &cap; secrets-generator. The
21 | only passwords managed by the cluster administrator are passwords for human
22 | users. The administrator may create and remove user logins, but cannot change
23 | user passwords.
24 |
25 |
26 |
27 |
28 | The cluster administrator password is initially defined in the deployment's
29 | values.yaml file with
30 | CLUSTER_ADMIN_PASSWORD
31 |
32 |
33 |
34 |
35 | The Stratos Web UI provides a form for users, including the administrator,
36 | to change their own passwords
37 |
38 |
39 |
40 |
41 | User logins are created (and removed) with the &cf; Client,
42 | &cfcli;
43 |
44 |
45 |
46 |
47 | Password Management with the &cf; Client
48 |
49 |
50 | The administrator cannot change other users' passwords. Only users may
51 | change their own passwords, and password changes require the current
52 | password:
53 |
54 |
55 |
56 | &prompt.user;cf passwd
57 | Current Password>
58 | New Password>
59 | Verify Password>
60 | Changing password...
61 | OK
62 | Please log in again
63 |
64 |
65 |
66 | The administrator can create a new user:
67 |
68 |
69 |
70 | &prompt.user;cf create-user NEW_USER PASSWORD
71 |
72 |
73 | and delete a user:
74 |
75 |
76 |
77 | &prompt.user;cf delete-user NEW_USER
78 |
79 |
80 | Use the &cfcli; to assign space and org roles. Run cf help
81 | -a for a complete command listing, or see
82 | Creating
83 | and Managing Users with the &cfcli;.
84 |
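As a short sketch of the role commands referenced above (the org and space
names are placeholders):

&prompt.user;cf set-org-role NEW_USER MY_ORG OrgManager
&prompt.user;cf set-space-role NEW_USER MY_ORG MY_SPACE SpaceDeveloper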
85 |
86 |
87 | Changing User Passwords with Stratos
88 |
89 |
90 | The Stratos Web UI provides a form for changing passwords on your profile
91 | page. Click the overflow menu button on the top right to access your
92 | profile, then click the edit button on your profile page. You can manage
93 | your password and username on this page.
94 |
95 |
96 |
97 | Stratos Profile Page
98 |
99 |
100 |
101 |
102 |
103 |
104 |
105 |
106 |
107 |
108 |
109 | Stratos Edit Profile Page
110 |
111 |
112 |
113 |
114 |
115 |
116 |
117 |
118 |
119 |
120 |
121 |
--------------------------------------------------------------------------------
/xml/common_intro_typografie_i.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | %entities;
6 | ]>
7 |
8 |
12 |
13 |
14 | Documentation Conventions
15 |
16 |
17 |
18 | yes
19 |
20 |
21 |
22 |
23 | The following notices and typographical conventions are used in this
24 | documentation:
25 |
26 |
27 |
28 |
29 |
30 | /etc/passwd: directory names and file names
31 |
32 |
33 |
34 |
35 | PLACEHOLDER: replace
36 | PLACEHOLDER with the actual value
37 |
38 |
39 |
40 |
41 | PATH: the environment variable PATH
42 |
43 |
44 |
45 |
46 | ls, : commands, options, and
47 | parameters
48 |
49 |
50 |
51 |
52 | user: users or groups
53 |
54 |
55 |
56 |
57 | package name : name of a package
58 |
59 |
60 |
61 |
62 | ,
63 | F1 : a key to press or a key combination; keys
64 | are shown in uppercase as on a keyboard
65 |
66 |
67 |
68 |
69 | File, File Save
70 | As : menu items, buttons
71 |
72 |
73 |
74 |
75 | This paragraph is only relevant for the &amd64;/&intel64; architecture. The
76 | arrows mark the beginning and the end of the text block.
77 |
78 |
79 | This paragraph is only relevant for the architectures
80 | &zseries; and &power;. The arrows
81 | mark the beginning and the end of the text block.
82 |
83 |
84 |
85 |
86 | Dancing Penguins (Chapter
87 | Penguins, ↑Another Manual): This is a reference
88 | to a chapter in another manual.
89 |
90 |
91 |
92 |
93 | Commands that must be run with &rootuser; privileges. Often you can also
94 | prefix these commands with the sudo command to run them
95 | as a non-privileged user.
96 |
97 | &prompt.root;command
98 | &prompt.user;sudo command
99 |
100 |
101 |
102 | Commands that can be run by non-privileged users.
103 |
104 | &prompt.user;command
105 |
106 |
107 |
108 | Notices
109 |
110 |
111 | Warning Notice
112 |
113 | Vital information you must be aware of before proceeding. Warns you about
114 | security issues, potential loss of data, damage to hardware, or physical
115 | hazards.
116 |
117 |
118 |
119 | Important Notice
120 |
121 | Important information you should be aware of before proceeding.
122 |
123 |
124 |
125 | Note Notice
126 |
127 | Additional information, for example about differences in software
128 | versions.
129 |
130 |
131 |
132 | Tip Notice
133 |
134 | Helpful information, like a guideline or a piece of practical advice.
135 |
136 |
137 |
138 |
139 |
140 |
--------------------------------------------------------------------------------
/xml/book_cap_guides.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 |
8 | %entities;
9 | ]>
10 |
14 |
15 | &guide;s&productname;
16 | &productnumber;
17 |
18 |
19 |
20 |
21 |
22 | Introducing &productname;, a software platform for cloud-native application
23 | deployment based on &kubecf; and &kube;.
24 |
25 |
26 |
27 | no
28 |
29 |
30 |
33 |
34 |
37 |
38 | Overview of &productname;
39 |
40 |
41 |
42 |
43 |
46 |
47 | Deploying &productname;
48 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 |
58 |
59 |
60 |
63 |
64 | &productname; Administration
65 |
66 |
67 |
68 |
69 |
70 |
71 |
72 |
73 |
74 |
75 |
76 |
77 |
78 |
79 |
80 |
81 |
82 |
83 |
86 |
87 | &productname; User Guide
88 |
89 |
90 |
91 |
94 |
95 | Troubleshooting
96 |
97 |
98 |
99 |
102 |
103 |
104 |
105 |
--------------------------------------------------------------------------------
/xml/app_cf_operator_values_yaml.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | %entities;
6 | ]>
7 |
13 | Complete suse/cf-operator values.yaml File
14 |
15 |
16 | This is the complete output of helm inspect values suse/cf-operator for
17 | the current &productname; &productnumber; release.
18 |
19 |
20 | ## Default values for Quarks Operator Helm Chart.
21 | ## This is a YAML-formatted file.
22 | ## Declare variables to be passed into your templates.
23 |
24 |
25 | # applyCRD is a boolean to control the installation of CRD's.
26 | applyCRD: true
27 |
28 | cluster:
29 | # domain is the Kubernetes cluster domain
30 | domain: "cluster.local"
31 |
32 | # fullnameOverride overrides the release name
33 | fullnameOverride: ""
34 |
35 | # image is the docker image of quarks job.
36 | image:
37 | # repository that provides the operator docker image.
38 | repository: quarks-operator
39 | # org that provides the operator docker image.
40 | org: registry.suse.com/cap
41 | # tag of the operator docker image
42 | tag: v7.2.1-0.gaeb6ef3
43 |
44 | # creates a service account for coredns-quarks; the name must be unique as it is used for the cluster role too.
45 | corednsServiceAccount:
46 | create: true
47 | name: coredns-quarks
48 |
49 | # logrotateInterval is the time between logrotate calls for instance groups in minutes
50 | logrotateInterval: 1440
51 |
52 | # logLevel defines from which level the logs should be printed (trace,debug,info,warn).
53 | logLevel: debug
54 |
55 | # nameOverride overrides the chart name part of the release name
56 | nameOverride: ""
57 |
58 | # workers are the maximum number of workers for the respective controller.
59 | workers:
60 | boshdeployment: 1
61 |
62 | operator:
63 | webhook:
64 | # host under which the webhook server can be reached from the cluster
65 | host: ~
66 | # port the webhook server listens on
67 | port: "2999"
68 | # boshDNSDockerImage is the docker image used for emulating bosh DNS (a CoreDNS image).
69 | boshDNSDockerImage: "registry.suse.com/cap/coredns:0.1.0-1.6.7-bp152.1.19"
70 | hookDockerImage: "registry.suse.com/cap/kubecf-kubectl:v1.20.2"
71 |
72 | # serviceAccount contains the configuration
73 | # values of the service account used by quarks-operator.
74 | serviceAccount:
75 | # create is a boolean to control the creation of service account name.
76 | create: true
77 | # name of the service account.
78 | name:
79 |
80 | global:
81 | # Context Timeout for each K8s API request in seconds.
82 | contextTimeout: 300
83 | # MeltdownDuration is the duration (in seconds) of the meltdown period, in which we
84 | # postpone further reconciles for the same resource
85 | meltdownDuration: 60
86 | # MeltdownRequeueAfter is the duration (in seconds) for which we delay the requeuing of the reconcile
87 | meltdownRequeueAfter: 30
88 | image:
89 | # pullPolicy defines the policy used for pulling docker images.
90 | pullPolicy: IfNotPresent
91 | # credentials is used for pulling docker images.
92 | credentials: ~
93 | # username:
94 | # password:
95 | # servername:
96 | # monitoredID is a string that has to match the content of the 'monitored' label in each monitored namespace.
97 | monitoredID: cfo
98 | operator:
99 | webhook:
100 | # useServiceReference is a boolean to control the use of the
101 | # service reference in the webhook spec instead of a url.
102 | useServiceReference: true
103 | rbac:
104 | # create is a boolean to control the installation of rbac resources.
105 | create: true
106 | singleNamespace:
107 | # create is a boolean to control the creation of resources for a simplified setup
108 | create: true
109 | # name is the name of the single namespace, being watched for BOSH deployments.
110 | name: kubecf
111 |
112 | quarks-job:
113 | logLevel: info
114 | serviceAccount:
115 | # create is a boolean to control the creation of service account name.
116 | create: true
117 | # name of the service account.
118 | name:
119 | persistOutputClusterRole:
120 | # create is a boolean to control the creation of the persist output cluster role
121 | create: true
122 | # name of the cluster role.
123 | name: qjob-persist-output
124 | singleNamespace:
125 | createNamespace: false
126 |
127 | quarks-secret:
128 | logLevel: info
129 |
130 | quarks-statefulset:
131 | logLevel: info
132 |
133 |
134 |
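Any of these defaults can be overridden at deploy time with --set or a
values file, following the same upgrade pattern used elsewhere in this
guide. A minimal sketch that lowers the operator log level while keeping
the single-namespace setting from the deployment chapters:

&prompt.user;helm upgrade cf-operator suse/cf-operator \
--namespace cf-operator \
--set "global.singleNamespace.name=kubecf" \
--set "logLevel=info" \
--version &operator_chart;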
--------------------------------------------------------------------------------
/xml/cap_depl_air_gap_registry.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | %entities;
6 | ]>
7 |
11 |
12 | Setting Up a Registry for an Air Gapped Environment
13 |
14 |
15 | yes
16 |
17 |
18 | &readmefirst;
19 |
20 | &cap;, which consists of &docker; images, is deployed to a &kube; cluster
21 | through &helm;. These images are hosted on a &docker; registry at
22 | registry.suse.com. In an air gapped environment,
23 | registry.suse.com will not be accessible. You will need to
24 | create a registry, and populate it with the images used by &cap;.
25 |
26 |
27 | This chapter describes how to load your registry with the necessary images to
28 | deploy &cap; in an air gapped environment.
29 |
30 |
31 | Prerequisites
32 |
33 |
34 | The following prerequisites are required:
35 |
36 |
37 |
38 |
39 |
40 | The &docker; Command Line. See
41 |
42 | for more information.
43 |
44 |
45 |
46 |
47 | A &docker; registry has been created in your air gapped environment. Refer to
48 | the &docker; documentation at
49 | for instructions.
50 |
51 |
52 |
53 |
54 |
55 |
56 | Mirror Images to Registry
57 |
58 | All the &cap; &helm; charts include an imagelist.txt
59 | file that lists all images from the registry.suse.com
60 | registry under the cap organization. They can be mirrored
61 | to a local registry with the following script.
62 |
63 |
64 | Replace the value of MIRROR with your registry's domain.
65 |
66 | #!/bin/bash
67 |
68 | MIRROR=MY_REGISTRY.COM
69 |
70 | set -ex
71 |
72 | function mirror {
73 | CHART=$1
74 | CHARTDIR=$(mktemp -d)
75 | helm fetch suse/$1 --untar --untardir=${CHARTDIR}
76 | IMAGES=$(cat ${CHARTDIR}/**/imagelist.txt)
77 | for IMAGE in ${IMAGES}; do
78 | echo $IMAGE
79 | docker pull registry.suse.com/cap/$IMAGE
80 | docker tag registry.suse.com/cap/$IMAGE $MIRROR/cap/$IMAGE
81 | docker push $MIRROR/cap/$IMAGE
82 | done
83 | docker save -o ${CHART}-images.tar.gz \
84 | $(perl -E "say qq(registry.suse.com/cap/\$_) for @ARGV" ${IMAGES})
85 | rm -r ${CHARTDIR}
86 | }
87 |
88 | mirror cf-operator
89 | mirror kubecf
90 | mirror console
91 | mirror metrics
92 | mirror minibroker
93 |
94 |
95 | The script above will both mirror to a local registry and save the images in
96 | a local tarball that can be restored with
97 | docker load -i foo-images.tar.gz. In general, only one of these
98 | mechanisms will be needed.
99 |
100 |
101 | Also take note of the following regarding the script provided above.
102 |
103 |
104 |
105 |
106 | The nginx-ingress chart is not supported by this
107 | mechanism because it is not part of the cap organization
108 | (and cannot be configured with the
109 | kube.registry.hostname setting at deploy time either).
110 |
111 |
112 | Instead manually parse the &helm; chart for the image names and do a manual
113 | docker pull && docker tag && docker push
114 | on them.
115 |
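A sketch of that manual mirroring for a single image; the image name and
tag here are hypothetical placeholders for ones parsed out of the chart:

&prompt.user;docker pull SOURCE_REGISTRY/IMAGE:TAG
&prompt.user;docker tag SOURCE_REGISTRY/IMAGE:TAG MY_REGISTRY.COM/IMAGE:TAG
&prompt.user;docker push MY_REGISTRY.COM/IMAGE:TAG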
116 |
117 |
118 |
119 | Before deploying &cap; using helm install, ensure the
120 | following in your
121 | &values-filename; has been updated to point to your
122 | registry, and not registry.suse.com.
123 |
124 | kube:
125 | registry:
126 | # example registry domain
127 | hostname: "MY_REGISTRY.COM"
128 | username: ""
129 | password: ""
130 | organization: "cap"
131 |
132 |
133 |
134 |
--------------------------------------------------------------------------------
/xml/cap_admin_create_admin.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | %entities;
6 | ]>
7 |
11 |
12 | Creating Admin Users
13 |
14 |
15 | yes
16 |
17 |
18 |
19 | This chapter provides an overview of how to create additional
20 | administrators for your &cap; cluster.
21 |
22 |
23 | Prerequisites
24 |
25 |
26 | The following prerequisites are required to create
27 | additional &cap; cluster administrators:
28 |
29 |
30 |
31 |
32 | &cfcli-prereq;
33 |
34 |
35 | &uaac-prereq;
36 |
37 |
38 |
39 |
40 | Creating an Example &cap; Cluster Administrator
41 |
42 | The following example demonstrates the steps required to create a new
43 | administrator user for your &cap; cluster. Note that creating administrator
44 | accounts must be done using the UAAC and cannot be done using the &cfcli;.
45 |
46 |
47 | &uaac-target;
48 | &uaac-authenticate;
49 |
50 |
51 | Create a new user:
52 |
53 | &prompt.user;uaac user add NEW_ADMIN --password PASSWORD --emails new-admin@example.com --zone kubecf
54 |
55 |
56 |
57 | Add the new user to the following groups to grant administrator privileges
58 | to the cluster (see
59 | for information on privileges provided by each group):
60 |
61 | &prompt.user;uaac member add scim.write NEW_ADMIN --zone kubecf
62 |
63 | &prompt.user;uaac member add scim.read NEW_ADMIN --zone kubecf
64 |
65 | &prompt.user;uaac member add cloud_controller.admin NEW_ADMIN --zone kubecf
66 |
67 | &prompt.user;uaac member add clients.read NEW_ADMIN --zone kubecf
68 |
69 | &prompt.user;uaac member add clients.write NEW_ADMIN --zone kubecf
70 |
71 | &prompt.user;uaac member add doppler.firehose NEW_ADMIN --zone kubecf
72 |
73 | &prompt.user;uaac member add routing.router_groups.read NEW_ADMIN --zone kubecf
74 |
75 | &prompt.user;uaac member add routing.router_groups.write NEW_ADMIN --zone kubecf
76 |
77 |
78 |
79 |
80 | Log into your &cap; deployment as the newly created administrator:
81 |
82 | &prompt.user;cf api --skip-ssl-validation https://api.example.com
83 |
84 | &prompt.user;cf login -u NEW_ADMIN
85 |
86 |
87 |
88 |
89 | The following commands can be used to verify the new administrator account has sufficient permissions:
90 |
91 | &prompt.user;cf create-shared-domain TEST_DOMAIN.COM
92 |
93 | &prompt.user;cf set-org-role NEW_ADMIN org OrgManager
94 |
95 | &prompt.user;cf create-buildpack TEST_BUILDPACK /tmp/ruby_buildpack-cached-sle15-v1.7.30.1.zip 1
96 |
97 |
98 | If the account has sufficient permissions, you should not receive an authorization error message similar to the following:
99 |
100 | FAILED
101 | Server error, status code: 403, error code: 10003, message: You are not authorized to perform the requested action
102 |
103 |
104 | See for other administrator-specific commands that can be run to confirm sufficient permissions are provided.
105 |
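The group memberships can also be checked from the UAA side. A sketch using
the UAAC SCIM filter syntax; treat the exact invocation as an assumption:

&prompt.user;uaac users 'userName eq "NEW_ADMIN"' --zone kubecf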
106 |
107 |
108 |
109 |
110 |
111 |
112 |
--------------------------------------------------------------------------------
/xml/cap_admin_credhub.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | %entities;
6 | ]>
7 |
11 |
12 | Integrating CredHub with &productname;
13 |
14 |
15 | yes
16 |
17 |
18 |
19 | &productname; supports CredHub integration. You should already have a working
20 | CredHub instance and a CredHub service on your cluster; then apply the steps in
21 | this chapter to connect &productname;.
22 |
23 |
24 | Installing the CredHub Client
25 |
26 |
27 | Start by creating a new directory for the CredHub client on your local
28 | workstation, then download and unpack the CredHub client. The following
29 | example is for the 2.2.0 Linux release. For other platforms and current
30 | releases, see the cloudfoundry-incubator/credhub-cli at
31 |
32 |
33 |
34 | &prompt.user;mkdir chclient
35 | &prompt.user;cd chclient
36 | &prompt.user;wget https://github.com/cloudfoundry-incubator/credhub-cli/releases/download/2.2.0/credhub-linux-2.2.0.tgz
37 | &prompt.user;tar zxf credhub-linux-2.2.0.tgz
38 |
39 |
40 |
41 | Enabling and Disabling CredHub
42 |
43 |
44 | CredHub is enabled by default. To disable it, add the following
45 | block to your &values-filename; file.
46 |
47 |
48 | features:
49 | credhub:
50 | enabled: false
51 |
52 |
53 |
54 | To enable CredHub again, update the above block in your &values-filename; so that
55 | enabled is set to true.
56 |
57 |
58 |
59 | &config-new-vs-existing;
60 |
61 |
62 |
63 | On occasion, the credhub pod may fail to start due to
64 | database migration failures; this has been spotted intermittently on
65 | &aks-full; and to a lesser extent, other public clouds.
66 | In these situations, manual intervention is required to track the last
67 | completed transaction in the credhub_user database and
68 | update the flyway schema history table with the record of the last
69 | completed transaction. Please contact support for further instructions.
70 |
71 |
72 |
73 |
74 | Connecting to the CredHub Service
75 |
76 |
77 | Set environment variables for the CredHub client, your CredHub service
78 | location, and &cap; namespace. In these guides the example namespace is
79 | kubecf:
80 |
81 | &prompt.user;CH_CLI=~/chclient/credhub
82 | &prompt.user;CH_SERVICE=https://credhub.example.com
83 | &prompt.user;NAMESPACE=kubecf
84 |
85 |
86 | Set up the CredHub service location:
87 |
88 |
89 | &prompt.user;SECRET="$(kubectl get secrets --namespace "${NAMESPACE}" | awk '/^secrets-/ { print $1 }')"
90 | &prompt.user;CH_SECRET="$(kubectl get secrets --namespace "${NAMESPACE}" "${SECRET}" --output jsonpath="{.data['uaa-clients-credhub-user-cli-secret']}"|base64 --decode)"
91 | &prompt.user;CH_CLIENT=credhub_user_cli
92 | &prompt.user;echo Service ......@ $CH_SERVICE
93 | &prompt.user;echo CH cli Secret @ $CH_SECRET
94 |
95 |
96 | Set the CredHub target through its &kube; service, then log into CredHub:
97 |
98 |
99 | &prompt.user;"${CH_CLI}" api --skip-tls-validation --server "${CH_SERVICE}"
100 | &prompt.user;"${CH_CLI}" login --client-name="${CH_CLIENT}" --client-secret="${CH_SECRET}"
101 |
102 |
103 | Test your new connection by inserting and retrieving some fake credentials:
104 |
105 |
106 | &prompt.user;"${CH_CLI}" set --name FOX --type value --value 'fox over lazy dog'
107 | &prompt.user;"${CH_CLI}" set --name DOG --type user --username dog --password fox
108 | &prompt.user;"${CH_CLI}" get --name FOX
109 | &prompt.user;"${CH_CLI}" get --name DOG
110 |
111 |
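Once the round trip works, the fake credentials can be removed again; a
small cleanup sketch using the same client variables:

&prompt.user;"${CH_CLI}" delete --name FOX
&prompt.user;"${CH_CLI}" delete --name DOG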
112 |
113 |
118 |
119 |
--------------------------------------------------------------------------------
/xml/cap_admin_app_domains.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | %entities;
6 | ]>
7 |
11 |
12 |
15 | Custom Application Domains
16 |
17 |
18 | yes
19 |
20 |
21 |
22 | In a standard &kubecf; deployment, applications will use the same domain as the
23 | one configured in your &values-filename; for &kubecf;.
24 | For example, if DOMAIN is set as
25 | EXAMPLE.COM in your
26 | &values-filename; and you deploy an application
27 | called MY_APP then the application's URL will be
28 | MY_APP.EXAMPLE.COM.
29 |
30 |
31 | This chapter describes the changes required to allow applications to use a
32 | separate domain.
33 |
34 |
35 | Customizing Application Domains
36 |
37 |
38 | Begin by adding the following to your &values-filename;. Replace
39 | MY_APP_DOMAIN.COM with the domain to use with your
40 | applications:
41 |
42 |
43 | bosh:
44 | instance_groups:
45 | - name: api-group
46 | jobs:
47 | - name: cloud_controller_ng
48 | properties:
49 | app_domains:
50 | - MY_APP_DOMAIN.COM
51 |
52 |
53 |
54 | If this is an initial deployment, use helm install to
55 | deploy kubecf:
56 |
57 | &prompt.user;kubectl create namespace kubecf
58 |
59 | &prompt.user;helm install kubecf suse/kubecf \
60 | --namespace kubecf \
61 | --values &values-file;
62 |
63 |
64 |
65 | If this is an existing deployment, use helm upgrade to
66 | apply the change:
67 |
68 | &prompt.user;helm upgrade kubecf suse/kubecf \
69 | --namespace kubecf \
70 | --values &values-file; \
71 | --version &kubecf_chart;
72 |
73 |
74 | &kubecf-deploy-complete;
75 |
76 |
77 | When the kubecf deployment is complete, do the following to confirm
78 | custom application domains have been configured correctly.
79 |
80 |
81 |
82 | Run cf curl /v2/info and verify the &kubecf; domain is not
83 | MY_APP_DOMAIN.COM:
84 |
85 |
86 | &prompt.user;cf api --skip-ssl-validation https://api.EXAMPLE.COM
87 | &prompt.user;cf curl /v2/info | grep endpoint
88 |
89 |
90 |
91 | Deploy an application and examine the routes field to
92 | verify MY_APP_DOMAIN.COM is being used:
93 |
94 |
95 | &prompt.user;cf login
96 | &prompt.user;cf create-org MY_ORG
97 | &prompt.user;cf create-space MY_SPACE -o MY_ORG
98 | &prompt.user;cf target -o MY_ORG -s MY_SPACE
99 | &prompt.user;cf push MY_APP
101 | Pushing app MY_APP to org MY_ORG / space MY_SPACE as admin...
102 | Getting app info...
103 | Creating app with these attributes...
104 | name: MY_APP
105 | path: /path/to/MY_APP
106 | routes:
107 | + MY_APP.MY_APP_DOMAIN.COM
108 |
109 | Creating app MY_APP...
110 | Mapping routes...
111 |
112 | ...
113 |
114 | Waiting for app to start...
115 |
116 | name: MY_APP
117 | requested state: started
118 | instances: 1/1
119 | usage: 1G x 1 instances
120 | routes: MY_APP.MY_APP_DOMAIN.COM
121 | last uploaded: Mon 14 Jan 11:08:02 PST 2019
122 | stack: sle15
123 | buildpack: ruby
124 | start command: bundle exec rackup config.ru -p $PORT
125 |
126 | state since cpu memory disk details
127 | #0 running 2019-01-14T19:09:42Z 0.0% 2.7M of 1G 80.6M of 1G
128 |
129 |
130 |
131 |
--------------------------------------------------------------------------------
/xml/cap_admin_upgrade.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | %entities;
6 | ]>
7 |
11 |
12 | Upgrading &productname;
13 |
14 |
15 | yes
16 |
17 |
18 |
19 | &productname; upgrades are delivered as container images from the &suse;
20 | registry and applied with &helm;.
21 |
22 |
23 | For additional upgrade information, always review the release notes
24 | published at
25 | .
26 |
27 |
28 | Important Considerations
29 |
30 |
31 | Before performing an upgrade, be sure to take note of the following:
32 |
33 |
34 |
35 |
36 | Perform Upgrades in Sequence
37 |
38 |
39 | &cap; only supports upgrading releases in sequential order. If there are
40 | any intermediate releases between your current release and your target
41 | release, they must be installed. Skipping releases is not supported.
42 |
43 |
44 |
45 |
46 | Preserve &helm; Value Changes during Upgrades
47 |
48 |
49 | During a helm upgrade, always ensure your
50 | &values-filename; file is passed. This will
51 | preserve any previously set &helm; values while allowing additional
52 | &helm; value changes to be made.
53 |
54 |
55 |
56 |
57 | helm rollback Is Not Supported
58 |
59 |
60 | helm rollback is not supported in &productname; or in
61 | upstream &cf;, and may break your cluster completely, because database
62 | migrations only run forward and cannot be reversed. The database schema can
63 | change over time. During upgrades, pods of both the current and the next
64 | release may run concurrently, so the schema must stay compatible with the
65 | immediately previous release. But there is no way to guarantee such
66 | compatibility for future upgrades. One way to address this is to perform a
67 | full raw data backup and restore. (See
68 | )
69 |
70 |
71 |
72 |
73 |
74 |
75 |
76 | Upgrading &productname;
77 |
78 |
79 | The supported upgrade method is to install all upgrades, in order. Skipping
80 | releases is not supported. This table matches the Helm chart versions to
81 | each release:
82 |
83 |
84 | &releases-table;
85 |
86 |
87 | Use helm list to see the version of your installed
88 | release. Perform sequential upgrades until you reach the desired
89 | &productname; release.
90 |
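A sketch of that version check, assuming the release and namespace names
used in this guide:

&prompt.user;helm list --namespace cf-operator
&prompt.user;helm list --namespace kubecf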
91 |
92 |
93 | The example procedure below demonstrates how to upgrade to the current
94 | release. If you are not upgrading to the current release, replace the
95 | version with the version you intend to upgrade to.
96 |
97 |
98 |
99 |
100 |
101 | Begin by upgrading &operator;.
102 |
103 | &prompt.user;helm upgrade cf-operator suse/cf-operator \
104 | --namespace cf-operator \
105 | --set "global.singleNamespace.name=kubecf" \
106 | --version &operator_chart;
107 |
108 |
109 |
110 |
111 | Wait until &operator; is successfully upgraded before proceeding. Monitor
112 | the status of your &operator; upgrade using the watch
113 | command.
114 |
115 | &prompt.user;watch --color 'kubectl get pods --namespace cf-operator'
116 |
117 |
118 |
119 | When the &operator; upgrade is completed, upgrade &kubecf;.
120 |
121 | &prompt.user;helm upgrade kubecf suse/kubecf \
122 | --namespace kubecf \
123 | --values &values-file; \
124 | --version &kubecf_chart;
125 |
126 |
127 |
128 |
129 | Monitor the status of your &kubecf; upgrade using the watch
130 | command.
131 |
132 | &prompt.user;watch --color 'kubectl get pods --namespace kubecf'
133 |
134 |
135 |
136 |
137 |
--------------------------------------------------------------------------------
/xml/common_intro_support_statement_i.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | %entities;
6 | ]>
7 |
10 | Support Statement for &productname;
11 |
12 |
13 |
14 |
15 |
16 |
17 | yes
18 |
19 |
20 |
21 |
22 | To receive support, you need an appropriate subscription with &suse;. For more
23 | information, see
24 | .
25 |
26 |
27 | The following definitions apply:
28 |
29 |
30 | Version Support
31 |
32 | Technical Support and Troubleshooting (L1 - L2): SUSE will provide technical
33 | support and troubleshooting for version 2.1 until May 31, 2022.
34 |
35 |
36 | Patches and updates (L3): SUSE will provide patches and updates for 2.1 (e.g.
37 | 2.1.1, 2.1.2) to resolve critical bugs or address high severity security issues.
38 | The patches may include updates from upstream Cloud Foundry releases.
39 |
40 |
41 | &productname; closely follows upstream &cf; releases, which may implement
42 | fixes and changes that are not backwards compatible with previous releases.
43 | &suse; will backport patches for critical bugs and security issues on a
44 | best-effort basis.
45 |
46 |
47 |
48 | Platform Support
49 |
50 | &productname; is fully supported on Amazon EKS, Microsoft Azure AKS, Google
51 | GKE, Rancher Kubernetes Engine (RKE), and RKE Government (RKE2). Each release
52 | is tested by &productname; QA on these platforms.
53 |
54 |
55 | &productname; is fully supported on &caasp;, wherever it happens to be
56 | installed. If &caasp; is supported on a particular cloud service provider
57 | (CSP), the customer can get support for &productname; in that context.
58 |
59 |
60 | &suse; can provide support for &productname; on 3rd party/generic &kube; on a
61 | case-by-case basis provided:
62 |
63 |
64 |
65 |
66 | The &kube; cluster satisfies the Requirements listed here at
67 | .
68 |
69 |
70 |
71 |
72 | The kube-ready-state-check.sh script has been run on
73 | the target &kube; cluster and does not show any configuration problems.
74 |
75 |
76 |
77 |
78 | A &suse; Services or Sales Engineer has verified that &productname; works
79 | correctly on the target &kube; cluster.
80 |
81 |
82 |
83 |
84 |
85 | Technology Previews
86 |
87 | Technology previews are packages, stacks, or features delivered by &suse;
88 | to provide glimpses into upcoming innovations. The previews are included for
89 | your convenience to give you the chance to test new technologies within your
90 | environment. We would appreciate your feedback! If you test a technology
91 | preview, please contact your &suse; representative and let them know about
92 | your experience and use cases. Your input is helpful for future development.
93 |
94 |
95 | However, technology previews come with the following limitations:
96 |
97 |
98 |
99 |
100 | Technology previews are still in development. Therefore, they may be functionally
101 | incomplete, unstable, or in other ways not suitable
102 | for production use.
103 |
104 |
105 |
106 |
107 | Technology previews are not supported.
108 |
109 |
110 |
111 |
112 | Details and functionality of technology previews are subject to change.
113 | As a result, upgrading to subsequent releases of a technology preview may
114 | be impossible and require a fresh installation.
115 |
116 |
117 |
118 |
119 | Technology previews can be dropped at any time, for example, if &suse;
120 | discovers that a preview does not meet customer or market needs, or does
121 | not comply with enterprise standards. &suse; does not commit to
122 | providing a supported version of such technologies in the future.
123 |
124 |
125 |
126 |
127 |
128 | For an overview of technology previews shipped with your product, see the
129 | release notes at .
130 |
131 |
132 |
133 |
--------------------------------------------------------------------------------
/xml/cap_depl_eirini.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | %entities;
6 | ]>
7 |
11 |
12 | Eirini
13 |
14 |
15 | yes
16 |
17 |
18 |
19 |
20 | Eirini, an alternative to Diego, is a scheduler for the &cf; Application
21 | Runtime (CFAR) that runs &cf; user applications in &kube;. For details about
22 | Eirini, see
23 | and
24 |
25 |
26 |
27 | Different schedulers and stacks have different memory requirements for
28 | applications. Not every combination is tested, so there is no universal
29 | memory setting for &cap;. Because the requirement depends on the application
30 | deployed, it is up to the user to adjust the setting for their application.
31 |
32 |
33 |
34 | Limitations and Other Considerations
35 |
36 | When using Eirini, it is important to take the following into consideration:
37 |
38 |
39 |
40 |
41 | If you are upgrading from &productname; 2.0.1 to 2.1.0 and plan to convert
42 | from Diego to Eirini, please upgrade your Diego environment to &productname;
43 | 2.1.0 first and then migrate to Eirini, because earlier CAP versions relied
44 | on a technical preview version of Eirini.
45 |
46 |
47 | In this situation, your current applications relying on the
48 | cflinuxfs3 stack need to be converted to the
49 | sle15 stack. You can re-push your applications with
50 | cf push APP_NAME -s sle15 to do so; otherwise, your
51 | applications will crash on Eirini.
52 |
53 |
54 |
55 |
56 | Applications on Eirini require slightly more memory than on Diego. Based
57 | on testing, add an additional 32 MB to your application's manifest. The
58 | increase may vary, depending on your application.
59 |
60 |
61 |
62 |
63 | TCP routing is not available in Eirini deployments at this time.
64 |
65 |
66 |
67 |
68 | Eirini requires the k8s-metrics-server to be installed
69 | on the &kube; environment where &productname; is installed in order for
70 | Stratos Metrics to work.
71 |
72 |
73 |
74 |
75 | Stratos Metrics will not show disk stats on Eirini.
76 |
77 |
78 |
79 |
80 | When there is a &kube; outage, Eirini will not automatically restart
81 | applications when the cluster returns. At present, you need to start them
82 | manually.
83 |
84 |
85 |
86 |
87 |
88 | Enabling Eirini
89 |
90 |
91 |
92 |
93 | To enable Eirini, and disable Diego, add the following to your
94 | &values-filename; file.
95 |
96 |
97 | features:
98 | eirini:
99 | enabled: true
100 |
101 |
102 | When Eirini is enabled, both features.suse_default_stack
103 | and features.suse_buildpacks must be enabled as well.
104 | A cflinuxfs3 Eirini image is currently not available, and the &suse; stack
105 | must be used. By default, both the &suse; stack and buildpacks are enabled.
106 |
107 |
108 |
109 |
110 |
111 | After enabling Eirini, you will still see the
112 | diego-api pod. This is normal behavior because the Diego pod has a component required by Eirini.
113 |
114 |
115 |
116 |
117 | Eirini will only work on a cluster that has the parameter --cluster-domain set to cluster.local.
118 |
119 |
120 |
121 |
122 |
123 |
124 |
125 | Deploy kubecf.
126 |
127 |
128 | Refer to the following for platform-specific instructions:
129 |
130 | &deployment-platforms;
131 |
132 |
133 |
134 | In order for Eirini to report application metrics, Metrics Server
135 | (https://github.com/kubernetes-sigs/metrics-server) must
136 | be installed.
137 |
138 |
139 | Note that --kubelet-insecure-tls is not recommended for
140 | production usage, but can be useful in test clusters with self-signed
141 | Kubelet serving certificates. For production, use
142 | --tls-private-key-file.
143 |
144 | &prompt.user;helm install metrics-server stable/metrics-server --set args[0]="--kubelet-preferred-address-types=InternalIP" --set args[1]="--kubelet-insecure-tls"
145 |
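   |
   | Once the metrics-server deployment is ready, you can confirm that metrics
   | are flowing with a simple check (a sketch; kubectl top returns data only
   | after the Metrics Server has scraped the kubelets, which can take a
   | minute or two):
   |
   | &prompt.user;kubectl top nodes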
146 |
147 |
148 |
149 |
--------------------------------------------------------------------------------
/xml/cap_admin_memory_limits.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | %entities;
6 | ]>
7 |
11 |
12 | Container Memory Limits and Requests
13 |
14 |
15 | yes
16 |
17 |
18 |
19 | In &productname;, containers have predefined memory limits and request sizes.
20 | Depending on the workload, these may need to be adjusted in some cases.
21 |
22 |
23 | Enabling and Disabling Memory Limits and Request Sizes
24 |
25 | By default, memory limits and request sizes are enabled. To disable them, add
26 | the following block to your &values-filename; file.
27 |
28 | features:
29 | memory_limits:
30 | enabled: false
31 |
32 |
33 | To enable memory limits again, update the above block in your
34 | &values-filename; so that enabled is set to
35 | true.
36 |
37 |
38 |
39 | &config-new-vs-existing;
40 |
41 |
42 | Configuring Memory Limits and Request Sizes
43 |
44 | Configuring memory limits and request sizes requires that
45 | features.memory_limits is enabled. The default memory limits
46 | and request sizes can be found by examining the resources
47 | block at
48 | .
49 | To configure memory limits and request sizes, add a
50 | resources block to your &values-filename;. It contains a
51 | mapping of instance groups to jobs to processes. The process then contains a
52 | resource definition with limits and requests. All values are integers and
53 | represent the number of megabytes (Mi) for the given limit or request. A fully
54 | expanded tree looks like:
55 |
56 | resources:
57 | some_ig:
58 | some_job:
59 | some_process:
60 | memory:
61 | limit: ~
62 | request: ~
63 |
64 |
65 | Each level can define a $defaults resource definition that
66 | will be applied to all processes below it that do not have their own
67 | definition (or a default further down the tree, closer to them):
68 |
69 | resources:
70 | '$defaults':
71 | memory:
72 | limit: ~
73 | request: ~
74 | some_ig:
75 | '$defaults': { ... }
76 | some_job:
77 | '$defaults': { ... }
78 | some_process: ~
79 |
80 |
81 | For convenience, a $defaults value can be just an integer.
82 | This
83 |
84 | resources:
85 | '$defaults': 32
86 |
87 |
88 | is a shortcut for:
89 |
90 | resources:
91 | '$defaults': {memory: {limit: 32, request: ~}, cpu: {limit: ~, request: ~}}
92 |
93 |
94 | In addition, an instance group, job, or process can also be set to just an
95 | integer. This:
96 |
97 | resources:
98 | some_ig: 32
99 |
100 |
101 | is a shortcut for:
102 |
103 | resources:
104 | some_ig:
105 | '$defaults': 32
106 |
107 |
108 | Of course, this means that any lower-level jobs and processes will have to
109 | share this specific resource definition, as there is no way to explicitly
110 | enumerate the jobs or processes when the value is just an integer and not a
111 | map.
112 |
113 |
114 | Note that there is a difference between this
115 |
116 | resources:
117 | '$defaults': 32
118 | some_ig: 64
119 |
120 |
121 | and this:
122 |
123 | resources:
124 | '$defaults': 32
125 | some_ig:
126 | some_job: 64
127 |
128 |
129 | The former definition sets the memory limit of
130 | all jobs under some_ig
131 | while the latter only specifies the limit for some_job. If
132 | there are more jobs in some_ig, then they will use the
133 | global limit (32) and only some_job will use the specific
134 | limit (64).
135 |
136 |
137 | Memory requests have a calculated default value: a configurable percentage
138 | of the limit, raised to at least a configurable minimum value, but never
139 | higher than the limit itself.
140 | These defaults can be configured by
141 | using features.memory_limits.default_request_minimum and
142 | features.memory_limits.default_request_in_percent. The
143 | following is an example configuration where the example values are the
144 | respective defaults.
145 |
146 | features:
147 | memory_limits:
148 | default_request_minimum: 32
149 | default_request_in_percent: 25
150 |
151 |
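   |
   | As a worked example of the calculation under these defaults: a process
   | with a memory limit of 200 Mi gets a default request of max(32, 25% of
   | 200) = 50 Mi; a process with a limit of 64 Mi gets max(32, 16) = 32 Mi;
   | and a process with a limit of 24 Mi gets 24 Mi, because the request is
   | never higher than the limit.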
152 |
153 | &config-new-vs-existing;
154 |
155 |
156 |
--------------------------------------------------------------------------------
/xml/cap_admin_nproc_limits.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | %entities;
6 | ]>
7 |
11 |
12 | Managing Nproc Limits of Pods
13 |
14 |
15 | yes
16 |
17 |
18 |
19 |
20 | Do Not Adjust Without Guidance
21 |
22 | It is not recommended to change these values without the guidance of
23 | &productname; developers. Please contact support for assistance.
24 |
25 |
26 |
27 |
28 | Nproc is the maximum number of processes allowed per user. In the case of
29 | kubecf, the nproc value applies to the
30 | vcap user. In kubecf, there are parameters,
31 | kube.limits.nproc.soft and
32 | kube.limits.nproc.hard, to configure a soft nproc limit
33 | and a hard nproc limit for processes spawned by the vcap
34 | user in kubecf pods. By default, the soft limit is 1024 while
35 | the hard limit is 2048. The soft and hard limits can be changed
36 | to suit your workloads. Note that the limits are applied to all pods.
37 |
38 |
39 |
40 | When configuring the nproc limits, take note that:
41 |
42 |
43 |
44 |
45 |
46 | If the soft limit is set, the hard limit must be set as well.
47 |
48 |
49 |
50 |
51 | If the hard limit is set, the soft limit must be set as well.
52 |
53 |
54 |
55 |
56 | The soft limit cannot be greater than the hard limit.
57 |
58 |
59 |
60 |
61 |
62 | Configuring and Applying Nproc Limits
63 |
64 |
65 | To configure the nproc limits, add the following to your
66 | &values-filename;. Replace
67 | the example values with limits suitable for your workloads:
68 |
69 |
70 | kube:
71 | limits:
72 | nproc:
73 | hard: 3072
74 | soft: 2048
75 |
76 |
77 |
78 | New Deployments
79 |
80 |
81 | For new &productname; deployments, follow the steps below to deploy
82 | &productname; with nproc limits configured:
83 |
84 |
85 |
86 |
87 |
88 | Deploy kubecf:
89 |
90 | &prompt.user;kubectl create namespace kubecf
91 |
92 | &prompt.user;helm install kubecf suse/kubecf \
93 | --namespace kubecf \
94 | --values &values-file;
95 |
96 |
97 |
98 |
99 | Monitor the deployment progress using the watch command:
100 |
101 | &prompt.user;watch --color 'kubectl get pods --namespace kubecf'
102 |
103 |
104 |
105 | Open a shell into any container. The command below opens a shell to the default
106 | container in the blobstore-0 pod:
107 |
108 | &prompt.user;kubectl exec --stdin --tty blobstore-0 --namespace kubecf -- env /bin/bash
109 |
110 |
111 |
112 | Use the vcap user identity:
113 |
114 | &prompt.user;su vcap
115 |
116 |
117 |
118 | Verify the maximum number of processes for the vcap user matches the limits you set:
119 |
120 | &prompt.user;ulimit -u
121 |
122 | &prompt.user;cat /etc/security/limits.conf | grep nproc
123 |
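   |
   | With the example values above, the output would look similar to the
   | following (illustrative; the exact limits.conf entries depend on your
   | configuration):
   |
   | &prompt.user;ulimit -u
   | 2048
   | &prompt.user;cat /etc/security/limits.conf | grep nproc
   | vcap soft nproc 2048
   | vcap hard nproc 3072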
124 |
125 |
126 |
127 |
128 |
129 | Existing Deployments
130 |
131 |
132 | For existing &productname; deployments, follow the steps below to redeploy
133 | &productname; with nproc limits configured:
134 |
135 |
136 |
137 |
138 |
139 | Use helm upgrade to apply the change:
140 |
141 | &prompt.user;helm upgrade kubecf suse/kubecf \
142 | --namespace kubecf \
143 | --values &values-file; \
144 | --version &kubecf_chart;
145 |
146 |
147 |
148 |
149 | Monitor the deployment progress using the watch command:
150 |
151 | &prompt.user;watch --color 'kubectl get pods --namespace kubecf'
152 |
153 |
154 |
155 | Open a shell into any container. The command below opens a shell to the default
156 | container in the blobstore-0 pod:
157 |
158 | &prompt.user;kubectl exec --stdin --tty blobstore-0 --namespace kubecf -- env /bin/bash
159 |
160 |
161 |
162 | Use the vcap user identity:
163 |
164 | &prompt.user;su vcap
165 |
166 |
167 |
168 | Verify the maximum number of processes for the vcap user matches the limits you set:
169 |
170 | &prompt.user;ulimit -u
171 |
172 | &prompt.user;cat /etc/security/limits.conf | grep nproc
173 |
174 |
175 |
176 |
177 |
178 |
179 |
180 |
--------------------------------------------------------------------------------
/xml/cap_user_cf_cli.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | %entities;
6 | ]>
7 |
11 |
12 | Deploying and Managing Applications with the &cf; Client
13 |
14 |
15 | yes
16 |
17 |
18 |
19 | Using the &cfcli; with &productname;
20 |
21 |
22 | The &cf; command line interface (&cfcli;) is for deploying and managing your
23 | applications. You may use it for all the orgs and spaces that you are a
24 | member of. Install the client on a workstation for remote administration of
25 | your &suse; &cf; instances.
26 |
27 |
28 |
29 | The complete guide is at
30 | Using the Cloud
31 | Foundry Command Line Interface, and source code with a demo video is
32 | on GitHub at
33 | Cloud
34 | Foundry CLI.
35 |
36 |
37 |
38 | The following examples demonstrate some of the commonly used commands. The
39 | first task is to log in to your new &cap; instance.
40 | You need to provide the API endpoint of your &productname; instance to log
41 | in. The API endpoint is the system_domain value you provided in
42 | &values-filename;, plus the
43 | api. prefix, as shown in the welcome screen. Set
44 | your endpoint, and use --skip-ssl-validation when you
45 | have self-signed SSL certificates. The prompt asks for an e-mail address, but you
46 | must enter admin instead (you cannot change this to a
47 | different username, though you may create additional users), and the
48 | password is the one you created in
49 | &values-filename;:
50 |
51 |
52 | &prompt.user;cf login --skip-ssl-validation -a https://api.example.com
53 | API endpoint: https://api.example.com
54 |
55 | Email> admin
56 |
57 | Password>
58 | Authenticating...
59 | OK
60 |
61 | Targeted org system
62 |
63 | API endpoint: https://api.example.com (API version: 2.134.0)
64 | User: admin
65 | Org: system
66 | Space: No space targeted, use 'cf target -s SPACE'
67 |
68 |
69 | cf help displays a list of commands and options.
70 | cf help [command] provides information on specific
71 | commands.
72 |
73 |
74 |
75 | You may pass in your credentials and set the API endpoint in a single
76 | command:
77 |
78 |
79 | &prompt.user;cf login -u admin -p PASSWORD --skip-ssl-validation -a https://api.example.com
80 |
81 |
82 | Log out with cf logout.
83 |
84 |
85 |
86 | Change the admin password:
87 |
88 |
89 | &prompt.user;cf passwd
90 | Current Password>
91 | New Password>
92 | Verify Password>
93 | Changing password...
94 | OK
95 | Please log in again
96 |
97 |
98 | View your current API endpoint, user, org, and space:
99 |
100 |
101 | &prompt.user;cf target
102 |
103 |
104 | Switch to a different org or space:
105 |
106 |
107 | &prompt.user;cf target -o MY_ORG
108 | &prompt.user;cf target -s MY_SPACE
109 |
110 |
111 | List all apps in the current space:
112 |
113 |
114 | &prompt.user;cf apps
115 |
116 |
117 | Query the health and status of a particular app:
118 |
119 |
120 | &prompt.user;cf app MY_APP
121 |
122 |
123 | View app logs. The first example tails the log of a running app. The
124 | --recent option dumps recent logs instead of tailing,
125 | which is useful for stopped and crashed apps:
126 |
127 |
128 | &prompt.user;cf logs MY_APP
129 | &prompt.user;cf logs --recent MY_APP
130 |
131 |
132 | Restart all instances of an app:
133 |
134 |
135 | &prompt.user;cf restart MY_APP
136 |
137 |
138 | Restart a single instance of an app, identified by its index number; the
139 | instance is restarted with the same index number:
140 |
141 |
142 | &prompt.user;cf restart-app-instance MY_APP APP_INSTANCE
143 |
144 |
145 | After you have set up a service broker (see
146 | ), create new services:
147 |
148 |
149 | &prompt.user;cf create-service SERVICE_NAME default MY_DB
150 |
151 |
152 | Then you may bind a service instance to an app:
153 |
154 |
155 | &prompt.user;cf bind-service MY_APP SERVICE_INSTANCE
156 |
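   |
   | To review the service instances in the current space, together with the
   | apps they are bound to:
   |
   | &prompt.user;cf services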
157 |
158 | The most-used command is cf push, for pushing new apps
159 | and changes to existing apps.
160 |
161 |
162 | &prompt.user;cf push NEW_APP -b buildpack
163 |
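   |
   | Instead of passing flags on every push, you can describe the app in a
   | manifest.yml file next to your application code. A minimal sketch; the
   | application name and buildpack are placeholders:
   |
   | applications:
   | - name: NEW_APP
   |   memory: 128M
   |   instances: 1
   |   buildpacks:
   |   - ruby_buildpack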
164 |
165 | If you need to debug your application or run one-off tasks, start an SSH
166 | session into your application container.
167 |
168 | &prompt.user;cf ssh MY_APP
169 |
170 |
171 | When the SSH connection is established, run the following to have the
172 | environment match that of the application and its associated buildpack.
173 |
174 | &prompt.user;/tmp/lifecycle/shell
175 |
176 |
177 |
--------------------------------------------------------------------------------
/xml/network-decl.ent:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/xml/cap_admin_ccdb_key_rotation.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | %entities;
6 | ]>
7 |
11 |
12 | Cloud Controller Database Secret Rotation
13 |
14 |
15 | yes
16 |
17 |
18 |
19 | The Cloud Controller Database (CCDB) encrypts sensitive information like
20 | passwords. The encryption key is generated when &kubecf; is deployed.
21 | If it is compromised or needs to be rotated for any other reason, new keys can be
22 | added. Note that existing encrypted information will not be updated. The
23 | encrypted information must be set again to have it re-encrypted with the
24 | new key. The old key cannot be dropped until all references to it are removed
25 | from the database.
26 |
27 |
28 | Updating these secrets is a manual process that involves decrypting the
29 | current contents of the database using the old key and re-encrypting the
30 | contents using a new key. The following procedure outlines
31 | how this is done.
32 |
33 |
34 |
35 |
36 | For each label under key_labels, &kubecf; will generate
37 | an encryption key. The current_key_label indicates which
38 | key is currently being used.
39 |
40 |
41 | ccdb:
42 | encryption:
43 | rotation:
44 | key_labels:
45 | - encryption_key_0
46 | current_key_label: encryption_key_0
47 |
48 |
49 |
50 |
51 | In order to rotate the CCDB encryption key, add a new label to
52 | key_labels (keeping the old labels), and set
53 | the current_key_label to the newly added label:
54 |
55 |
56 | ccdb:
57 | encryption:
58 | rotation:
59 | key_labels:
60 | - encryption_key_0
61 | - encryption_key_1
62 | current_key_label: encryption_key_1
63 |
64 |
65 |
66 |
67 | Save the above information into a file, for example
68 | rotate-secret.yaml, and perform the rotation:
69 |
70 |
71 |
72 |
73 | Update the &kubecf; &helm; installation:
74 |
75 |
76 | &prompt.user;helm upgrade kubecf suse/kubecf --namespace kubecf --values rotate-secret.yaml --reuse-values
77 |
78 |
79 |
80 |
81 | After &helm; finishes its updates, trigger the
82 | rotate-cc-database-key errand:
83 |
84 | &prompt.user;kubectl patch qjob kubecf-rotate-cc-database-key \
85 | --namespace kubecf \
86 | --type merge \
87 | --patch '{"spec":{"trigger":{"strategy":"now"}}}'
88 |
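   |
   | You can watch the triggered errand run to completion before relying on
   | the new key (a sketch; the errand pod name carries a generated suffix):
   |
   | &prompt.user;kubectl get pods --namespace kubecf | grep rotate-cc-database-key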
89 |
90 |
91 |
92 |
93 |
94 | Tables with Encrypted Information
95 |
96 |
97 | The CCDB contains several tables with encrypted information as follows:
98 |
99 |
100 |
101 |
102 | apps
103 |
104 |
105 | Environment variables
106 |
107 |
108 |
109 |
110 | buildpack_lifecycle_buildpacks
111 |
112 |
113 | Buildpack URLs may contain passwords
114 |
115 |
116 |
117 |
118 | buildpack_lifecycle_data
119 |
120 |
121 | Buildpack URLs may contain passwords
122 |
123 |
124 |
125 |
126 | droplets
127 |
128 |
129 | May contain &docker; registry passwords
130 |
131 |
132 |
133 |
134 | env_groups
135 |
136 |
137 | Environment variables
138 |
139 |
140 |
141 |
142 | packages
143 |
144 |
145 | May contain &docker; registry passwords
146 |
147 |
148 |
149 |
150 | service_bindings
151 |
152 |
153 | Contains service credentials
154 |
155 |
156 |
157 |
158 | service_brokers
159 |
160 |
161 | Contains service credentials
162 |
163 |
164 |
165 |
166 | service_instances
167 |
168 |
169 | Contains service credentials
170 |
171 |
172 |
173 |
174 | service_keys
175 |
176 |
177 | Contains service credentials
178 |
179 |
180 |
181 |
182 | tasks
183 |
184 |
185 | Environment variables
186 |
187 |
188 |
189 |
190 |
191 |
192 | Update Existing Data with New Encryption Key
193 |
194 | To ensure the encryption key is updated for existing data, run the original
195 | command (or its update- equivalent) again with the same
196 | parameters. Some resources must instead be deleted and recreated to update the label.
197 |
198 |
199 |
200 | apps
201 |
202 |
203 | Run cf set-env again
204 |
205 |
206 |
207 |
208 | buildpack_lifecycle_buildpacks, buildpack_lifecycle_data, droplets
209 |
210 |
211 | cf restage the app
212 |
213 |
214 |
215 |
216 | packages
217 |
218 |
219 | cf delete, then cf push the app
220 | (&docker; apps with registry password)
221 |
222 |
223 |
224 |
225 | env_groups
226 |
227 |
228 | Run cf set-staging-environment-variable-group or
229 | cf set-running-environment-variable-group again
230 |
231 |
232 |
233 |
234 | service_bindings
235 |
236 |
237 | Run cf unbind-service and cf
238 | bind-service again
239 |
240 |
241 |
242 |
243 | service_brokers
244 |
245 |
246 | Run cf update-service-broker with the appropriate
247 | credentials
248 |
249 |
250 |
251 |
252 | service_instances
253 |
254 |
255 | Run cf update-service with the appropriate
256 | credentials
257 |
258 |
259 |
260 |
261 | service_keys
262 |
263 |
264 | Run cf delete-service-key and cf
265 | create-service-key again
266 |
267 |
268 |
269 |
270 | tasks
271 |
272 |
273 | While tasks have an encryption key label, they are generally meant to be
274 | a one-off event, and left to run to completion. If there is a task still
275 | running, it could be stopped with cf terminate-task,
276 | then run again with cf run-task.
277 |
278 |
279 |
280 |
281 |
282 |
283 |
284 |
--------------------------------------------------------------------------------
/xml/cap_depl_eks.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | %entities;
6 | ]>
7 |
11 |
12 | Deploying &productname; on &eks-full; (EKS)
13 |
14 |
15 | yes
16 |
17 |
18 |
19 | &readmefirst;
20 |
21 |
22 | This chapter describes how to deploy &productname; on &eks-full; (EKS), using
23 | Amazon's Elastic Load Balancer to provide fault-tolerant access to your
24 | cluster.
25 |
26 |
27 | Prerequisites
28 |
29 |
30 | The following are required to deploy and use &productname; on EKS:
31 |
32 |
33 |
34 |
35 |
36 | An Amazon AWS account with sufficient permissions. For details, refer to
37 | .
38 |
39 |
40 |
41 |
42 | eksctl, a command line client to create and manage
43 | &kube; clusters on &eks;. See
44 |
45 | for more information and installation instructions.
46 |
47 |
48 |
49 |
50 | &cfcli-prereq;
51 | &kubectl-prereq;
52 | &curl-prereq;
53 |
54 |
55 |
56 |
57 | Create an EKS Cluster
58 |
59 |
60 | Now you can create an EKS cluster using eksctl. Be sure to
61 | keep in mind the following minimum requirements of the cluster.
62 |
63 |
64 |
65 |
66 |
67 | Node sizes are at least t3.xlarge.
68 |
69 |
70 |
71 |
72 | The NodeVolumeSize must be a minimum of &node_size; GB.
73 |
74 |
75 |
76 |
77 | The &kube; version is at least &min_kube;.
78 |
79 |
80 |
81 |
82 | As a minimal example, the following command will create an EKS cluster. To
83 | see additional configuration parameters, see eksctl create cluster --help.
84 |
85 |
86 | &prompt.user;eksctl create cluster --name kubecf --version &min_kube; \
87 | --nodegroup-name standard-workers --node-type t3.xlarge \
88 | --nodes 3 --node-volume-size &node_size; \
89 | --region us-east-2 --managed \
90 | --ssh-access --ssh-public-key /path/to/some_key.pub
91 |
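   |
   | When creation finishes, you can confirm the cluster is active before
   | proceeding (a sketch; the region must match the one used above):
   |
   | &prompt.user;eksctl get cluster --region us-east-2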
92 |
93 |
94 |
95 | &install-helm;
96 |
97 |
98 | &storage-class;
99 |
100 |
101 |
102 | Deployment Configuration
103 |
104 |
105 | Use this example &values-filename; as a template
106 | for your configuration.
107 |
108 |
109 | &values-file-changes;
110 |
111 | &supported-domains;
112 |
113 | &example-config;
114 |
115 |
116 |
117 |
118 |
119 |
120 | &certificates;
121 |
122 |
123 |
124 |
125 | &ingress-controller;
126 |
127 |
128 |
129 |
130 | &affinity;
131 |
132 |
133 |
134 |
135 | &high-availability;
136 |
137 |
138 |
139 |
140 | &external-blobstore;
141 |
142 |
143 |
144 |
145 | &external-database;
146 |
147 |
148 |
149 |
150 |
151 | Add the &kube; Charts Repository
152 |
153 |
154 | Download the &suse; &kube; charts repository with &helm;:
155 |
156 |
157 | &prompt.user;helm repo add suse https://kubernetes-charts.suse.com/
158 |
159 |
160 | You may replace the example suse name with any
161 | name. Verify with helm:
162 |
163 |
164 | &prompt.user;helm repo list
165 | NAME URL
166 | stable https://kubernetes-charts.storage.googleapis.com
167 | local http://127.0.0.1:8879/charts
168 | suse https://kubernetes-charts.suse.com/
169 |
170 |
171 | List your chart names, as you will need these for some operations:
172 |
173 |
174 | &helm-search-suse;
175 |
176 |
177 |
178 | Deploying &productname;
179 |
180 | This section describes how to deploy &productname; on &eks;.
181 |
182 |
183 | &kubecf-operator-versions;
184 |
185 |
186 | &deploy-operator;
187 |
188 |
189 |
190 | Deploy &kubecf;
191 |
192 | &deploy-kubecf;
193 |
194 |
195 | Create DNS CNAME records for the public services.
196 |
197 | &dns-mappings;
198 |
199 |
200 |
201 | When all pods are fully ready, verify your deployment. See for more information.
202 |
203 | &cf-auth;
204 |
205 |
206 |
207 |
208 |
209 |
210 |
211 |
212 |
213 | &ldap;
214 |
215 |
216 |
217 |
218 |
219 |
220 |
221 |
222 | Expanding Capacity of a &cap; Deployment on &eks;
223 |
224 |
225 | If the current capacity of your &cap; deployment is insufficient for your
226 | workloads, you can expand the capacity using the procedure in this section.
227 |
228 |
229 |
230 | These instructions assume you have followed the procedure in
231 | and have a running &cap; deployment on
232 | &eks;.
233 |
234 |
235 |
236 |
237 |
238 | Get the current number of &kube; nodes in the cluster.
239 |
240 | &prompt.user;eksctl get nodegroup --name standard-workers \
241 | --cluster kubecf \
242 | --region us-east-2
243 |
244 |
245 |
246 |
247 | Scale the nodegroup to the desired node count.
248 |
249 | &prompt.user;eksctl scale nodegroup --name standard-workers \
250 | --cluster kubecf \
251 | --nodes 4 \
252 | --region us-east-2
253 |
254 |
255 |
256 |
257 | Verify the new nodes are in a Ready state before
258 | proceeding.
259 |
260 | &prompt.user;kubectl get nodes
261 |
262 |
263 |
264 | Add or update the following in your
265 | &values-filename; file to increase the number of
266 | diego-cell in your &cap; deployment. Replace the
267 | example value with the number required by your workload.
268 |
269 | sizing:
270 | diego_cell:
271 | instances: 5
272 |
273 |
274 |
275 |
276 | Perform a helm upgrade to apply the change.
277 |
278 | &prompt.user;helm upgrade kubecf suse/kubecf \
279 | --namespace kubecf \
280 | --values &values-file; \
281 | --version &kubecf_chart;
282 |
283 |
284 |
285 |
286 | Monitor progress of the additional diego-cell pods:
287 |
288 | &prompt.user;watch --color 'kubectl get pods --namespace kubecf'
289 |
290 |
291 |
292 |
293 |
294 |
--------------------------------------------------------------------------------
/xml/cap_troubleshooting.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | %entities;
6 | ]>
7 |
11 |
12 | Troubleshooting
13 |
14 |
15 | yes
16 |
17 |
18 |
19 | Cloud stacks are complex, and debugging deployment issues often requires
20 | digging through multiple layers to find the information you need. Remember
21 | that the &kubecf; releases must be deployed in the correct order, and that each
22 | release must deploy successfully, with no failed pods, before deploying the
23 | next release.
24 |
25 |
26 | Before proceeding with in-depth troubleshooting, ensure the following
27 | requirements have been met, as defined in the Support Statement at .
28 |
29 |
30 |
31 |
32 | The &kube; cluster satisfies the Requirements listed here at
33 | .
34 |
35 |
36 |
37 |
38 | The kube-ready-state-check.sh script has been run on
39 | the target &kube; cluster and does not show any configuration problems.
40 |
41 |
42 |
43 |
44 | A &suse; Services or Sales Engineer has verified that &productname; works
45 | correctly on the target &kube; cluster.
46 |
47 |
48 |
49 |
50 | Logging
51 |
52 |
53 | There are two types of logs in a deployment of &productname;, applications
54 | logs and component logs. The following provides a brief overview of each log
55 | type and how to retrieve them for monitoring and debugging use.
56 |
57 |
58 |
59 | &log-types-and-fetch;
60 |
61 |
62 | Using Supportconfig
63 |
64 |
65 | If you ever need to request support, or just want to generate detailed
66 | system information and logs, use the supportconfig
67 | utility. Run it with no options to collect basic system information, and
68 | also cluster logs including &docker;, etcd, flannel, and Velum.
69 | supportconfig may give you all the information you need.
70 |
71 |
72 |
73 | supportconfig -h prints the options. Read the "Gathering
74 | System Information for Support" chapter in any &sle; &admin; to
75 | learn more.
76 |
77 |
78 |
79 | Deployment Is Taking Too Long
80 |
81 |
82 | A deployment step seems to take too long, or you see that some pods are not
83 | in a ready state hours after all the others are ready, or a pod shows a lot
84 | of restarts. This example shows not-ready pods many hours after the others
85 | have become ready:
86 |
87 |
88 | &prompt.user;kubectl get pods --namespace kubecf
89 | NAME READY STATUS RESTARTS AGE
90 | router-3137013061-wlhxb 0/1 Running 0 16h
91 | routing-api-0 0/1 Running 0 16h
92 |
93 |
94 | The Running status means the pod is bound to a node and
95 | all of its containers have been created. However, it is not
96 | Ready, which means it is not ready to service requests.
97 | Use kubectl to print a detailed description of pod events
98 | and status:
99 |
100 |
101 | &prompt.user;kubectl describe pod --namespace kubecf router-0
102 |
103 |
104 | This prints a lot of information, including IP addresses, routine events,
105 | warnings, and errors. You should find the reason for the failure in this
106 | output.
107 |
108 |
109 |
110 | &deployment-pod-status;
111 |
112 |
113 |
114 | Deleting and Rebuilding a Deployment
115 |
116 |
117 | There may be times when you want to delete and rebuild a deployment, for
118 | example when there are errors in your &values-filename; file, you wish to
119 | test configuration changes, or a deployment fails and you want to try it again.
120 |
121 |
122 |
123 |
124 |
125 | Remove the kubecf release. All resources associated with
126 | the release of the suse/kubecf chart will be removed.
127 | Replace the example release name with the one used during your installation.
128 |
129 | &prompt.user;helm uninstall kubecf
130 |
131 |
132 |
133 | Remove the kubecf namespace. Replace with the namespace
134 | where the suse/kubecf chart was installed.
135 |
136 | &prompt.user;kubectl delete namespace kubecf
137 |
138 |
139 |
140 | Remove the cf-operator release. All resources associated
141 | with the release of the suse/cf-operator chart will be
142 | removed. Replace the example release name with the one used during your
143 | installation.
144 |
145 | &prompt.user;helm uninstall cf-operator
146 |
147 |
148 |
149 | Remove the cf-operator namespace. Replace with the namespace
150 | where the suse/cf-operator chart was installed.
151 |
152 | &prompt.user;kubectl delete namespace cf-operator
153 |
154 |
155 |
156 | Verify all of the releases are removed.
157 |
158 | &prompt.user;helm list --all-namespaces
159 |
160 |
161 |
162 | Verify all of the namespaces are removed.
163 |
164 | &prompt.user;kubectl get namespaces
165 |
166 |
167 |
168 |
169 | Querying with Kubectl
170 |
171 |
172 | You can safely query with kubectl to get information
173 | about resources inside your &kube; cluster. kubectl cluster-info
174 | dump | tee clusterinfo.txt outputs a large amount of information
175 | about the &kube; master and cluster services to a text file.
176 |
177 |
178 |
179 | The following commands give more targeted information about your cluster.
180 |
181 |
182 |
183 |
184 |
185 | List all cluster resources:
186 |
187 | &prompt.user;kubectl get all --all-namespaces
188 |
189 |
190 |
191 | List all of your running pods:
192 |
193 | &prompt.user;kubectl get pods --all-namespaces
194 |
195 |
196 |
197 | List all of your running pods, their internal IP addresses, and which
198 | &kube; nodes they are running on:
199 |
200 | &prompt.user;kubectl get pods --all-namespaces --output wide
201 |
202 |
203 |
204 | See all pods, including those with Completed or Failed statuses:
205 |
206 | &prompt.user;kubectl get pods --show-all --all-namespaces
207 |
208 |
209 |
210 | List pods in one namespace:
211 |
212 | &prompt.user;kubectl get pods --namespace kubecf
213 |
214 |
215 |
216 | Get detailed information about one pod:
217 |
218 | &prompt.user;kubectl describe --namespace kubecf po/diego-cell-0
219 |
220 |
221 |
222 | Read the log file of a pod:
223 |
224 | &prompt.user;kubectl logs --namespace kubecf po/diego-cell-0
225 |
226 |
227 |
228 | List all &kube; nodes, then print detailed information about a single
229 | node:
230 |
231 | &prompt.user;kubectl get nodes
232 | &prompt.user;kubectl describe node 6a2752b6fab54bb889029f60de6fa4d5.infra.caasp.local
233 |
234 |
235 |
236 | List all containers in all namespaces, formatted for readability:
237 |
238 | &prompt.user;kubectl get pods --all-namespaces --output jsonpath="{..image}" |\
239 | tr -s '[[:space:]]' '\n' |\
240 | sort |\
241 | uniq -c
242 |
243 |
244 |
245 | These two commands check node capacities, to verify that there are enough
246 | resources for the pods:
247 |
248 | &prompt.user;kubectl get nodes --output yaml | grep '\sname\|cpu\|memory'
249 | &prompt.user;kubectl get nodes --output json | \
250 | jq '.items[] | {name: .metadata.name, cap: .status.capacity}'
251 |
252 |
253 |
254 |
255 | Admission webhook denied
256 |
257 | When switching back to Diego from Eirini, the error below can occur:
258 |
259 |
260 | &prompt.user;helm install kubecf suse/kubecf --namespace kubecf --values &values-file;
261 | Error: admission webhook "validate-boshdeployment.quarks.cloudfoundry.org" denied the request: Failed to resolve manifest: Failed to interpolate ops 'kubecf-user-provided-properties' for manifest 'kubecf': Applying ops on manifest obj failed in interpolator: Expected to find exactly one matching array item for path '/instance_groups/name=eirini' but found 0
262 |
263 |
264 | To avoid this error, remove the eirini-persi-broker configuration
265 | before running the command.
266 |
267 |
268 |
269 | Namespace does not exist
270 |
271 | When running a &helm; command, an error occurs stating that a namespace does not
272 | exist. To avoid this error, create the namespace manually with kubectl before
273 | running the command:
274 |
275 |
276 | &prompt.user;kubectl create namespace name
277 |
278 |
279 |
280 | Log-cache Memory Allocation Issue
281 |
282 | The log-cache component currently has a memory allocation issue where the
283 | available node memory is reported instead of the memory assigned to the
284 | container under cgroups. In such a situation, log-cache allocates memory based
285 | on these values, causing a range of issues (OOMKills, performance
286 | degradation, etc.). To address this issue, use node affinity to tie
287 | log-cache to nodes of a uniform size, and then declare the cache percentage
288 | based on that size. A limit of 3% has been identified as sufficient.
289 |
290 |
291 | Add the following to your &values-filename;. In the node affinity
292 | configuration, the values for key and
293 | values may need to be changed depending on how nodes in
294 | your cluster are labeled. For more information on labels, see
295 | .
296 |
297 | properties:
298 | log-cache:
299 | log-cache:
300 | memory_limit_percent: 3
301 |
302 | operations:
303 | inline:
304 | - type: replace
305 | path: /instance_groups/name=log-cache/env?/bosh/agent/settings/affinity
306 | value:
307 | nodeAffinity:
308 | requiredDuringSchedulingIgnoredDuringExecution:
309 | nodeSelectorTerms:
310 | - matchExpressions:
311 | - key: kubernetes.io/hostname
312 | operator: In
313 | values:
314 | - LABEL_VALUE_OF_NODE
315 |
316 |
317 |
318 |
--------------------------------------------------------------------------------
/xml/cap_admin_app_autoscaler.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | %entities;
6 | ]>
7 |
11 |
12 | App-AutoScaler
13 |
14 |
15 | yes
16 |
17 |
18 |
19 | The App-AutoScaler service is used for automatically managing an
20 | application's instance count when deployed on &kubecf;. The scaling behavior is
21 | determined by a set of criteria defined in a policy (See
22 | ).
23 |
24 |
25 | Prerequisites
26 |
27 |
28 | Using the App-AutoScaler service requires:
29 |
30 |
31 |
32 |
33 |
34 | A running deployment of kubecf
35 |
36 |
37 |
38 |
39 | &cfcli-prereq;
40 |
41 |
42 |
43 | The &cf; CLI AutoScaler Plug-in, see
44 |
45 |
46 | The plugin can be installed by running the following command:
47 |
48 | &prompt.user;cf install-plugin -r CF-Community app-autoscaler-plugin
49 |
50 | If the plugin repo is not found, add it first:
51 |
52 | &prompt.user;cf add-plugin-repo CF-Community https://plugins.cloudfoundry.org
53 |
54 |
55 |
56 |
57 | Enabling and Disabling the App-AutoScaler Service
58 |
59 |
60 | App-AutoScaler is disabled by default. To enable it, add the following
61 | block to your &values-filename; file.
62 |
63 |
64 | features:
65 | autoscaler:
66 | enabled: true
67 |
68 |
69 |
70 | To disable App-AutoScaler again, update the above block in your &values-filename;
71 | so that enabled is set to false.
72 |
73 |
74 |
75 | &config-new-vs-existing;
76 |
77 |
78 |
79 |
84 |
85 |
86 | Using the App-AutoScaler Service
87 |
88 |
89 | Push the application without starting it
90 | first:
91 |
92 |
93 | &prompt.user;cf push MY_APPLICATION --no-start
94 |
95 |
96 |
97 | Attach an autoscaling policy to the application:
98 |
99 |
100 | &prompt.user;cf attach-autoscaling-policy MY_APPLICATION MY_POLICY.json
101 |
102 |
103 | The policy is defined as a JSON file
104 | (See ) in a proper format
105 | (See ).
106 |
107 |
108 |
109 | Start the application:
110 |
111 |
112 | &prompt.user;cf start MY_APPLICATION
113 |
114 |
115 | Autoscaling policies can be managed using &cfcli; with the App-AutoScaler plugin
116 | as above (See ) or using the
117 | App-AutoScaler API (See ).
118 |
119 |
120 |
121 | The App-AutoScaler &cfcli; Plugin
122 |
123 | The App-AutoScaler plugin is used for managing the service with your
124 | applications and provides the following commands (with shortcuts in brackets). Refer to for details about each command:
125 |
126 |
127 |
128 | autoscaling-api (asa)
129 |
130 |
131 | Set or view AutoScaler service API endpoint. See for more information.
132 |
133 |
134 |
135 |
136 | autoscaling-policy (asp)
137 |
138 |
139 | Retrieve the scaling policy of an application. See for more information.
140 |
141 |
142 |
143 |
144 | attach-autoscaling-policy (aasp)
145 |
146 |
147 | Attach a scaling policy to an application. See for more information.
148 |
149 |
150 |
151 |
152 | detach-autoscaling-policy (dasp)
153 |
154 |
155 | Detach the scaling policy from an application. See for more information.
156 |
157 |
158 |
159 |
160 | create-autoscaling-credential (casc)
161 |
162 |
163 | Create custom metric credential for an application. See for more information.
164 |
165 |
166 |
167 |
168 | delete-autoscaling-credential (dasc)
169 |
170 |
171 | Delete the custom metric credential of an application.
172 | See for more information.
173 |
174 |
175 |
176 |
177 | autoscaling-metrics (asm)
178 |
179 |
180 | Retrieve the metrics of an application. See for more information.
181 |
182 |
183 |
184 |
185 | autoscaling-history (ash)
186 |
187 |
188 | Retrieve the scaling history of an application. See for more information.
189 |
190 |
191 |
192 |
193 |
194 |
195 |
196 | App-AutoScaler API
197 |
198 | The App-AutoScaler service provides a
199 | Public
200 | API; for detailed usage information, see . It includes requests to:
201 |
202 |
203 |
204 |
205 | List scaling history of an application. For details, refer to
206 |
207 |
208 |
209 |
210 | List instance metrics of an application. For details, refer to
211 |
212 |
213 |
214 |
215 | List aggregated metrics of an application. For details, refer to
216 |
217 |
218 |
219 |
220 | Policy API. For details, refer to
221 |
222 |
223 |
224 |
225 | Delete policy. For details, refer to
226 |
227 |
228 |
229 |
230 | Get policy. For details, refer to
231 |
232 |
233 |
234 |
235 |
236 |
237 | Policies
238 |
239 |
240 | A policy identifies characteristics including minimum instance count,
241 | maximum instance count, and the rules used to determine when the number of
242 | application instances is scaled up or down. These rules are categorized into
243 | two types, scheduled scaling and dynamic scaling. (See
244 | ). Multiple scaling
245 | rules can be specified in a policy, but App-AutoScaler does not detect or
246 | handle conflicts that may occur. Ensure there are no conflicting rules to
247 | avoid unintended scaling behavior.
248 |
249 |
250 |
251 | Policies are defined using the JSON format and can be attached to an
252 | application either by passing the path to the policy file or directly as a
253 | parameter.
254 |
255 |
256 |
257 | The following is an example of a policy file, called
258 | my-policy.json.
259 |
260 |
261 | {
262 | "instance_min_count": 1,
263 | "instance_max_count": 4,
264 | "scaling_rules": [{
265 | "metric_type": "memoryused",
266 | "stat_window_secs": 60,
267 | "breach_duration_secs": 60,
268 | "threshold": 10,
269 | "operator": ">=",
270 | "cool_down_secs": 300,
271 | "adjustment": "+1"
272 | }]
273 | }
274 |
275 |
276 |
277 | For an example that demonstrates defining multiple scaling rules in a single
278 | policy, refer to the sample of a policy file at . The complete list of configurable policy values can be
279 | found at
280 | .
281 |
282 |
283 |
284 | Scaling Types
285 |
286 |
287 | Scheduled Scaling
288 |
289 |
290 | Modifies an application's instance count at a predetermined time. This
291 | option is suitable for workloads with predictable resource usage.
292 |
293 |
294 |
295 |
296 | Dynamic Scaling
297 |
298 |
299 | Modifies an application's instance count based on metrics criteria. This
300 | option is suitable for workloads with dynamic resource usage. The
301 | following metrics are available:
302 |
303 |
304 |
305 |
306 | memoryused
307 |
308 |
309 |
310 |
311 | memoryutil
312 |
313 |
314 |
315 |
316 | cpu
317 |
318 |
319 |
320 |
321 | responsetime
322 |
323 |
324 |
325 |
326 | throughput
327 |
328 |
329 |
330 |
331 | custom metric
332 |
333 |
334 |
335 |
336 |
337 |
338 |
339 | See
340 | for additional details.
341 |
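   |
   | The policy example shown earlier defines only a dynamic scaling rule. For
   | scheduled scaling, a policy carries a schedules block instead of, or in
   | addition to, scaling_rules. The following is a sketch based on the
   | upstream policy specification; verify the field names against the policy
   | specification referenced above before use:
   |
   | {
   |   "instance_min_count": 1,
   |   "instance_max_count": 4,
   |   "schedules": {
   |     "timezone": "Etc/UTC",
   |     "recurring_schedule": [{
   |       "start_time": "09:00",
   |       "end_time": "18:00",
   |       "days_of_week": [1, 2, 3, 4, 5],
   |       "instance_min_count": 2,
   |       "instance_max_count": 4,
   |       "initial_min_instance_count": 2
   |     }]
   |   }
   | }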
342 |
343 |
344 |
345 |
--------------------------------------------------------------------------------
/xml/cap_depl_aks.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | %entities;
6 | ]>
7 |
11 |
12 | Deploying &productname; on &aks-full; (AKS)
13 |
14 |
15 | yes
16 |
17 |
18 | &readmefirst;
19 |
20 | &productname; supports deployment on &aks-full;
21 | (AKS), Microsoft's managed &kube; service. This chapter describes the steps
22 | for preparing Azure for a &productname; deployment, deployed with the default
23 | Azure Standard SKU load balancer (see
24 | ).
25 |
26 |
27 | In &kube; terminology a node used to be a minion, which was the name for a
28 | worker node. Now the correct term is simply node (see
29 | ).
30 | This can be confusing, as computing nodes have traditionally been defined as
31 | any device in a network that has an IP address. In Azure they are called
32 | agent nodes. In this chapter we call them agent nodes or &kube; nodes.
33 |
34 |
35 | Prerequisites
36 |
37 |
38 | The following are required to deploy and use &productname; on AKS:
39 |
40 |
41 |
42 |
43 |
44 | az, the Azure command line client. See
45 |
46 | for more information and installation instructions.
47 |
48 |
49 |
50 |
51 | A &ms; Azure account. For details, refer to
52 | .
53 |
54 |
55 |
56 |
57 | Your Azure account has sufficient quota. The minimal installation described
58 | in this chapter requires 24 vCPUs. If your account has insufficient quota,
59 | you can request a quota increase by going to
60 | .
61 |
62 |
63 |
64 |
65 | An SSH key that can be used for access to the nodes of the cluster.
66 |
67 |
68 |
69 |
70 | &cfcli-prereq;
71 | &kubectl-prereq;
72 | &jq-prereq;
73 | &curl-prereq;
74 | &sed-prereq;
75 |
76 |
77 |
78 | Create Resource Group and AKS Instance
79 |
80 |
81 | Log in to your Azure account, which should have the
82 | Contributor role.
83 |
84 |
85 | &prompt.user;az login
86 |
87 |
88 |
89 |
90 |
91 | You can set up an AKS cluster with an automatically generated service
92 | principal. Note that to be able to create a service principal, your user
93 | account must have permissions to register an application with your Azure
94 | Active Directory tenant, and to assign the application to a role in your
95 | subscription. For details, see .
96 |
97 |
98 |
99 | Alternatively, you can specify an existing service principal, but the service
100 | principal must have sufficient rights to create resources at the
101 | appropriate level, for example resource group, subscription, etc. For more
102 | details, see:
103 |
104 |
105 |
106 |
107 |
108 | Create a service principal:
109 |
110 |
111 |
112 |
113 | Create a role assignment for the service principal, at the subscription or
114 | resource group level:
115 |
116 |
117 |
118 |
119 | Create the cluster with the service principal:
120 |
121 |
122 |
123 |
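   |
   | A minimal sketch of the first two steps; the command names are real az
   | subcommands, but the service principal name, role, and scope are
   | assumptions, so consult the linked Azure documentation for the
   | authoritative procedure:
   |
   | &prompt.user;az ad sp create-for-rbac --skip-assignment --name cap-sp
   | &prompt.user;az role assignment create --assignee APP_ID --role Contributor \
   |  --scope /subscriptions/SUBSCRIPTION_ID/resourceGroups/my-resource-group
   |
   | The service principal is then passed to az aks create with the
   | --service-principal and --client-secret options, in addition to the
   | parameters shown below.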
124 |
125 | Specify the following additional parameters for creating the cluster: node
126 | count, a username for SSH access to the nodes, SSH key, VM type, VM disk size
127 | and optionally, the &kube; version and a nodepool name.
128 |
129 |
130 | &prompt.user;az aks create --resource-group my-resource-group --name cap-aks \
131 | --node-count 3 --admin-username cap-user \
132 | --ssh-key-value /path/to/some_key.pub --node-vm-size Standard_DS4_v2 \
133 | --node-osdisk-size &node_size; --nodepool-name mypool
134 |
135 |
137 |
138 |
139 | For more az aks create options see
140 | .
141 |
142 |
143 |
144 | This takes a few minutes. When it is completed, fetch your
145 | kubectl credentials. The default behavior for az
146 | aks get-credentials is to merge the new credentials with the
147 | existing default configuration, and to set the new credentials as the
148 | current &kube; context. The context name is your AKS_NAME value. You should
149 | first backup your current configuration, or move it to a different location,
150 | then fetch the new credentials:
151 |
152 |
153 | &prompt.user;az aks get-credentials --resource-group $RG_NAME --name $AKS_NAME
154 | Merged "cap-aks" as current context in /home/&exampleuser_plain;/.kube/config
155 |
156 |
157 | Verify that you can connect to your cluster:
158 |
159 |
160 | &prompt.user;kubectl get nodes
161 |
162 |
163 | When all nodes are in a ready state and all pods are running, proceed to the
164 | next steps.
165 |
166 |
167 |
168 |
169 | &install-helm;
170 |
171 |
172 | &storage-class;
173 |
174 |
175 |
176 | Deployment Configuration
177 |
178 | The following file, &values-filename;, provides a
179 | minimal example deployment configuration.
180 |
181 |
182 | &values-file-changes;
183 |
184 | &supported-domains;
185 |
186 | &example-config;
187 |
188 |
189 |
190 |
191 |
192 |
193 |
194 | &certificates;
195 |
196 |
197 |
198 |
199 | &ingress-controller;
200 |
201 |
202 |
203 |
204 | &affinity;
205 |
206 |
207 |
208 |
209 | &high-availability;
210 |
211 |
212 |
213 |
214 | &external-blobstore;
215 |
216 |
217 |
218 |
219 | &external-database;
220 |
221 |
222 |
223 |
224 |
225 | Add the &kube; Charts Repository
226 |
227 |
228 | Download the &suse; &kube; charts repository with &helm;:
229 |
230 |
231 | &prompt.user;helm repo add suse https://kubernetes-charts.suse.com/
232 |
233 |
234 | You may replace the example suse name with any
235 | name. Verify with helm:
236 |
237 |
238 | &prompt.user;helm repo list
239 | NAME URL
240 | stable https://kubernetes-charts.storage.googleapis.com
241 | local http://127.0.0.1:8879/charts
242 | suse https://kubernetes-charts.suse.com/
243 |
244 |
245 |
246 | List your chart names, as you will need these for some operations:
247 |
248 |
249 | &helm-search-suse;
250 |
251 |
252 |
253 | Deploying &productname;
254 |
255 | This section describes how to deploy &productname; with an Azure Standard SKU
256 | load balancer.
257 |
258 |
259 | &kubecf-operator-versions;
260 |
261 |
262 | &deploy-operator;
263 |
264 |
265 |
266 | Deploy &kubecf;
267 |
268 | &deploy-kubecf;
269 |
270 |
271 | Create DNS A records for the public services.
272 |
273 | &dns-mappings;
274 |
275 |
276 |
277 | When all pods are fully ready, verify your deployment. See for more information.
278 |
279 | &cf-auth;
280 |
281 |
282 |
283 |
284 |
285 |
286 |
287 |
288 |
289 | &ldap;
290 |
291 |
292 |
293 |
294 |
295 |
296 |
297 |
298 | Expanding Capacity of a &cap; Deployment on &aks;
299 |
300 |
301 | If the current capacity of your &cap; deployment is insufficient for your
302 | workloads, you can expand the capacity using the procedure in this section.
303 |
304 |
305 |
306 | These instructions assume you have followed the procedure in
307 | and have a running &cap; deployment on
308 | &aks;. The instructions below will use environment variables defined in
309 | .
310 |
311 |
312 |
313 |
314 |
315 | Get the current number of &kube; nodes in the cluster.
316 |
317 | &prompt.user;export OLD_NODE_COUNT=$(kubectl get nodes --output json | jq '.items | length')
318 |
319 |
320 |
321 | Set the number of &kube; nodes the cluster will be expanded to. Replace the
322 | example value with the number of nodes required for your workload.
323 |
324 | &prompt.user;export NEW_NODE_COUNT=5
325 |
326 |
327 |
328 |
329 | Increase the &kube; node count in the cluster.
330 |
331 | &prompt.user;az aks scale --resource-group $RG_NAME --name $AKS_NAME \
332 | --node-count $NEW_NODE_COUNT \
333 | --nodepool-name $NODEPOOL_NAME
334 |
335 |
336 |
337 |
338 | Verify the new nodes are in a Ready state before proceeding.
339 |
340 | &prompt.user;kubectl get nodes
341 |
342 |
343 |
344 | Add or update the following in your
345 | &values-filename; file to increase the number of
346 | diego-cell in your &cap; deployment. Replace the
347 | example value with the number required by your workload.
348 |
349 | sizing:
350 | diego_cell:
351 | instances: 5
352 |
353 |
354 |
355 |
356 | Perform a helm upgrade to apply the change.
357 |
358 | &prompt.user;helm upgrade kubecf suse/kubecf \
359 | --namespace kubecf \
360 | --values &values-file; \
361 | --version &kubecf_chart;
362 |
363 |
364 |
365 |
366 | Monitor progress of the additional diego-cell pods:
367 |
368 | &prompt.user;watch --color 'kubectl get pods --namespace kubecf'
369 |
370 |
371 |
372 |
373 |
--------------------------------------------------------------------------------
/xml/cap_depl_gke.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | %entities;
6 | ]>
7 |
11 |
12 | Deploying &productname; on &gke-full; (GKE)
13 |
14 |
15 | yes
16 |
17 |
18 | &readmefirst;
19 |
20 | &productname; supports deployment on &gke-full; (GKE). This chapter describes the steps
21 | to prepare a &productname; deployment on GKE using its integrated network load balancers. See
22 |
23 | for more information on GKE.
24 |
25 |
26 | Prerequisites
27 |
28 |
29 | The following are required to deploy and use &productname; on GKE:
30 |
31 |
32 |
33 |
34 |
35 | A Google Cloud Platform (GCP) user account or a service account with the
36 | following IAM roles (see the role-granting sketch after this list). If you
37 | do not have an account, visit to create one.
38 |
39 |
40 |
41 |
42 | compute.admin. For details regarding this role, refer to
43 | .
44 |
45 |
46 |
47 |
48 | container.admin. For details regarding this role, refer to
49 | .
50 |
51 |
52 |
53 |
54 | iam.serviceAccountUser. For details regarding this role, refer to
55 | .
56 |
57 |
58 |
59 |
60 |
61 |
62 | Access to a GCP project with the &kube; Engine API enabled. If a
63 | project needs to be created, refer to
64 | .
65 | To enable access to the API, refer to
66 | .
67 |
68 |
69 |
70 |
71 | gcloud, the primary command line interface to Google
72 | Cloud Platform. See
73 | for more
74 | information and installation instructions.
75 |
76 |
77 |
78 |
79 | &cfcli-prereq;
80 | &kubectl-prereq;
81 | &jq-prereq;
82 | &curl-prereq;
83 | &sed-prereq;
84 |
85 |
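The following is a sketch of granting the three roles listed above to a
service account with gcloud. The service account name
cap-deployer and the project ID
example-project are placeholders; substitute your own
values:

&prompt.user;for role in compute.admin container.admin iam.serviceAccountUser; do
gcloud projects add-iam-policy-binding example-project \
--member=serviceAccount:cap-deployer@example-project.iam.gserviceaccount.com \
--role=roles/${role}
done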
86 |
87 | Creating a GKE Cluster
88 |
89 |
90 | In order to deploy &productname;, create a cluster that:
91 |
92 |
93 |
94 |
95 |
96 | Is a Zonal or Regional type. Do not
97 | use an Alpha cluster.
98 |
99 |
100 |
101 |
102 | Uses Ubuntu as the host operating system. If using the
103 | gcloud CLI, include --image-type=UBUNTU
104 | during the cluster creation.
105 |
106 |
107 |
108 |
109 | Allows access to all Cloud APIs (in order for storage to work correctly).
110 |
114 |
115 |
116 |
117 |
118 | Has at least 3 nodes of machine type n1-standard-4. If using the
119 | gcloud CLI, include --machine-type=n1-standard-4
120 | and --num-nodes=3 during the cluster creation. For details, see
121 | .
122 |
123 |
124 |
125 |
126 | Has at least &node_size; GB local storage per node.
127 |
128 |
129 |
130 |
131 | (Optional) Uses preemptible nodes to keep costs low. For details, see
132 | .
133 |
134 |
135 |
136 |
137 |
138 |
139 | Set a name for your cluster:
140 |
141 | &prompt.user;export CLUSTER_NAME="cap"
142 |
143 |
144 |
145 | Set the zone for your cluster:
146 |
147 | &prompt.user;export CLUSTER_ZONE="us-west1-a"
148 |
149 |
150 |
151 | Set the number of nodes for your cluster:
152 |
153 | &prompt.user;export NODE_COUNT=3
154 |
155 |
156 |
157 | Create the cluster:
158 |
159 | &prompt.user;gcloud container clusters create ${CLUSTER_NAME} \
160 | --image-type=UBUNTU \
161 | --machine-type=n1-standard-4 \
162 | --zone ${CLUSTER_ZONE} \
163 | --num-nodes=$NODE_COUNT \
164 | --no-enable-basic-auth \
165 | --no-issue-client-certificate \
166 | --no-enable-autoupgrade
167 |
168 |
169 |
170 |
171 | Specify the --no-enable-basic-auth and
172 | --no-issue-client-certificate flags so that
173 | kubectl does not use basic or client certificate
174 | authentication, but uses OAuth Bearer Tokens instead. Configure the
175 | flags to suit your desired authentication mechanism.
176 |
177 |
178 |
179 |
180 | Specify --no-enable-autoupgrade to disable
181 | automatic upgrades.
182 |
183 |
184 |
185 |
186 | Disable legacy metadata server endpoints using
187 | --metadata disable-legacy-endpoints=true as a best
188 | practice as indicated in
189 | .
190 |
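For example, the cluster creation command shown above with this flag
appended (a sketch; all other flags unchanged):

&prompt.user;gcloud container clusters create ${CLUSTER_NAME} \
--image-type=UBUNTU \
--machine-type=n1-standard-4 \
--zone ${CLUSTER_ZONE} \
--num-nodes=$NODE_COUNT \
--no-enable-basic-auth \
--no-issue-client-certificate \
--no-enable-autoupgrade \
--metadata disable-legacy-endpoints=true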
191 |
192 |
193 |
194 |
195 |
196 |
197 |
198 | Get kubeconfig File
199 |
200 |
201 | Get the kubeconfig file for your cluster.
202 |
203 |
204 | &prompt.user;gcloud container clusters get-credentials --zone ${CLUSTER_ZONE:?required} ${CLUSTER_NAME:?required} --project example-project
205 |
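To confirm that kubectl now targets the new cluster, a
quick check:

&prompt.user;kubectl config current-context
&prompt.user;kubectl get nodes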
206 |
207 |
208 |
209 | &install-helm;
210 |
211 |
212 | &storage-class;
213 |
214 |
215 |
216 | Deployment Configuration
217 |
218 | The following file, &values-filename;, provides a
219 | minimal example deployment configuration.
220 |
221 |
222 | &values-file-changes;
223 |
224 | &supported-domains;
225 |
226 | &example-config;
227 |
228 |
229 |
230 |
231 |
232 |
233 | &certificates;
234 |
235 |
236 |
237 |
238 | &ingress-controller;
239 |
240 |
241 |
242 |
243 | &affinity;
244 |
245 |
246 |
247 |
248 | &high-availability;
249 |
250 |
251 |
252 |
253 | &external-blobstore;
254 |
255 |
256 |
257 |
258 | &external-database;
259 |
260 |
261 |
262 |
263 |
264 | Add the &kube; Charts Repository
265 |
266 |
267 | Download the &suse; &kube; charts repository with &helm;:
268 |
269 |
270 | &prompt.user;helm repo add suse https://kubernetes-charts.suse.com/
271 |
272 |
273 | You may replace the example suse name with any
274 | name. Verify with helm:
275 |
276 |
277 | &prompt.user;helm repo list
278 | NAME URL
279 | stable https://kubernetes-charts.storage.googleapis.com
280 | local http://127.0.0.1:8879/charts
281 | suse https://kubernetes-charts.suse.com/
282 |
283 |
284 |
285 | List your chart names, as you will need these for some operations:
286 |
287 |
288 | &helm-search-suse;
289 |
290 |
291 |
292 | Deploying &productname;
293 |
294 | This section describes how to deploy &productname; on &gke;, and how to
295 | configure your DNS records.
296 |
297 |
298 | &kubecf-operator-versions;
299 |
300 |
301 | &deploy-operator;
302 |
303 |
304 |
305 | Deploy &kubecf;
306 |
307 | &deploy-kubecf;
308 |
309 |
310 | Create DNS A records for the public services.
311 |
312 | &dns-mappings;
313 |
314 |
315 |
316 | When all pods are fully ready, verify your deployment. See for more information.
317 |
318 | &cf-auth;
319 |
320 |
321 |
322 |
323 |
324 |
325 |
326 |
327 |
328 | &ldap;
329 |
330 |
331 |
332 |
333 |
334 |
335 |
336 |
337 | Expanding Capacity of a &cap; Deployment on &gke;
338 |
339 |
340 | If the current capacity of your &cap; deployment is insufficient for your
341 | workloads, you can expand the capacity using the procedure in this section.
342 |
343 |
344 |
345 | These instructions assume you have followed the procedure in
346 | and have a running &cap; deployment on
347 | &gke;. The instructions below will use environment variables defined in
348 | .
349 |
350 |
351 |
352 |
353 |
354 | Get the creation timestamp of the most recently created node in the cluster.
355 |
356 | &prompt.user;RECENT_VM_NODE=$(gcloud compute instances list --filter=name~${CLUSTER_NAME:?required} --format json | jq --raw-output '[sort_by(.creationTimestamp) | .[].creationTimestamp ] | last | .[0:19] | strptime("%Y-%m-%dT%H:%M:%S") | mktime')
357 |
358 |
359 |
360 |
361 | Increase the &kube; node count in the cluster. Replace the example value
362 | with the number of nodes required for your workload.
363 |
364 | &prompt.user;gcloud container clusters resize $CLUSTER_NAME \
365 | --zone $CLUSTER_ZONE --num-nodes 5
366 |
367 |
368 |
369 |
370 | Verify the new nodes are in a Ready state before proceeding.
371 |
372 | &prompt.user;kubectl get nodes
373 |
374 |
375 |
376 | Add or update the following in your
377 | &values-filename; file to increase the number of
378 | diego-cell instances in your &cap; deployment. Replace the
379 | example value with the number required by your workload.
380 |
381 | sizing:
382 | diego_cell:
383 | instances: 5
384 |
385 |
386 |
387 |
388 | Perform a helm upgrade to apply the change.
389 |
390 | &prompt.user;helm upgrade kubecf suse/kubecf \
391 | --namespace kubecf \
392 | --values &values-file; \
393 | --version &kubecf_chart;
394 |
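Before watching the pods roll, you can confirm the upgrade was recorded
by querying the release status (a standard &helm; 3 command):

&prompt.user;helm status kubecf --namespace kubecf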
395 |
396 |
397 |
398 | Monitor progress of the additional diego-cell pods:
399 |
400 | &prompt.user;watch --color 'kubectl get pods --namespace kubecf'
401 |
402 |
403 |
404 |
405 |
--------------------------------------------------------------------------------
/xml/cap_admin_logging.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | %entities;
6 | ]>
7 |
11 |
12 | Logging
13 |
14 |
15 | yes
16 |
17 |
18 |
19 | There are two types of logs in a deployment of &productname;: application
20 | logs and component logs. The following provides a brief overview of each log
21 | type and how to retrieve the logs for monitoring and debugging.
22 |
23 |
24 |
25 | &log-types-and-fetch;
26 |
27 |
28 | Logging to an External Syslog Server
29 |
30 |
31 | &cap; supports sending the cluster's log data to external logging services
32 | where additional processing and analysis can be performed.
33 |
34 |
35 |
36 | Configuring &cap;
37 |
38 | In your &values-filename; file, add the following
39 | configuration values to the env: section. The example
40 | values below are configured for an external ELK stack.
41 |
42 | env:
43 | SCF_LOG_HOST: elk.example.com
44 | SCF_LOG_PORT: 5001
45 | SCF_LOG_PROTOCOL: "tcp"
46 |
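These values take effect only after they are applied to the running
deployment. A sketch using the same helm upgrade pattern
shown in the deployment chapters:

&prompt.user;helm upgrade kubecf suse/kubecf \
--namespace kubecf \
--values &values-file; \
--version &kubecf_chart;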
47 |
48 |
49 | Example Using the ELK Stack
50 |
51 | The ELK stack is an example of an external syslog server to which log data
52 | can be sent for log management. The ELK stack consists of:
53 |
54 |
55 |
56 | Elasticsearch
57 |
58 |
59 | A tool for search and analytics. For more information, refer to
60 | .
61 |
62 |
63 |
64 |
65 | Logstash
66 |
67 |
68 | A tool for data processing. For more information, refer to
69 | .
70 |
71 |
72 |
73 |
74 | Kibana
75 |
76 |
77 |
78 | A tool for data visualization. For more information, refer to
79 | .
80 |
81 |
82 |
83 |
84 |
85 |
86 | Prerequisites
87 |
88 | Java 8 is required by:
89 |
90 |
91 |
92 | Elasticsearch
93 |
94 |
95 |
96 | For more information, refer to
97 | .
98 |
99 |
100 |
101 |
102 | Logstash
103 |
104 |
105 | For more information, refer to
106 | .
107 |
108 |
109 |
110 |
111 |
112 |
113 |
114 | Installing and Configuring Elasticsearch
115 |
116 | For methods of installing Elasticsearch, refer to
117 | .
118 |
119 |
120 | After installation, modify the config file
121 | /etc/elasticsearch/elasticsearch.yml to set the
122 | following value.
123 |
124 | network.host: localhost
125 |
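After starting Elasticsearch, you can verify that it is responding on
the configured interface (a quick check; 9200 is the default HTTP
port):

&prompt.user;curl http://localhost:9200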
126 |
127 |
128 | Installing and Configuring Logstash
129 |
130 | For methods of installing Logstash, refer to
131 | .
132 |
133 |
134 | After installation, create a configuration file under
135 | /etc/logstash/conf.d/. In this example, it is named
136 | 00-scf.conf. Add the following to the
137 | file, and take note of the port used in the input section: this value must
138 | match the value of the SCF_LOG_PORT property in
139 | your &values-filename; file.
140 |
141 | input {
142 | tcp {
143 | port => 5001
144 | }
145 | }
146 | output {
147 | stdout {}
148 | elasticsearch {
149 | hosts => ["localhost:9200"]
150 | index => "scf-%{+YYYY.MM.dd}"
151 | }
152 | }
153 |
154 |
155 | Additional input plug-ins can be found at
156 |
157 | and output plug-ins can be found at
158 | .
159 | For this example, we will demonstrate the flow of data through the stack,
160 | but filter plug-ins can also be specified to perform processing of the log
161 | data. For more details about filter plug-ins, refer to
162 | .
163 |
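Once Logstash and Elasticsearch are running, the flow of data through
the pipeline can be smoke-tested from the Logstash host (a sketch,
assuming the netcat and curl
utilities are available; some netcat variants need
-q 0 to close the connection after sending):

&prompt.user;echo "pipeline smoke test" | nc localhost 5001
&prompt.user;curl "localhost:9200/scf-*/_search?q=message:smoke&pretty"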
164 |
165 |
166 | Installing and Configuring Kibana
167 |
168 | For methods of installing Kibana, refer to
169 | .
170 |
171 |
172 | No configuration changes are required at this point. Refer to
173 |
174 | for additional properties that can be configured through the
175 | kibana.yml file.
176 |
177 |
178 |
179 |
180 |
181 | Log Levels
182 |
183 |
184 | The log level is configured through the
185 | &values-filename; file by using the
186 | LOG_LEVEL property found in the env:
187 | section. The LOG_LEVEL property is mapped to
188 | component-specific levels. Because components have differing technology
189 | compositions (for example, languages and frameworks), each component
190 | determines for itself what content to provide at each level, so the detail
191 | may vary between components.
192 |
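For example, to raise the log level for debugging (a minimal
values-file snippet; debug is one of the levels listed
below):

env:
  LOG_LEVEL: "debug"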
193 |
194 |
195 | The following are the log levels available along with examples of log
196 | entries at the given level.
197 |
198 |
199 |
200 |
201 |
202 | off: disable log messages
203 |
204 |
205 |
206 |
207 | fatal: fatal conditions
208 |
209 |
210 |
211 |
212 | error: error conditions
213 |
214 | <11>1 2018-08-21T17:59:48.321059+00:00 api-group-0 vcap.cloud_controller_ng
215 | - - -
216 | {"timestamp":1534874388.3206334,"message":"Mysql2::Error: MySQL
217 | server has gone away: SELECT count(*) AS `count` FROM `tasks` WHERE
218 | (`state` = 'RUNNING') LIMIT 1","log_level":"error","source":"cc.db","data":
219 | {},"thread_id":47367387197280,"fiber_id":47367404488760,"process_id":3400,"file":"/
220 | var/vcap/packages/cloud_controller_ng/cloud_controller_ng/vendor/bundle/ruby/2.4.0/
221 | gems/sequel-4.49.0/lib/sequel/database/logging.rb","lineno":88,"method":"block in
222 | log_each"}
223 |
224 |
225 |
226 |
227 | warn: warning conditions
228 |
229 | <12>1 2018-08-21T18:49:37.651186+00:00 api-group-0 vcap.cloud_controller_ng
230 | - - -
231 | {"timestamp":1534877377.6507676,"message":"Invalid bearer token:
232 | #<CF::UAA::InvalidSignature: Signature verification failed> [\"/var/vcap/
233 | packages/cloud_controller_ng/cloud_controller_ng/vendor/bundle/ruby/2.4.0/gems/
234 | cf-uaa-lib-3.14.3/lib/uaa/token_coder.rb:118:in `decode'\", \"/var/vcap/packages/
235 | cloud_controller_ng/cloud_controller_ng/vendor/bundle/ruby/2.4.0/gems/cf-uaa-
236 | lib-3.14.3/lib/uaa/token_coder.rb:212:in `decode_at_reference_time'\", \"/var/
237 | vcap/packages-src/8d7a6cd54ff4180c0094fc9aefbe3e5f43169e13/cloud_controller_ng/
238 | lib/cloud_controller/uaa/uaa_token_decoder.rb:70:in `decode_token_with_key'\",
239 | \"/var/vcap/packages-src/8d7a6cd54ff4180c0094fc9aefbe3e5f43169e13/
240 | cloud_controller_ng/lib/cloud_controller/uaa/uaa_token_decoder.rb:58:in
241 | `block in decode_token_with_asymmetric_key'\", \"/var/vcap/packages-
242 | src/8d7a6cd54ff4180c0094fc9aefbe3e5f43169e13/cloud_controller_ng/
243 | lib/cloud_controller/uaa/uaa_token_decoder.rb:56:in `each'\", \"/
244 | var/vcap/packages-src/8d7a6cd54ff4180c0094fc9aefbe3e5f43169e13/
245 | cloud_controller_ng/lib/cloud_controller/uaa/uaa_token_decoder.rb:56:in
246 | `decode_token_with_asymmetric_key'\", \"/var/vcap/packages-
247 | src/8d7a6cd54ff4180c0094fc9aefbe3e5f43169e13/cloud_controller_ng/lib/
248 | cloud_controller/uaa/uaa_token_decoder.rb:29:in `decode_token'\", \"/var/vcap/
249 | packages-src/8d7a6cd54ff4180c0094fc9aefbe3e5f43169e13/cloud_controller_ng/lib/
250 | cloud_controller/security/security_context_configurer.rb:22:in `decode_token'\", \"/
251 | var/vcap/packages-src/8d7a6cd54ff4180c0094fc9aefbe3e5f43169e13/cloud_controller_ng/
252 | lib/cloud_controller/security/security_context_configurer.rb:10:in `configure'\",
253 | \"/var/vcap/packages/cloud_controller_ng/cloud_controller_ng/middleware/
254 | security_context_setter.rb:12:in `call'\", \"/var/vcap/packages/cloud_controller_ng/
255 | cloud_controller_ng/middleware/vcap_request_id.rb:15:in `call'\", \"/var/vcap/
256 | packages/cloud_controller_ng/cloud_controller_ng/middleware/cors.rb:49:in
257 | `call_app'\", \"/var/vcap/packages/cloud_controller_ng/cloud_controller_ng/
258 | middleware/cors.rb:14:in `call'\", \"/var/vcap/packages/cloud_controller_ng/
259 | cloud_controller_ng/middleware/request_metrics.rb:12:in `call'\", \"/
260 | var/vcap/packages/cloud_controller_ng/cloud_controller_ng/vendor/bundle/
261 | ruby/2.4.0/gems/rack-1.6.9/lib/rack/builder.rb:153:in `call'\", \"/var/vcap/
262 | packages/cloud_controller_ng/cloud_controller_ng/vendor/bundle/ruby/2.4.0/
263 | gems/thin-1.7.0/lib/thin/connection.rb:86:in `block in pre_process'\", \"/
264 | var/vcap/packages/cloud_controller_ng/cloud_controller_ng/vendor/bundle/
265 | ruby/2.4.0/gems/thin-1.7.0/lib/thin/connection.rb:84:in `catch'\", \"/var/
266 | vcap/packages/cloud_controller_ng/cloud_controller_ng/vendor/bundle/ruby/2.4.0/
267 | gems/thin-1.7.0/lib/thin/connection.rb:84:in `pre_process'\", \"/var/vcap/
268 | packages/cloud_controller_ng/cloud_controller_ng/vendor/bundle/ruby/2.4.0/
269 | gems/thin-1.7.0/lib/thin/connection.rb:50:in `block in process'\", \"/
270 | var/vcap/packages/cloud_controller_ng/cloud_controller_ng/vendor/bundle/
271 | ruby/2.4.0/gems/eventmachine-1.0.9.1/lib/eventmachine.rb:1067:in `block in
272 | spawn_threadpool'\"]","log_level":"warn","source":"cc.uaa_token_decoder","data":
273 | {"request_guid":"f3e25c45-a94a-4748-7ccf-5a72600fbb17::774bdb79-5d6a-4ccb-a9b8-
274 | f4022afa3bdd"},"thread_id":47339751566100,"fiber_id":47339769104800,"process_id":3245,"file":"/
275 | var/vcap/packages-src/8d7a6cd54ff4180c0094fc9aefbe3e5f43169e13/cloud_controller_ng/
276 | lib/cloud_controller/uaa/uaa_token_decoder.rb","lineno":35,"method":"rescue in
277 | decode_token"}
278 |
279 |
280 |
281 |
282 | info: informational messages
283 |
284 | <14>1 2018-08-21T22:42:54.324023+00:00 api-group-0 vcap.cloud_controller_ng
285 | - - -
286 | {"timestamp":1534891374.3237739,"message":"Started GET
287 | \"/v2/info\" for user: , ip: 127.0.0.1 with vcap-request-id:
288 | 45e00b66-e0b7-4b10-b1e0-2657f43284e7 at 2018-08-21 22:42:54
289 | UTC","log_level":"info","source":"cc.api","data":{"request_guid":"45e00b66-
290 | e0b7-4b10-
291 | b1e0-2657f43284e7"},"thread_id":47420077354840,"fiber_id":47420124921300,"process_id":3200,"file":
292 | var/vcap/packages/cloud_controller_ng/cloud_controller_ng/middleware/
293 | request_logs.rb","lineno":12,"method":"call"}
294 |
295 |
296 |
297 |
298 | debug: debugging messages
299 |
300 | <15>1 2018-08-21T22:45:15.146838+00:00 api-group-0 vcap.cloud_controller_ng
301 | - - -
302 | {"timestamp":1534891515.1463814,"message":"dispatch
303 | VCAP::CloudController::InfoController get /v2/
304 | info","log_level":"debug","source":"cc.api","data":{"request_guid":"b228ef6d-
305 | af5e-4808-
306 | af0b-791a37f51154"},"thread_id":47420125585200,"fiber_id":47420098783620,"process_id":3200,"file":
307 | var/vcap/packages-src/8d7a6cd54ff4180c0094fc9aefbe3e5f43169e13/cloud_controller_ng/
308 | lib/cloud_controller/rest_controller/routes.rb","lineno":12,"method":"block in
309 | define_route"}
310 |
311 |
312 |
313 |
314 | debug1: lower-level debugging messages
315 |
316 |
317 |
318 |
319 | debug2: lowest-level debugging messages
320 |
321 | <15>1 2018-08-21T22:46:02.173445+00:00 api-group-0 vcap.cloud_controller_ng - - -
322 | {"timestamp":1534891562.1731355,"message":"(0.006130s) SELECT * FROM `delayed_jobs`
323 | WHERE ((((`run_at` <= '2018-08-21 22:46:02') AND (`locked_at` IS NULL)) OR
324 | (`locked_at` < '2018-08-21 18:46:02') OR (`locked_by` = 'cc_api_worker.api.0.1'))
325 | AND (`failed_at` IS NULL) AND (`queue` IN ('cc-api-0'))) ORDER BY `priority`
326 | ASC, `run_at` ASC LIMIT 5","log_level":"debug2","source":"cc.background","data":
327 | {},"thread_id":47194852110160,"fiber_id":47194886034680,"process_id":3296,"file":"/
328 | var/vcap/packages/cloud_controller_ng/cloud_controller_ng/vendor/bundle/ruby/2.4.0/
329 | gems/sequel-4.49.0/lib/sequel/database/logging.rb","lineno":88,"method":"block in
330 | log_each"}
331 |
332 |
333 |
334 |
335 |
336 |
--------------------------------------------------------------------------------
/xml/entity-decl.ent:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 | &values-file;">
24 |
25 | Deployment, Administration, and User Guide">
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 | &yast; Control Center">
55 |
56 |
57 |
58 |
59 |
60 |
61 |
62 |
63 |
64 |
65 |
66 |
67 |
68 |
69 |
70 |
71 |
72 |
73 |
74 |
75 |
76 |
77 |
78 |
79 |
80 |
81 |
82 |
83 |
84 |
85 |
86 |
87 |
88 |
89 |
90 |
91 |
92 |
93 |
94 |
95 |
96 |
97 |
99 |
100 |
101 |
102 | &exampleuser_plain;">
103 |
104 |
105 |
106 | &exampleuserII_plain;">
107 |
108 |
109 |
110 | exampleuserIII_plain">
111 |
112 |
113 | users">
114 |
115 |
116 | root">
117 |
118 |
119 |
120 |
121 |
122 |
123 |
124 |
125 |
126 |
127 |
128 |
129 |
130 |
131 |
132 |
133 |
134 |
135 | Open vSwitch">
136 |
137 |
138 |
139 | systemd">
140 | cron">
141 | oprofile">
142 |
143 |
144 |
145 |
146 |
147 |
148 |
149 |
150 |
151 | AppArmor">
152 | AppArmor®">
153 | sudo">
154 |
155 |
156 |
157 |
158 |
159 |
160 |
161 |
162 |
163 |
164 |
165 |
166 |
167 |
168 |
169 |
170 |
171 |
172 |
173 |
174 |
175 |
176 |
177 |
178 |
179 |
180 |
181 |
182 |
183 |
184 |
185 |
186 |
187 |
188 |
189 |
190 |
191 |
192 |
193 |
194 |
195 |
196 |
197 |
198 | lvmcache">
199 | bcache">
200 | libvirt">
201 | libvirtd">
202 |
203 |
204 | qemu-system-ARCH">
205 |
206 |
207 |
208 |
209 |
210 |
211 |
212 |
213 |
214 |
215 |
216 |
217 |
218 |
219 |
220 |
221 |
222 |
223 | Deployment Guide">
224 | Installation Quick Start">
225 | Administration Guide">
226 | Reference">
227 | Start-Up">
228 |
229 |
230 |
231 |
232 |
233 |
236 |
237 |
238 |
239 |
240 |
241 |
242 |
243 |
244 |
245 |
246 |
247 |
248 |
249 |
250 |
251 |
252 |
253 |
254 |
255 |
256 | MAC">
257 | MAC">
258 | MAC">
259 |
260 |
261 |
262 |
263 |
264 |
265 |
266 |
267 |
268 |
269 |
270 | F2">
271 |
272 |
273 |
274 |
275 |
276 |
277 |
278 |
279 |
280 |
281 |
282 |
283 |
284 |
285 |
286 | root # ">
287 | &exampleuser_plain; > ">
288 | &exampleuserII_plain; > ">
289 |
290 |
291 |
292 |
293 |
294 |
295 |
310 |
311 |
312 | %network-entities;
313 |
314 |
315 | %repeat-entities;
316 |
317 |
318 |
319 |
320 |
321 |
322 |
323 | %dbcent;
324 |
325 |
326 |
327 |
--------------------------------------------------------------------------------