├── .devcontainer ├── Dockerfile ├── config │ └── zshrc └── devcontainer.json ├── .gitignore ├── .readthedocs.yaml ├── LICENSE ├── Makefile ├── README.md ├── lab ├── kubeadm-3-nodes-china │ ├── Vagrantfile │ ├── nfs.sh │ └── setup.sh ├── kubeadm-3-nodes │ ├── Vagrantfile │ ├── nfs.sh │ └── setup.sh ├── nfs-server │ ├── Vagrantfile │ └── setup.sh ├── pod │ └── nginx.yml └── v1.23 │ ├── Vagrantfile │ └── setup.sh ├── make.bat ├── requirements.txt └── source ├── _code ├── k8s-install │ ├── install-cn.sh │ └── install.sh └── network │ ├── container-to-container.yml │ └── pod-to-pod-single-node.yml ├── _static ├── about.jpg ├── controller-manager-on-master.png ├── css │ └── custom.css ├── exam │ └── kubectl-cheat-sheet.PNG ├── init-containers.png ├── introduction │ ├── container-orchestration.PNG │ └── kubernetes_architecture.jpg ├── k8s-core-concept │ ├── deployment.svg │ └── pod.png ├── kube-alias.jpeg ├── logging │ └── fluentd.png ├── maintaining │ ├── k8s-ha1.jpg │ └── k8s-ha2.jpg ├── network │ ├── ingress-fanout.png │ ├── ingress-overview.svg │ ├── ingress-virtual-host.png │ ├── kubernetes-ingress.png │ ├── networking-overview.png │ ├── pod-network.PNG │ └── service.gif ├── pdf │ └── k8s-network.pdf ├── pod-phase.png ├── wechat-tips.jpg └── wechat.jpg ├── about.rst ├── api-server-pod.rst ├── api-server-pod ├── api-server.rst ├── init-container.rst ├── kubectl-proxy.rst ├── namespace.rst ├── pod-lifecycle.rst ├── pod.rst └── static-pod.rst ├── conf.py ├── controller-deployment.rst ├── controller-deployment ├── controller-manager.rst ├── daemonset.rst ├── deployment.rst ├── job.rst └── labels.rst ├── exam.rst ├── index.rst ├── introduction.rst ├── k8s-install.rst ├── k8s-install ├── kubeadm-cn.rst ├── kubeadm.rst ├── mac-arm.rst ├── minikube.rst ├── vagrant.rst └── verify.rst ├── logging-monitoring.rst ├── logging ├── event.rst ├── jsonpath.rst ├── logging.rst └── monitoring.rst ├── maintaining.rst ├── maintaining ├── HA.rst ├── cert-renew.rst ├── etcd.rst └── upgrade.rst ├── network.rst ├── network ├── cluster-dns.rst ├── ingress.rst ├── pod-network.rst ├── resource.rst ├── service-auto.rst └── service.rst ├── scheduling.rst ├── scheduling ├── affinity.rst ├── cordoning.rst ├── manual-pod.rst ├── node-selector.rst └── taints.rst ├── security.rst ├── security ├── RBAC.rst ├── kubeconfig.rst └── service_account.rst ├── storage ├── configmap.rst ├── env.rst ├── nfs.rst ├── pv-pvc.rst ├── secrets.rst └── volume.rst ├── stroage.rst ├── troubleshooting.rst └── troubleshooting ├── app.rst ├── control-plane.rst ├── nodes.rst └── tools.rst /.devcontainer/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.10-alpine 2 | 3 | ENV TZ Europe/Amsterdam 4 | 5 | ARG USERNAME=devcontainer 6 | ARG USER_UID=1000 7 | ARG USER_GID=$USER_UID 8 | 9 | # Install git, process tools, lsb-release (common in install instructions for CLIs) and curl 10 | RUN apk --no-cache update && apk --no-cache upgrade && \ 11 | apk add --no-cache gcc musl-dev linux-headers libffi-dev bash zsh git curl sudo build-base 12 | 13 | RUN pip install --no-cache-dir --upgrade pip && \ 14 | pip install --no-cache-dir wheel pre-commit 15 | 16 | # change default shell from bash to zsh 17 | RUN sed -i -e "s/bin\/ash/bin\/zsh/" /etc/passwd 18 | 19 | # Create the user 20 | RUN addgroup -g $USER_GID $USERNAME \ 21 | && adduser -D -u $USER_UID -G $USERNAME -s /bin/zsh $USERNAME 22 | # add user to sudoer 23 | RUN echo "$USERNAME ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers 24 | 25 | # change user 
26 | USER $USERNAME 27 | 28 | # install oh-my-zsh 29 | RUN sh -c "$(curl -fsSL https://raw.githubusercontent.com/ohmyzsh/ohmyzsh/master/tools/install.sh)" 30 | # install zsh plugins auto suggestions and syntax highlighting 31 | RUN git clone https://github.com/zsh-users/zsh-autosuggestions ${ZSH_CUSTOM:-~/.oh-my-zsh/custom}/plugins/zsh-autosuggestions 32 | 33 | COPY config/zshrc /home/$USERNAME/.zshrc 34 | -------------------------------------------------------------------------------- /.devcontainer/config/zshrc: -------------------------------------------------------------------------------- 1 | # If you come from bash you might have to change your $PATH. 2 | # export PATH=$HOME/bin:/usr/local/bin:$PATH 3 | 4 | # Path to your oh-my-zsh installation. 5 | export ZSH="$HOME/.oh-my-zsh" 6 | 7 | # Set name of the theme to load --- if set to "random", it will 8 | # load a random theme each time oh-my-zsh is loaded, in which case, 9 | # to know which specific one was loaded, run: echo $RANDOM_THEME 10 | # See https://github.com/ohmyzsh/ohmyzsh/wiki/Themes 11 | ZSH_THEME="robbyrussell" 12 | 13 | # Set list of themes to pick from when loading at random 14 | # Setting this variable when ZSH_THEME=random will cause zsh to load 15 | # a theme from this variable instead of looking in $ZSH/themes/ 16 | # If set to an empty array, this variable will have no effect. 17 | # ZSH_THEME_RANDOM_CANDIDATES=( "robbyrussell" "agnoster" ) 18 | 19 | # Uncomment the following line to use case-sensitive completion. 20 | # CASE_SENSITIVE="true" 21 | 22 | # Uncomment the following line to use hyphen-insensitive completion. 23 | # Case-sensitive completion must be off. _ and - will be interchangeable. 24 | # HYPHEN_INSENSITIVE="true" 25 | 26 | # Uncomment one of the following lines to change the auto-update behavior 27 | # zstyle ':omz:update' mode disabled # disable automatic updates 28 | # zstyle ':omz:update' mode auto # update automatically without asking 29 | # zstyle ':omz:update' mode reminder # just remind me to update when it's time 30 | 31 | # Uncomment the following line to change how often to auto-update (in days). 32 | # zstyle ':omz:update' frequency 13 33 | 34 | # Uncomment the following line if pasting URLs and other text is messed up. 35 | # DISABLE_MAGIC_FUNCTIONS="true" 36 | 37 | # Uncomment the following line to disable colors in ls. 38 | # DISABLE_LS_COLORS="true" 39 | 40 | # Uncomment the following line to disable auto-setting terminal title. 41 | # DISABLE_AUTO_TITLE="true" 42 | 43 | # Uncomment the following line to enable command auto-correction. 44 | # ENABLE_CORRECTION="true" 45 | 46 | # Uncomment the following line to display red dots whilst waiting for completion. 47 | # You can also set it to another string to have that shown instead of the default red dots. 48 | # e.g. COMPLETION_WAITING_DOTS="%F{yellow}waiting...%f" 49 | # Caution: this setting can cause issues with multiline prompts in zsh < 5.7.1 (see #5765) 50 | # COMPLETION_WAITING_DOTS="true" 51 | 52 | # Uncomment the following line if you want to disable marking untracked files 53 | # under VCS as dirty. This makes repository status check for large repositories 54 | # much, much faster. 55 | # DISABLE_UNTRACKED_FILES_DIRTY="true" 56 | 57 | # Uncomment the following line if you want to change the command execution time 58 | # stamp shown in the history command output. 
59 | # You can set one of the optional three formats: 60 | # "mm/dd/yyyy"|"dd.mm.yyyy"|"yyyy-mm-dd" 61 | # or set a custom format using the strftime function format specifications, 62 | # see 'man strftime' for details. 63 | # HIST_STAMPS="mm/dd/yyyy" 64 | 65 | # Would you like to use another custom folder than $ZSH/custom? 66 | # ZSH_CUSTOM=/path/to/new-custom-folder 67 | 68 | # Which plugins would you like to load? 69 | # Standard plugins can be found in $ZSH/plugins/ 70 | # Custom plugins may be added to $ZSH_CUSTOM/plugins/ 71 | # Example format: plugins=(rails git textmate ruby lighthouse) 72 | # Add wisely, as too many plugins slow down shell startup. 73 | plugins=( 74 | git 75 | zsh-autosuggestions 76 | ) 77 | 78 | ZSH_AUTOSUGGEST_HIGHLIGHT_STYLE='fg=5' 79 | 80 | 81 | source $ZSH/oh-my-zsh.sh 82 | 83 | # User configuration 84 | 85 | # export MANPATH="/usr/local/man:$MANPATH" 86 | 87 | # You may need to manually set your language environment 88 | # export LANG=en_US.UTF-8 89 | 90 | # Preferred editor for local and remote sessions 91 | # if [[ -n $SSH_CONNECTION ]]; then 92 | # export EDITOR='vim' 93 | # else 94 | # export EDITOR='mvim' 95 | # fi 96 | 97 | # Compilation flags 98 | # export ARCHFLAGS="-arch x86_64" 99 | 100 | # Set personal aliases, overriding those provided by oh-my-zsh libs, 101 | # plugins, and themes. Aliases can be placed here, though oh-my-zsh 102 | # users are encouraged to define aliases within the ZSH_CUSTOM folder. 103 | # For a full list of active aliases, run `alias`. 104 | # 105 | # Example aliases 106 | # alias zshconfig="mate ~/.zshrc" 107 | # alias ohmyzsh="mate ~/.oh-my-zsh" 108 | export PATH="${PATH}:/home/devcontainer/.local/bin" 109 | -------------------------------------------------------------------------------- /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "Python DevContainer", 3 | "dockerFile": "Dockerfile", 4 | "customizations": { 5 | "vscode": { 6 | "settings": { 7 | "editor.tabSize": 2, 8 | "files.trimTrailingWhitespace": true, 9 | "workbench.colorTheme": "Aura Dark", 10 | "workbench.iconTheme": "eq-material-theme-icons-darker" 11 | }, 12 | "extensions": [ 13 | "ms-python.python", 14 | "ms-python.vscode-pylance", 15 | "DaltonMenezes.aura-theme", 16 | "equinusocio.vsc-material-theme-icons", 17 | "GitHub.copilot", 18 | "ms-vscode.cpptools-extension-pack" 19 | ] 20 | } 21 | }, 22 | "remoteUser": "devcontainer", 23 | "postCreateCommand": "zsh -i -c 'pre-commit install && pip install -r requirements.txt'" 24 | } -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | # vagrant 132 | .vagrant 133 | .vscode 134 | -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | # .readthedocs.yaml 2 | # Read the Docs configuration file 3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 4 | 5 | # Required 6 | version: 2 7 | 8 | # Set the version of Python and other tools you might need 9 | build: 10 | os: ubuntu-22.04 11 | tools: 12 | python: "3.10" 13 | 14 | # Build documentation in the docs/ directory with Sphinx 15 | sphinx: 16 | configuration: source/conf.py 17 | 18 | # We recommend specifying your dependencies to enable reproducible builds: 19 | # https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html 20 | python: 21 | install: 22 | - requirements: requirements.txt -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. 
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 
134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | SPHINXOPTS    ?=
7 | SPHINXBUILD   ?= sphinx-build
8 | SOURCEDIR     = source
9 | BUILDDIR      = build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | 	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | 	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # learn-k8s-from-scratch
2 |
3 | [![Documentation Status](https://readthedocs.org/projects/learn-k8s-from-scratch/badge/?version=latest)](https://learn-k8s-from-scratch.readthedocs.io/en/latest/?badge=latest)
4 |
5 | If you find any problems or mistakes in the docs, feel free to open a PR or an issue.
6 |
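Example: building these docs locally (an added sketch, not a file from this repository; assumes Python 3 and GNU make are installed):

    pip install -r requirements.txt   # dependencies are listed in requirements.txt below
    make html                         # per the Makefile above, HTML output is written to build/html
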
--------------------------------------------------------------------------------
/lab/kubeadm-3-nodes-china/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 | #
4 | Vagrant.require_version ">= 1.6.0"
5 |
6 | # All Vagrant configuration is done below. The "2" in Vagrant.configure
7 | # configures the configuration version (we support older styles for
8 | # backwards compatibility). Please don't change it unless you know what
9 | # you're doing.
10 |
11 |
12 | boxes = [
13 |   {
14 |     :name => "k8s-master",
15 |     :eth1 => "192.168.56.10",
16 |     :mem => "4096",
17 |     :cpu => "2"
18 |   },
19 |   {
20 |     :name => "k8s-worker1",
21 |     :eth1 => "192.168.56.11",
22 |     :mem => "2048",
23 |     :cpu => "2"
24 |   },
25 |   {
26 |     :name => "k8s-worker2",
27 |     :eth1 => "192.168.56.12",
28 |     :mem => "2048",
29 |     :cpu => "2"
30 |   }
31 | ]
32 |
33 | Vagrant.configure(2) do |config|
34 |
35 |   # config.vm.box = "ubuntu/jammy64"
36 |   config.vm.box = "ubuntu/focal64" #ubuntu 20.04
37 |   boxes.each do |opts|
38 |     config.vm.define opts[:name] do |config|
39 |       config.vm.hostname = opts[:name]
40 |
41 |       config.vm.provider "vmware_fusion" do |v|
42 |         v.vmx["memsize"] = opts[:mem]
43 |         v.vmx["numvcpus"] = opts[:cpu]
44 |       end
45 |
46 |       config.vm.provider "virtualbox" do |v|
47 |         v.customize ["modifyvm", :id, "--memory", opts[:mem]]
48 |         v.customize ["modifyvm", :id, "--cpus", opts[:cpu]]
49 |       end
50 |
51 |       config.vm.network :private_network, ip: opts[:eth1]
52 |     end
53 |   end
54 |   config.vm.provision "shell", privileged: true, path: "./setup.sh"
55 | end
56 |
--------------------------------------------------------------------------------
/lab/kubeadm-3-nodes-china/nfs.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # install NFS server and create directory for our exports
4 |
5 | sudo apt-get install -y nfs-kernel-server
6 | sudo mkdir /export/volumes
7 | sudo mkdir /export/volumes/pod
8 |
9 | # config NFS export
10 |
11 | sudo bash -c 'echo "/export/volumes *(rw,no_root_squash,no_subtree_check)" > /etc/exports'
12 | cat /etc/exports
13 | sudo systemctl restart nfs-kernel-server.service
14 |
15 |
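Example: verifying the export from another VM on the same private network (an added sketch, not a repo file; assumes nfs.sh was run on the master at 192.168.56.10 and nfs-common is installed on the client):

    sudo apt-get install -y nfs-common
    showmount -e 192.168.56.10                             # should list /export/volumes
    sudo mount -t nfs 192.168.56.10:/export/volumes /mnt   # quick manual mount test
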
--------------------------------------------------------------------------------
/lab/kubeadm-3-nodes-china/setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # setup timezone
4 | echo "[TASK 0] Set timezone"
5 | timedatectl set-timezone Asia/Shanghai
6 | apt-get install -y ntpdate >/dev/null 2>&1
7 | ntpdate ntp.aliyun.com
8 |
9 |
10 | echo "[TASK 1] Disable and turn off SWAP"
11 | sed -i '/swap/d' /etc/fstab
12 | swapoff -a
13 |
14 | echo "[TASK 2] Stop and Disable firewall"
15 | systemctl disable --now ufw >/dev/null 2>&1
16 |
17 | echo "[TASK 3] Enable and Load Kernel modules"
18 | cat >>/etc/modules-load.d/containerd.conf <<EOF
19 | overlay
20 | br_netfilter
21 | EOF
22 | modprobe overlay
23 | modprobe br_netfilter
24 |
25 | echo "[TASK 4] Enable all Kernel settings"
26 | cat >>/etc/sysctl.d/kubernetes.conf <<EOF
27 | net.bridge.bridge-nf-call-ip6tables = 1
28 | net.bridge.bridge-nf-call-iptables = 1
29 | net.ipv4.ip_forward = 1
30 | EOF
31 | sysctl --system >/dev/null 2>&1
32 |
33 | echo "[TASK 5] Install containerd runtime"
34 | mkdir -p /etc/apt/keyrings
35 | curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
36 | echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \
37 | $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
38 | apt -qq update >/dev/null 2>&1
39 | apt install -qq -y containerd.io >/dev/null 2>&1
40 | containerd config default >/etc/containerd/config.toml
41 | str1="registry.k8s.io/pause:3.8"
42 | str2="registry.aliyuncs.com/google_containers/pause:3.9"
43 | sed -i "/sandbox_image/ s%${str1}%${str2}%g" /etc/containerd/config.toml
44 | sed -i '/SystemdCgroup/ s/false/true/g' /etc/containerd/config.toml
45 | systemctl restart containerd
46 | systemctl enable containerd > /dev/null 2>&1
47 |
48 |
49 | echo "[TASK 6] Add apt repo for kubernetes"
50 | curl -fsSL https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | sudo apt-key add - > /dev/null 2>&1
51 | echo "deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list > /dev/null 2>&1
52 | apt-get update >/dev/null 2>&1
53 |
54 | echo "[TASK 7] Install Kubernetes components (kubeadm, kubelet and kubectl)"
55 | apt install -qq -y kubeadm=1.28.0-00 kubelet=1.28.0-00 kubectl=1.28.0-00 >/dev/null 2>&1
--------------------------------------------------------------------------------
/lab/kubeadm-3-nodes/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 | #
4 | Vagrant.require_version ">= 1.6.0"
5 |
6 | # All Vagrant configuration is done below. The "2" in Vagrant.configure
7 | # configures the configuration version (we support older styles for
8 | # backwards compatibility). Please don't change it unless you know what
9 | # you're doing.
10 |
11 |
12 | boxes = [
13 |   {
14 |     :name => "k8s-master",
15 |     :eth1 => "192.168.56.10",
16 |     :mem => "4096",
17 |     :cpu => "2"
18 |   },
19 |   {
20 |     :name => "k8s-worker1",
21 |     :eth1 => "192.168.56.11",
22 |     :mem => "2048",
23 |     :cpu => "2"
24 |   },
25 |   {
26 |     :name => "k8s-worker2",
27 |     :eth1 => "192.168.56.12",
28 |     :mem => "2048",
29 |     :cpu => "2"
30 |   }
31 | ]
32 |
33 | Vagrant.configure(2) do |config|
34 |
35 |   # config.vm.box = "ubuntu/jammy64"
36 |   config.vm.box = "ubuntu/focal64" #ubuntu 20.04
37 |   boxes.each do |opts|
38 |     config.vm.define opts[:name] do |config|
39 |       config.vm.hostname = opts[:name]
40 |
41 |       config.vm.provider "vmware_fusion" do |v|
42 |         v.vmx["memsize"] = opts[:mem]
43 |         v.vmx["numvcpus"] = opts[:cpu]
44 |       end
45 |
46 |       config.vm.provider "virtualbox" do |v|
47 |         v.customize ["modifyvm", :id, "--memory", opts[:mem]]
48 |         v.customize ["modifyvm", :id, "--cpus", opts[:cpu]]
49 |       end
50 |
51 |       config.vm.network :private_network, ip: opts[:eth1]
52 |     end
53 |   end
54 |   config.vm.provision "shell", privileged: true, path: "./setup.sh"
55 | end
56 |
--------------------------------------------------------------------------------
/lab/kubeadm-3-nodes/nfs.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # install NFS server and create directory for our exports
4 |
5 | sudo apt-get install -y nfs-kernel-server
6 | sudo mkdir /export/volumes
7 | sudo mkdir /export/volumes/pod
8 |
9 | # config NFS export
10 |
11 | sudo bash -c 'echo "/export/volumes *(rw,no_root_squash,no_subtree_check)" > /etc/exports'
12 | cat /etc/exports
13 | sudo systemctl restart nfs-kernel-server.service
14 |
15 |
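Typical workflow for these Vagrant labs (an added sketch; assumes Vagrant with VirtualBox or VMware Fusion is installed, and the kubeadm flags shown are common illustrative defaults, not values taken from this repo):

    cd lab/kubeadm-3-nodes        # or lab/kubeadm-3-nodes-china
    vagrant up                    # provisions all three VMs via setup.sh
    vagrant ssh k8s-master
    sudo kubeadm init --apiserver-advertise-address=192.168.56.10 --pod-network-cidr=10.244.0.0/16
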
--------------------------------------------------------------------------------
/lab/kubeadm-3-nodes/setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo "[TASK 1] Disable and turn off SWAP"
4 | sed -i '/swap/d' /etc/fstab
5 | swapoff -a
6 |
7 | echo "[TASK 2] Stop and Disable firewall"
8 | systemctl disable --now ufw >/dev/null 2>&1
9 |
10 | echo "[TASK 3] Enable and Load Kernel modules"
11 | cat >>/etc/modules-load.d/containerd.conf<<EOF
12 | overlay
13 | br_netfilter
14 | EOF
15 | modprobe overlay
16 | modprobe br_netfilter
17 |
18 | echo "[TASK 4] Enable all Kernel settings"
19 | cat >>/etc/sysctl.d/kubernetes.conf<<EOF
20 | net.bridge.bridge-nf-call-ip6tables = 1
21 | net.bridge.bridge-nf-call-iptables = 1
22 | net.ipv4.ip_forward = 1
23 | EOF
24 | sysctl --system >/dev/null 2>&1
25 |
26 | echo "[TASK 5] Install containerd runtime"
27 | mkdir -p /etc/apt/keyrings
28 | curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
29 | echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \
30 | $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
31 | apt -qq update >/dev/null 2>&1
32 | apt install -qq -y containerd.io >/dev/null 2>&1
33 | containerd config default >/etc/containerd/config.toml
34 | sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
35 | systemctl restart containerd
36 | systemctl enable containerd >/dev/null 2>&1
37 |
38 | echo "[TASK 6] Add apt repo for kubernetes"
39 | apt-get install -y apt-transport-https ca-certificates curl gpg >/dev/null 2>&1
40 | curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.29/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
41 | echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.29/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list > /dev/null
42 |
43 | echo "[TASK 7] Install Kubernetes components (kubeadm, kubelet and kubectl)"
44 | apt -qq update >/dev/null 2>&1
45 | apt install -qq -y kubeadm=1.29.2-1.1 kubelet=1.29.2-1.1 kubectl=1.29.2-1.1 >/dev/null 2>&1
--------------------------------------------------------------------------------
/lab/nfs-server/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 | #
4 | Vagrant.require_version ">= 1.6.0"
5 |
6 | # All Vagrant configuration is done below. The "2" in Vagrant.configure
7 | # configures the configuration version (we support older styles for
8 | # backwards compatibility). Please don't change it unless you know what
9 | # you're doing.
10 |
11 |
12 | boxes = [
13 |   {
14 |     :name => "nfs-server",
15 |     :eth1 => "192.168.56.20",
16 |     :mem => "2048",
17 |     :cpu => "2"
18 |   }
19 | ]
20 |
21 | Vagrant.configure(2) do |config|
22 |
23 |   # config.vm.box = "ubuntu/jammy64"
24 |   config.vm.box = "ubuntu/focal64" #ubuntu 20.04
25 |   boxes.each do |opts|
26 |     config.vm.define opts[:name] do |config|
27 |       config.vm.hostname = opts[:name]
28 |
29 |       config.vm.provider "vmware_fusion" do |v|
30 |         v.vmx["memsize"] = opts[:mem]
31 |         v.vmx["numvcpus"] = opts[:cpu]
32 |       end
33 |
34 |       config.vm.provider "virtualbox" do |v|
35 |         v.customize ["modifyvm", :id, "--memory", opts[:mem]]
36 |         v.customize ["modifyvm", :id, "--cpus", opts[:cpu]]
37 |       end
38 |
39 |       config.vm.network :private_network, ip: opts[:eth1]
40 |     end
41 |   end
42 |   config.vm.provision "shell", privileged: true, path: "./setup.sh"
43 | end
44 |
--------------------------------------------------------------------------------
/lab/nfs-server/setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # install NFS server and create directory for our exports
4 |
5 | sudo apt-get install -y nfs-kernel-server
6 | sudo mkdir -p /export/volumes
7 | sudo mkdir -p /export/volumes/pod
8 |
9 | # config NFS export
10 |
11 | sudo bash -c 'echo "/export/volumes *(rw,no_root_squash,no_subtree_check)" > /etc/exports'
12 | cat /etc/exports
13 | sudo systemctl restart nfs-kernel-server.service
14 |
--------------------------------------------------------------------------------
/lab/pod/nginx.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 |   name: myapp-pod
5 |
6 | spec:
7 |   containers:
8 |     - name: myapp   # a container name is required by the API; added here to make the manifest valid
9 |       image: nginx:latest
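Example usage of this manifest (an added sketch; assumes a working cluster and kubectl context):

    kubectl apply -f nginx.yml    # creates myapp-pod
    kubectl get pods -o wide
    kubectl delete -f nginx.yml
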
--------------------------------------------------------------------------------
/lab/v1.23/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 | #
4 | Vagrant.require_version ">= 1.6.0"
5 |
6 | # All Vagrant configuration is done below. The "2" in Vagrant.configure
7 | # configures the configuration version (we support older styles for
8 | # backwards compatibility). Please don't change it unless you know what
9 | # you're doing.
10 |
11 |
12 | boxes = [
13 |   {
14 |     :name => "k8s-master",
15 |     :eth1 => "192.168.56.10",
16 |     :mem => "4096",
17 |     :cpu => "2"
18 |   },
19 |   {
20 |     :name => "k8s-worker1",
21 |     :eth1 => "192.168.56.11",
22 |     :mem => "2048",
23 |     :cpu => "2"
24 |   },
25 |   {
26 |     :name => "k8s-worker2",
27 |     :eth1 => "192.168.56.12",
28 |     :mem => "2048",
29 |     :cpu => "2"
30 |   }
31 | ]
32 |
33 | Vagrant.configure(2) do |config|
34 |
35 |   # config.vm.box = "ubuntu/jammy64"
36 |   config.vm.box = "ubuntu/focal64" #ubuntu 20.04
37 |   boxes.each do |opts|
38 |     config.vm.define opts[:name] do |config|
39 |       config.vm.hostname = opts[:name]
40 |
41 |       config.vm.provider "vmware_fusion" do |v|
42 |         v.vmx["memsize"] = opts[:mem]
43 |         v.vmx["numvcpus"] = opts[:cpu]
44 |       end
45 |
46 |       config.vm.provider "virtualbox" do |v|
47 |         v.customize ["modifyvm", :id, "--memory", opts[:mem]]
48 |         v.customize ["modifyvm", :id, "--cpus", opts[:cpu]]
49 |       end
50 |
51 |       config.vm.network :private_network, ip: opts[:eth1]
52 |     end
53 |   end
54 |   config.vm.provision "shell", privileged: true, path: "./setup.sh"
55 | end
56 |
--------------------------------------------------------------------------------
/lab/v1.23/setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo "[TASK 1] Disable and turn off SWAP"
4 | sed -i '/swap/d' /etc/fstab
5 | swapoff -a
6 |
7 | echo "[TASK 2] Stop and Disable firewall"
8 | systemctl disable --now ufw >/dev/null 2>&1
9 |
10 | echo "[TASK 3] Enable and Load Kernel modules"
11 | cat >>/etc/modules-load.d/containerd.conf <<EOF
12 | overlay
13 | br_netfilter
14 | EOF
15 | modprobe overlay
16 | modprobe br_netfilter
17 |
18 | echo "[TASK 4] Enable all Kernel settings"
19 | cat >>/etc/sysctl.d/kubernetes.conf <<EOF
20 | net.bridge.bridge-nf-call-ip6tables = 1
21 | net.bridge.bridge-nf-call-iptables = 1
22 | net.ipv4.ip_forward = 1
23 | EOF
24 | sysctl --system >/dev/null 2>&1
25 |
26 | echo "[TASK 5] Install containerd runtime"
27 | apt update -qq >/dev/null 2>&1
28 | apt install -qq -y containerd apt-transport-https >/dev/null 2>&1
29 | apt install -qq -y nfs-common >/dev/null 2>&1
30 | mkdir /etc/containerd
31 | containerd config default >/etc/containerd/config.toml
32 | systemctl restart containerd
33 | systemctl enable containerd >/dev/null 2>&1
34 |
35 | echo "[TASK 6] Add apt repo for kubernetes"
36 | curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - >/dev/null 2>&1
37 | apt-add-repository "deb http://apt.kubernetes.io/ kubernetes-xenial main" >/dev/null 2>&1
38 |
39 | echo "[TASK 7] Install Kubernetes components (kubeadm, kubelet and kubectl)"
40 | apt install -qq -y kubeadm=1.23.8-00 kubelet=1.23.8-00 kubectl=1.23.8-00 >/dev/null 2>&1
41 |
42 | #https://blog.sighup.io/how-to-run-kubernetes-without-docker/
43 |
--------------------------------------------------------------------------------
/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | 	set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=source
11 | set BUILDDIR=build
12 |
13 | if "%1" == "" goto help
14 |
15 | %SPHINXBUILD% >NUL 2>NUL
16 | if errorlevel 9009 (
17 | 	echo.
18 | 	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
19 | 	echo.installed, then set the SPHINXBUILD environment variable to point
20 | 	echo.to the full path of the 'sphinx-build' executable. Alternatively you
21 | 	echo.may add the Sphinx directory to PATH.
22 | 	echo.
23 | 	echo.If you don't have Sphinx installed, grab it from
24 | 	echo.http://sphinx-doc.org/
25 | 	exit /b 1
26 | )
27 |
28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
29 | goto end
30 |
31 | :help
32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
33 |
34 | :end
35 | popd
36 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | # furo==2022.9.15
2 | # pytz==2021.1
3 | # PyYAML
4 | # requests
5 | # Sphinx
6 | # sphinx-basic-ng==0.0.1a12
7 | # sphinx-book-theme==0.3.3
8 | # sphinx-contributors==0.2.6
9 | # sphinx-copybutton==0.5.0
10 | # sphinx-press-theme==0.7.3
11 | # sphinx-rtd-theme==0.5.2
12 | # sphinxcontrib-applehelp==1.0.2
13 | # sphinxcontrib-devhelp==1.0.2
14 | # sphinxcontrib-htmlhelp==1.0.3
15 | # sphinxcontrib-jsmath==1.0.1
16 | # sphinxcontrib-qthelp==1.0.3
17 | # sphinxcontrib-serializinghtml==1.1.4
18 | # sphinxemoji
19 | # urllib3
20 | # git+https://github.com/SuperKogito/sphinxcontrib-pdfembed
21 | Sphinx
22 | sphinxemoji
23 | sphinx-copybutton
24 | sphinx-contributors
25 | git+https://github.com/SuperKogito/sphinxcontrib-pdfembed
26 | furo
--------------------------------------------------------------------------------
/source/_code/k8s-install/install-cn.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # setup timezone
4 | echo "[TASK 0] Set timezone"
5 | timedatectl set-timezone Asia/Shanghai
6 | apt-get install -y ntpdate >/dev/null 2>&1
7 | ntpdate ntp.aliyun.com
8 |
9 | echo "[TASK 1] install some tools"
10 | apt install -qq -y vim jq iputils-ping net-tools >/dev/null 2>&1
11 |
12 | echo "[TASK 2] Disable and turn off SWAP"
13 | sed -i '/swap/d' /etc/fstab
14 | swapoff -a
15 |
16 | echo "[TASK 3] Stop and Disable firewall"
17 | systemctl disable --now ufw >/dev/null 2>&1
18 |
19 | echo "[TASK 4] Enable and Load Kernel modules"
20 | cat >>/etc/modules-load.d/containerd.conf <<EOF
21 | overlay
22 | br_netfilter
23 | EOF
24 | modprobe overlay
25 | modprobe br_netfilter
26 |
27 | echo "[TASK 5] Enable all Kernel settings"
28 | cat >>/etc/sysctl.d/kubernetes.conf <<EOF
29 | net.bridge.bridge-nf-call-ip6tables = 1
30 | net.bridge.bridge-nf-call-iptables = 1
31 | net.ipv4.ip_forward = 1
32 | EOF
33 | sysctl --system >/dev/null 2>&1
34 |
35 | echo "[TASK 6] Install containerd runtime"
36 | mkdir -p /etc/apt/keyrings
37 | curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
38 | echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \
39 | $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
40 | apt -qq update >/dev/null 2>&1
41 | apt install -qq -y containerd.io >/dev/null 2>&1
42 | containerd config default >/etc/containerd/config.toml
43 | sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
44 | sed -i 's/registry.k8s.io\/pause:3.6/registry.aliyuncs.com\/google_containers\/pause:3.9/g' /etc/containerd/config.toml
45 | systemctl restart containerd
46 | systemctl enable containerd >/dev/null 2>&1
47 |
48 |
49 | echo "[TASK 7] Add apt repo for kubernetes"
50 | curl -fsSL https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | sudo apt-key add - > /dev/null 2>&1
51 | echo "deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list > /dev/null 2>&1
52 | apt-get update >/dev/null 2>&1
53 |
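After install-cn.sh prepares a node, initialization typically points kubeadm at the same aliyun mirror for the control-plane images (an added sketch, not part of the script; the advertise address and pod CIDR are illustrative values):

    sudo kubeadm init \
        --image-repository registry.aliyuncs.com/google_containers \
        --apiserver-advertise-address=192.168.56.10 \
        --pod-network-cidr=10.244.0.0/16
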
--------------------------------------------------------------------------------
/source/_code/k8s-install/install.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # swap is already supported (https://kubernetes.io/blog/2023/08/24/swap-linux-beta/); if you want to enable swap, please check the link
4 | echo "[TASK 1] Disable and turn off SWAP"
5 | sed -i '/swap/d' /etc/fstab
6 | swapoff -a
7 |
8 | echo "[TASK 2] install some tools"
9 | apt install -qq -y vim jq iputils-ping net-tools >/dev/null 2>&1
10 |
11 | echo "[TASK 3] Stop and Disable firewall"
12 | systemctl disable --now ufw >/dev/null 2>&1
13 |
14 | echo "[TASK 4] Enable and Load Kernel modules"
15 | cat >>/etc/modules-load.d/containerd.conf<<EOF
16 | overlay
17 | br_netfilter
18 | EOF
19 | modprobe overlay
20 | modprobe br_netfilter
21 |
22 | echo "[TASK 5] Enable all Kernel settings"
23 | cat >>/etc/sysctl.d/kubernetes.conf<<EOF
24 | net.bridge.bridge-nf-call-ip6tables = 1
25 | net.bridge.bridge-nf-call-iptables = 1
26 | net.ipv4.ip_forward = 1
27 | EOF
28 | sysctl --system >/dev/null 2>&1
29 |
30 | echo "[TASK 6] Install containerd runtime"
31 | mkdir -p /etc/apt/keyrings
32 | curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
33 | echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \
34 | $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
35 | apt -qq update >/dev/null 2>&1
36 | apt install -qq -y containerd.io >/dev/null 2>&1
37 | containerd config default >/etc/containerd/config.toml
38 | sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
39 | systemctl restart containerd
40 | systemctl enable containerd >/dev/null 2>&1
41 |
42 | echo "[TASK 7] Add apt repo for kubernetes"
43 | apt-get install -y apt-transport-https ca-certificates curl gpg >/dev/null 2>&1
44 | curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.30/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
45 | echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.30/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list > /dev/null
46 | apt -qq update >/dev/null 2>&1
47 |
--------------------------------------------------------------------------------
/source/_code/network/container-to-container.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 |   name: my-pod
5 | spec:
6 |   nodeName: 'k8s-worker1' # pin to a specific node; change to match your cluster
7 |   containers:
8 |     - name: container1
9 |       image: xiaopeng163/net-box
10 |       command: ["sh", "-c", "while true; do echo $(date) >> /tmp/index.html; sleep 60; done"]
11 |     - name: container2
12 |       image: xiaopeng163/net-box
13 |       command: ["sh", "-c", "while true; do echo $(date) >> /tmp/index.html; sleep 60; done"]
14 |
--------------------------------------------------------------------------------
/source/_code/network/pod-to-pod-single-node.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 |   name: mypod1
5 | spec:
6 |   nodeName: 'k8s-worker1' # pin to a specific node; change to match your cluster
7 |   containers:
8 |     - name: pod1
9 |       image: xiaopeng163/net-box
10 |       command: ["sh", "-c", "while true; do echo $(date) >> /tmp/index.html; sleep 60; done"]
11 | ---
12 | apiVersion: v1
13 | kind: Pod
14 | metadata:
15 |   name: mypod2
16 | spec:
17 |   nodeName: 'k8s-worker1' # pin to a specific node; change to match your cluster
18 |   containers:
19 |     - name: pod2
20 |       image: xiaopeng163/net-box
21 |       command: ["sh", "-c", "while true; do echo $(date) >> /tmp/index.html; sleep 60; done"]
22 |
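One way to exercise the two manifests above (an added sketch; the pod IP is whatever your cluster assigns, and this assumes the xiaopeng163/net-box image ships ping):

    kubectl apply -f pod-to-pod-single-node.yml
    kubectl get pods -o wide                        # note the IP of mypod2
    kubectl exec mypod1 -- ping -c 3 <mypod2-ip>    # replace <mypod2-ip> with the IP noted above
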
--------------------------------------------------------------------------------
/source/_static/about.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaopeng163/learn-k8s-from-scratch/d5ab7156895757d7b09318d4223a3fd5f5cc56c4/source/_static/about.jpg
--------------------------------------------------------------------------------
/source/_static/controller-manager-on-master.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaopeng163/learn-k8s-from-scratch/d5ab7156895757d7b09318d4223a3fd5f5cc56c4/source/_static/controller-manager-on-master.png
--------------------------------------------------------------------------------
/source/_static/css/custom.css:
--------------------------------------------------------------------------------
1 | .custom-contributors img {
2 |   border-radius: 50%;
3 | }
--------------------------------------------------------------------------------
/source/_static/exam/kubectl-cheat-sheet.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaopeng163/learn-k8s-from-scratch/d5ab7156895757d7b09318d4223a3fd5f5cc56c4/source/_static/exam/kubectl-cheat-sheet.PNG
--------------------------------------------------------------------------------
/source/_static/init-containers.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaopeng163/learn-k8s-from-scratch/d5ab7156895757d7b09318d4223a3fd5f5cc56c4/source/_static/init-containers.png
--------------------------------------------------------------------------------
/source/_static/introduction/container-orchestration.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaopeng163/learn-k8s-from-scratch/d5ab7156895757d7b09318d4223a3fd5f5cc56c4/source/_static/introduction/container-orchestration.PNG
--------------------------------------------------------------------------------
/source/_static/introduction/kubernetes_architecture.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaopeng163/learn-k8s-from-scratch/d5ab7156895757d7b09318d4223a3fd5f5cc56c4/source/_static/introduction/kubernetes_architecture.jpg
--------------------------------------------------------------------------------
/source/_static/k8s-core-concept/pod.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaopeng163/learn-k8s-from-scratch/d5ab7156895757d7b09318d4223a3fd5f5cc56c4/source/_static/k8s-core-concept/pod.png
--------------------------------------------------------------------------------
/source/_static/kube-alias.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaopeng163/learn-k8s-from-scratch/d5ab7156895757d7b09318d4223a3fd5f5cc56c4/source/_static/kube-alias.jpeg
--------------------------------------------------------------------------------
/source/_static/logging/fluentd.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaopeng163/learn-k8s-from-scratch/d5ab7156895757d7b09318d4223a3fd5f5cc56c4/source/_static/logging/fluentd.png
--------------------------------------------------------------------------------
/source/_static/maintaining/k8s-ha1.jpg:
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/xiaopeng163/learn-k8s-from-scratch/d5ab7156895757d7b09318d4223a3fd5f5cc56c4/source/_static/maintaining/k8s-ha1.jpg -------------------------------------------------------------------------------- /source/_static/maintaining/k8s-ha2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xiaopeng163/learn-k8s-from-scratch/d5ab7156895757d7b09318d4223a3fd5f5cc56c4/source/_static/maintaining/k8s-ha2.jpg -------------------------------------------------------------------------------- /source/_static/network/ingress-fanout.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xiaopeng163/learn-k8s-from-scratch/d5ab7156895757d7b09318d4223a3fd5f5cc56c4/source/_static/network/ingress-fanout.png -------------------------------------------------------------------------------- /source/_static/network/ingress-overview.svg: -------------------------------------------------------------------------------- 1 |
[ingress-overview.svg is a vector diagram; only its text labels survive extraction: a client sends traffic to an Ingress-managed load balancer, an Ingress routing rule forwards it to a Service, and the Service selects Pods inside the cluster]
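A minimal Ingress manifest expressing the flow in that diagram (a hedged sketch, not a file from this repository; the host and Service name are made-up placeholders):

    apiVersion: networking.k8s.io/v1
    kind: Ingress
    metadata:
      name: ingress-overview-example
    spec:
      rules:
        - host: demo.example.com          # hypothetical host
          http:
            paths:
              - path: /
                pathType: Prefix
                backend:
                  service:
                    name: web-service     # hypothetical Service from the diagram
                    port:
                      number: 80
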
--------------------------------------------------------------------------------
/source/_static/network/ingress-virtual-host.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaopeng163/learn-k8s-from-scratch/d5ab7156895757d7b09318d4223a3fd5f5cc56c4/source/_static/network/ingress-virtual-host.png
--------------------------------------------------------------------------------
/source/_static/network/kubernetes-ingress.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaopeng163/learn-k8s-from-scratch/d5ab7156895757d7b09318d4223a3fd5f5cc56c4/source/_static/network/kubernetes-ingress.png
--------------------------------------------------------------------------------
/source/_static/network/networking-overview.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaopeng163/learn-k8s-from-scratch/d5ab7156895757d7b09318d4223a3fd5f5cc56c4/source/_static/network/networking-overview.png
--------------------------------------------------------------------------------
/source/_static/network/pod-network.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaopeng163/learn-k8s-from-scratch/d5ab7156895757d7b09318d4223a3fd5f5cc56c4/source/_static/network/pod-network.PNG
--------------------------------------------------------------------------------
/source/_static/network/service.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaopeng163/learn-k8s-from-scratch/d5ab7156895757d7b09318d4223a3fd5f5cc56c4/source/_static/network/service.gif
--------------------------------------------------------------------------------
/source/_static/pdf/k8s-network.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaopeng163/learn-k8s-from-scratch/d5ab7156895757d7b09318d4223a3fd5f5cc56c4/source/_static/pdf/k8s-network.pdf
--------------------------------------------------------------------------------
/source/_static/pod-phase.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaopeng163/learn-k8s-from-scratch/d5ab7156895757d7b09318d4223a3fd5f5cc56c4/source/_static/pod-phase.png
--------------------------------------------------------------------------------
/source/_static/wechat-tips.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaopeng163/learn-k8s-from-scratch/d5ab7156895757d7b09318d4223a3fd5f5cc56c4/source/_static/wechat-tips.jpg
--------------------------------------------------------------------------------
/source/_static/wechat.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaopeng163/learn-k8s-from-scratch/d5ab7156895757d7b09318d4223a3fd5f5cc56c4/source/_static/wechat.jpg
--------------------------------------------------------------------------------
/source/about.rst:
--------------------------------------------------------------------------------
1 | About Me
2 | ===============
3 |
4 | .. image:: /_static/about.jpg
5 |    :width: 300px
6 |    :alt: about
7 |
8 |
9 | Known online as ``麦兜搞IT``. A senior network operations engineer, now living in the ``Netherlands``, working in the data-center network department of a bank and responsible for putting Net DevOps into practice.
10 | Before that I worked at ``Cisco``, ``KPN`` and other companies for more than 10 years, and I have rich hands-on experience with operations automation and DevOps. I started teaching online in 2017 and now have more than 30,000 students.
11 |
12 |
13 | 🔭 I’m currently working as a 🛠 Network DevOps engineer @ing-bank Netherlands.
14 |
15 | 📚 I like creating tech training videos online (Udemy, YouTube, WeChat)
16 |
17 | 💬 How to reach me: `GitHub <https://github.com/xiaopeng163>`_, `Twitter `_, `YouTube `_
18 |
19 |
20 | Friends in mainland China are also welcome to follow my WeChat official account, where I share Docker/k8s technical articles from time to time.
21 |
22 | .. image:: _static/wechat.jpg
23 |    :width: 400
24 |    :alt: wechat
25 |
--------------------------------------------------------------------------------
/source/api-server-pod.rst:
--------------------------------------------------------------------------------
1 | API Server and Pod
2 | ============================
3 |
4 |
5 | .. toctree::
6 |    :maxdepth: 2
7 |    :caption: Contents:
8 |
9 |    api-server-pod/api-server
10 |    api-server-pod/pod
11 |    api-server-pod/kubectl-proxy
12 |    api-server-pod/namespace
13 |    api-server-pod/static-pod
14 |    api-server-pod/init-container
15 |    api-server-pod/pod-lifecycle
16 |
17 |
--------------------------------------------------------------------------------
/source/api-server-pod/api-server.rst:
--------------------------------------------------------------------------------
1 | API Server and API Object
2 | ================================
3 |
4 | References:
5 |
6 | - https://kubernetes.io/docs/reference/using-api/
7 | - https://kubernetes.io/docs/concepts/overview/working-with-objects/kubernetes-objects/
8 | - https://kubernetes.io/docs/reference/kubernetes-api/
9 |
10 | API Server
11 | --------------
12 |
13 | The Kubernetes API Server follows a client/server architecture:
14 |
15 | - it exposes a RESTful API over HTTP; clients submit requests and the server answers
16 | - it is stateless; all state lives in the cluster store (etcd)
17 |
18 | .. image:: ../_static/introduction/kubernetes_architecture.jpg
19 |    :alt: kubernetes_architecture
20 |
21 | .. code-block:: bash
22 |
23 |    $ kubectl cluster-info
24 |    $ kubectl config view
25 |    $ kubectl config view --raw
26 |    $ kubectl config get-contexts
27 |
28 |
29 | Clients
30 | ~~~~~~~~~~~
31 |
32 | - kubectl
33 | - RESTful API
34 | - other clients
35 |
36 |
37 | API Object
38 | -------------
39 |
40 | API objects are the Kubernetes objects that can be manipulated through the API server; together they represent the state of the whole cluster, for example:
41 |
42 | - What containerized applications are running (and on which nodes)
43 | - The resources available to those applications
44 | - The policies around how those applications behave, such as restart policies, upgrades, and fault-tolerance
45 |
46 | API objects are organized by the following fields:
47 |
48 | - Kind (Pod, Deployment, Service, etc.)
49 | - Group (core, apps, storage), see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/
50 | - Version (v1, beta, alpha) see https://kubernetes.io/docs/reference/using-api/#api-versioning
51 |
52 | .. code-block:: bash
53 |
54 |    $ kubectl api-resources | more
55 |    $ kubectl api-resources --api-group=apps | more
56 |    $ kubectl api-versions | sort | more
57 |
58 |
59 |
60 | How to work with API objects
61 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
62 |
63 | There are two modes:
64 |
65 | - Imperative Configuration (create and manage objects directly from the command line)
66 | - Declarative Configuration (define the desired state in a YAML/JSON manifest file, then hand the file to the API server)
67 |
68 |
69 | .. code-block:: yaml
70 |
71 |    apiVersion: v1
72 |    kind: Pod
73 |    metadata:
74 |      name: web
75 |    spec:
76 |      containers:
77 |        - name: nginx-container
78 |          image: nginx:latest
79 |
80 | .. code-block:: bash
81 |
82 |    $ kubectl apply -f nginx.yml # Declarative Configuration
83 |    $ kubectl run web --image=nginx # Imperative Configuration
84 |
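A convenient bridge between the two modes is kubectl's client-side dry run, which turns an imperative command into a declarative manifest (an added sketch; the same --dry-run=client pattern appears in namespace.rst later in this repo):

    $ kubectl run web --image=nginx --dry-run=client -o yaml > web.yml
    $ kubectl apply -f web.yml
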
code-block:: bash 81 | 82 | $ kubectl apply -f nginx.yml # Declarative Configuration 83 | $ kubectl run web --image=nginx # Imperative Configuration 84 | -------------------------------------------------------------------------------- /source/api-server-pod/init-container.rst: -------------------------------------------------------------------------------- 1 | Init Containers 2 | =================== 3 | 4 | 5 | https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ 6 | 7 | 8 | .. image:: ../_static/init-containers.png 9 | :alt: init-container 10 | 11 | 12 | why init containers 13 | ---------------------- 14 | 15 | - 初始化 16 | - 处理依赖,控制启动 17 | 18 | 19 | 20 | .. code-block:: yaml 21 | 22 | apiVersion: v1 23 | kind: Pod 24 | metadata: 25 | name: pod-with-init-containers 26 | spec: 27 | initContainers: 28 | - name: init-service 29 | image: busybox 30 | command: ["sh", "-c", "echo waiting for sercice; sleep 4"] 31 | - name: init-database 32 | image: busybox 33 | command: ["sh", "-c", "echo waiting for database; sleep 4"] 34 | containers: 35 | - name: app-container 36 | image: nginx 37 | -------------------------------------------------------------------------------- /source/api-server-pod/kubectl-proxy.rst: -------------------------------------------------------------------------------- 1 | kubectl proxy 2 | ================== 3 | 4 | 5 | 一种直接访问kubernetes API的方法。 6 | 7 | 首先通过kubectl获取到API的URL, 例如 8 | 9 | .. code-block:: bash 10 | 11 | vagrant@k8s-master:~$ kubectl get namespaces -v 6 12 | I0612 21:17:08.143200 65940 loader.go:372] Config loaded from file: /home/vagrant/.kube/config 13 | I0612 21:17:08.174927 65940 round_trippers.go:553] GET https://192.168.56.10:6443/api/v1/namespaces?limit=500 200 OK in 18 milliseconds 14 | NAME STATUS AGE 15 | default Active 12d 16 | kube-node-lease Active 12d 17 | kube-public Active 12d 18 | kube-system Active 12d 19 | 20 | 21 | 后台执行 ``kubectl proxy &`` 22 | 23 | .. code-block:: bash 24 | 25 | vagrant@k8s-master:~$ kubectl proxy & 26 | [1] 66087 27 | vagrant@k8s-master:~$ Starting to serve on 127.0.0.1:8001 28 | 29 | vagrant@k8s-master:~$ 30 | 31 | 这时候就可以通过proxy来访问API了,例如 32 | 33 | 34 | .. code-block:: bash 35 | 36 | vagrant@k8s-master:~$ curl http://127.0.0.1:8001/api/v1/namespaces?limit=500 37 | { 38 | "kind": "NamespaceList", 39 | "apiVersion": "v1", 40 | "metadata": { 41 | "resourceVersion": "352131" 42 | }, 43 | "items": [ 44 | { 45 | "metadata": { 46 | "name": "default", 47 | "uid": "9c661b71-38ad-4cdc-b505-0889e88bdb4b", 48 | "resourceVersion": "197", 49 | "creationTimestamp": "2022-05-31T18:13:08Z", 50 | "labels": { 51 | "kubernetes.io/metadata.name": "default" 52 | }, 53 | 54 | 55 | 如何退出proxy? 运行fg,然后运行ctrl + c 56 | 57 | .. code-block:: bash 58 | 59 | vagrant@k8s-master:~$ fg 60 | kubectl proxy 61 | 62 | ^C 63 | vagrant@k8s-master:~$ 64 | 65 | -------------------------------------------------------------------------------- /source/api-server-pod/namespace.rst: -------------------------------------------------------------------------------- 1 | Namespace 2 | =================== 3 | 4 | 5 | Basic 6 | --------- 7 | 8 | 9 | .. 
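One rule worth knowing before the commands below: namespace names must be valid RFC 1123 DNS labels — lowercase alphanumerics and ``-``, at most 63 characters — which is why the ``Playground1`` example fails. A quick sketch of the failure:

.. code-block:: bash

   $ kubectl create namespace Playground1
   # rejected by the API server with an "Invalid value ... RFC 1123 label" validation error

..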
code-block:: bash 10 | 11 | # get all namespaces 12 | kubectl get namespaces 13 | 14 | # get list of all api resources and if they can/not be namespaced 15 | kubectl api-resources --namespaced=true | head 16 | kubectl api-resources --namespaced=false | head 17 | 18 | kubectl describe namespaces 19 | 20 | kubectl get pods --all-namespaces 21 | 22 | # get all resources all 23 | kubectl get all --all-namespaces 24 | 25 | # create namespace with cli 26 | kubectl create namespace playground1 27 | 28 | kubectl create namespace Playground1 # will be error 29 | 30 | # create namespace with yaml 31 | vagrant@k8s-master:~$ kubectl create namespace demo --dry-run=client -o yaml > demo.yaml 32 | vagrant@k8s-master:~$ more demo.yaml 33 | apiVersion: v1 34 | kind: Namespace 35 | metadata: 36 | creationTimestamp: null 37 | name: demo 38 | spec: {} 39 | status: {} 40 | vagrant@k8s-master:~$ kubectl apply -f demo.yaml 41 | namespace/demo created 42 | 43 | 44 | # delete namespaces 45 | vagrant@k8s-master:~$ kubectl delete namespaces demo 46 | namespace "demo" deleted 47 | 48 | 49 | Pod with namespace 50 | --------------------- 51 | 52 | .. code-block:: bash 53 | 54 | # get all pod in all namespaces 55 | $ kubectl get pod -A 56 | 57 | # get pod in default namespaces 58 | $ kubectl get pods 59 | 60 | # get pod in demo namespaces 61 | $ kubectl get pods --namespace=demo 62 | 63 | # create a pod in demo namespace 64 | $ kubectl run web --namespace=demo --image=nginx 65 | 66 | # or with yaml file 67 | $ more web.yml 68 | apiVersion: v1 69 | kind: Pod 70 | metadata: 71 | name: web 72 | namespace: demo 73 | spec: 74 | containers: 75 | - image: nginx 76 | name: web 77 | $ kubectl apply -f web.yml 78 | 79 | Change default namespace 80 | ----------------------------- 81 | 82 | .. code-block:: bash 83 | 84 | vagrant@k8s-master:~$ kubectl create namespace demo 85 | namespace/demo created 86 | vagrant@k8s-master:~$ kubectl run web --namespace demo --image nginx 87 | pod/web created 88 | vagrant@k8s-master:~$ kubectl get pods 89 | No resources found in default namespace. 90 | vagrant@k8s-master:~$ kubectl get pods --namespace demo 91 | NAME READY STATUS RESTARTS AGE 92 | web 1/1 Running 0 10s 93 | 94 | 95 | 如何切换当前默认的namespace从default到demo 96 | 97 | .. code-block:: bash 98 | 99 | vagrant@k8s-master:~$ kubectl config get-contexts 100 | CURRENT NAME CLUSTER AUTHINFO NAMESPACE 101 | * kubernetes-admin@kubernetes kubernetes kubernetes-admin 102 | 103 | vagrant@k8s-master:~$ kubectl config set-context --current --namespace demo 104 | Context "kubernetes-admin@kubernetes" modified. 105 | vagrant@k8s-master:~$ kubectl config get-contexts 106 | CURRENT NAME CLUSTER AUTHINFO NAMESPACE 107 | * kubernetes-admin@kubernetes kubernetes kubernetes-admin demo 108 | vagrant@k8s-master:~$ kubectl get pods 109 | NAME READY STATUS RESTARTS AGE 110 | web 1/1 Running 0 2m44s 111 | -------------------------------------------------------------------------------- /source/api-server-pod/pod-lifecycle.rst: -------------------------------------------------------------------------------- 1 | Pod Lifecycle 2 | ================= 3 | 4 | Linux补充知识 5 | 6 | SIGTERM vs SIGKILL: What's the Difference? 7 | 8 | https://linuxhandbook.com/sigterm-vs-sigkill/ 9 | 10 | 11 | Pod Lifecycle 12 | ----------------- 13 | 14 | 参考 https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/ 15 | 16 | pod phase 17 | 18 | .. 
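``status.phase`` can take five values — ``Pending``, ``Running``, ``Succeeded``, ``Failed`` and ``Unknown`` — which the figure below arranges on a timeline. A quick sketch for reading the phase of a single pod:

.. code-block:: bash

   # print just the phase string of pod "web", e.g. "Running"
   $ kubectl get pod web -o jsonpath='{.status.phase}{"\n"}'

..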
image:: ../_static/pod-phase.png 19 | :alt: init-container 20 | 21 | 22 | Container state 23 | ---------------- 24 | 25 | - waiting 26 | - Running 27 | - Termination 28 | 29 | - Process is terminated/crashed 30 | - Pod is deleted 31 | - Node failure or maintenance 32 | - Evicteed due to lack of resources 33 | 34 | Stopping/Terminating Pods 35 | ----------------------------- 36 | 37 | .. code-block:: bash 38 | 39 | kubectl delete pod 40 | 41 | 当执行删除的时候, 42 | 43 | - API server会设置一个timer(grace period Timer),默认是30秒 44 | - 同时pod状态改成Terminating 45 | - Pod所在node上的kubelet收到命令,会给Pod里的container发送SIGTERM信号,然后等等container退出 46 | - 如果container在timer到时之前退出了,那么pod信息同时会被API server从存储中删除 47 | - 如果container没有在timer到时之前退出,则kubelet会发送SIGKILL信息到pod里的容器,强制杀死容器, 最后API server更新存储etcd 48 | 49 | 50 | grace period timer是可以修改的 51 | 52 | .. code-block:: bash 53 | 54 | $ kubectl delete pod --grace-period= 55 | 56 | .. code-block:: yaml 57 | 58 | apiVersion: v1 59 | kind: Pod 60 | metadata: 61 | name: web 62 | spec: 63 | terminationGracePeriodSeconds: 10 64 | containers: 65 | - image: nginx 66 | name: web 67 | 68 | 或者也可以直接就强制删除pod (SIGKILL) 69 | 70 | .. code-block:: bash 71 | 72 | $ kubectl delete pod --grace-period=0 --force 73 | 74 | 75 | Persistency of Pod 76 | --------------------- 77 | 78 | Pod本身不会重新自动部署 79 | 80 | 如果一个pod停止了,它不会重启动,只有可能创建一个新的 81 | 82 | 如果有配置需要持久化怎么办? 83 | 84 | - Pod Manifests, secrets and ConfigMaps 85 | - environment variables 86 | 87 | 数据持久? 88 | 89 | - PersistentVolume 90 | - PersistentVolumeClaim 91 | 92 | Container Restart Policy 93 | --------------------------- 94 | 95 | The spec of a Pod has a restartPolicy field with possible values ``Always``, ``OnFailure``, and ``Never``. The default value is Always. 96 | 97 | .. code-block:: bash 98 | 99 | vagrant@k8s-master:~$ kubectl run web --image nginx --dry-run=client -o yaml 100 | apiVersion: v1 101 | kind: Pod 102 | metadata: 103 | creationTimestamp: null 104 | labels: 105 | run: web 106 | name: web 107 | spec: 108 | containers: 109 | - image: nginx 110 | name: web 111 | resources: {} 112 | dnsPolicy: ClusterFirst 113 | restartPolicy: Always 114 | status: {} 115 | -------------------------------------------------------------------------------- /source/api-server-pod/pod.rst: -------------------------------------------------------------------------------- 1 | Pod 2 | ====== 3 | 4 | https://kubernetes.io/docs/concepts/workloads/pods/ 5 | 6 | What is Pod? 7 | ---------------- 8 | 9 | Pod是k8s里最小的调度单位。 10 | 11 | - A group of one or more application containers and their shared resources like volume. 12 | - A pod share the same name spaces like network name spaces (have same IP address.) 13 | - Pod is the smallest unit for K8s scheduling 14 | 15 | 16 | .. note:: 17 | 18 | 关于容器,name spaces,欢迎参考另一篇关于Docker的文档 https://dockertips.readthedocs.io/en/latest/single-host-network.html 19 | 20 | .. image:: ../_static/k8s-core-concept/pod.png 21 | :width: 800 22 | :alt: what is pod 23 | 24 | 25 | How to create a pod? 26 | ------------------------- 27 | 28 | Imperative 29 | ~~~~~~~~~~~~~ 30 | 31 | Create a pod named web with image of nginx:latest 32 | 33 | .. code-block:: bash 34 | 35 | $ kubectl run web --image=nginx 36 | pod/web created 37 | 38 | 运行一个命令, kubectl run --image= --command -- ... 39 | 40 | .. code-block:: bash 41 | 42 | $ kubectl run client --image=busybox --command -- bin/sh -c "sleep 100000" 43 | 44 | 45 | Declarative 46 | ~~~~~~~~~~~~~ 47 | 48 | 以下yaml文件是定义一个pod所需的最少字段 (nginx.yml) 49 | 50 | .. 
code-block:: yaml 51 | 52 | apiVersion: v1 53 | kind: Pod 54 | metadata: 55 | name: web 56 | spec: 57 | containers: 58 | - name: nginx-container 59 | image: nginx:latest 60 | 61 | .. code-block:: bash 62 | 63 | $ kubectl apply -f nginx.yml 64 | pod/web created 65 | 66 | 67 | 运行一个命令, sh -c "sleep 1000000" 68 | 69 | .. code-block:: yaml 70 | 71 | apiVersion: v1 72 | kind: Pod 73 | metadata: 74 | name: client 75 | spec: 76 | containers: 77 | - name: client 78 | image: busybox 79 | command: 80 | - sh 81 | - -c 82 | - "sleep 1000000" 83 | 84 | multi-container pod 85 | ----------------------------- 86 | 87 | 一个pod是可以包含多个container的,如果要创建这样的pod,那么只能通过yaml文件实现,例如: 88 | 89 | .. code-block:: yaml 90 | 91 | apiVersion: v1 92 | kind: Pod 93 | metadata: 94 | name: my-pod 95 | spec: 96 | containers: 97 | - name: nginx 98 | image: nginx 99 | - name: client 100 | image: busybox 101 | command: 102 | - sh 103 | - -c 104 | - "sleep 1000000" 105 | 106 | 107 | 108 | .. code-block:: bash 109 | 110 | $ kubectl create -f my-pod.yml 111 | $ kubectl get pod 112 | NAME READY STATUS RESTARTS AGE 113 | my-pod 2/2 Running 0 35s 114 | 115 | 116 | Pod YAML 语法 117 | --------------- 118 | 119 | 查文档 kubernetes.io, 命令行帮助 120 | 121 | .. code-block:: bash 122 | 123 | kubectl explain pods | more 124 | kubectl explain pod.spec | more 125 | kubectl explain pod.spec.containers | more 126 | 127 | kubectl dry-run 128 | ------------------ 129 | 130 | 131 | Server-side 132 | ~~~~~~~~~~~~~~~~ 133 | 134 | 和正常情况一样处理客户端发送过来的请求,但是并不会把Object状态持久化存储到storage中 135 | 136 | 137 | .. code-block:: bash 138 | 139 | $ kubectl apply -f nginx.yml --dry-run=server 140 | 141 | Client-side 142 | ~~~~~~~~~~~~~~~~ 143 | 144 | - 把要操作的Object通过标准输出stdout输出到terminal 145 | - 验证manifest的语法 146 | - 可以用于生成语法正确的Yaml manifest 147 | 148 | .. code-block:: bash 149 | 150 | $ kubectl apply -f nginx.yml --dry-run=client 151 | $ kubectl run web --image=nginx --dry-run=client -o yaml 152 | $ kubectl run web --image=nginx --dry-run=client -o yaml > nginx.yml 153 | 154 | kubectl diff 155 | ---------------- 156 | 157 | 显示当前要部署的manifest和集群中运行的有和不同,这样就知道如果apply会发生什么。 158 | 159 | .. code-block:: bash 160 | 161 | $ kubectl diff -f new-nginx.yml 162 | 163 | 164 | 165 | Pod的基本操作 166 | --------------- 167 | 168 | 获取pod列表 169 | ~~~~~~~~~~~~~~~~~ 170 | 171 | 172 | .. code-block:: bash 173 | 174 | vagrant@k8s-master:~$ kubectl get pods 175 | NAME READY STATUS RESTARTS AGE 176 | client 1/1 Running 0 5m14s 177 | web 1/1 Running 0 15m 178 | vagrant@k8s-master:~$ kubectl get pods -o wide 179 | NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES 180 | client 1/1 Running 0 5m17s 10.244.2.4 k8s-worker2 181 | web 1/1 Running 0 15m 10.244.1.2 k8s-worker1 182 | 183 | 通过 ``-o yaml`` 可以获取到具体一个pod的yaml定义文件 184 | 185 | .. code-block:: bash 186 | 187 | $ kubectl get pods client -o yaml 188 | 189 | 190 | 删除Pod 191 | ~~~~~~~~~~~ 192 | 193 | .. code-block:: bash 194 | 195 | $ kubectl delete pod web 196 | pod "web" deleted 197 | 198 | 199 | 获取pod详细信息 200 | ~~~~~~~~~~~~~~~~~~ 201 | 202 | .. code-block:: bash 203 | 204 | $ kubectl describe pod my-pod 205 | 206 | 207 | 进入容器执行命令 208 | ------------------- 209 | 210 | 对于只有单个容器的Pod, 执行date命令 211 | 212 | .. code-block:: bash 213 | 214 | vagrant@k8s-master:~$ kubectl get pods 215 | NAME READY STATUS RESTARTS AGE 216 | client 1/1 Running 0 38s 217 | my-pod 2/2 Running 0 6s 218 | vagrant@k8s-master:~$ kubectl exec client -- date 219 | Wed Jun 1 21:57:07 UTC 2022 220 | 221 | 进入交互式shell 222 | 223 | .. 
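``kubectl exec -it`` only works if the image actually ships a shell; for shell-less (e.g. distroless) images, recent clusters offer ``kubectl debug``, which attaches a temporary ephemeral container instead. A sketch, assuming the pod ``web`` with container ``web`` from the earlier example:

.. code-block:: bash

   # attach a busybox troubleshooting container to the running pod
   $ kubectl debug -it web --image=busybox --target=web

..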
code-block:: bash 224 | 225 | vagrant@k8s-master:~$ kubectl exec client -- date 226 | Wed Jun 1 21:57:07 UTC 2022 227 | vagrant@k8s-master:~$ kubectl exec client -it -- sh 228 | / # 229 | / # ip a 230 | 1: lo: mtu 65536 qdisc noqueue qlen 1000 231 | link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 232 | inet 127.0.0.1/8 scope host lo 233 | valid_lft forever preferred_lft forever 234 | inet6 ::1/128 scope host 235 | valid_lft forever preferred_lft forever 236 | 3: eth0@if7: mtu 1450 qdisc noqueue 237 | link/ether a6:56:08:ba:34:28 brd ff:ff:ff:ff:ff:ff 238 | inet 10.244.1.3/24 brd 10.244.1.255 scope global eth0 239 | valid_lft forever preferred_lft forever 240 | inet6 fe80::a456:8ff:feba:3428/64 scope link 241 | valid_lft forever preferred_lft forever 242 | / # 243 | 244 | 对于具有多个容器的pod,需要通过 ``-c`` 指定要进入那个容器中。 245 | 246 | .. code-block:: bash 247 | 248 | vagrant@k8s-master:~$ kubectl get pods 249 | NAME READY STATUS RESTARTS AGE 250 | client 1/1 Running 0 3m16s 251 | my-pod 2/2 Running 0 2m44s 252 | vagrant@k8s-master:~$ kubectl exec my-pod -c 253 | client nginx 254 | vagrant@k8s-master:~$ kubectl exec my-pod -c nginx -- date 255 | Wed Jun 1 21:59:58 UTC 2022 256 | 257 | 258 | API level log 259 | ----------------- 260 | 261 | 通过 ``-v`` 可以获取到每一个kubectl命令在API level的log,例如 262 | 263 | 获取kubectl操作更详细的log, API level( 通过 -v 指定) 264 | 265 | .. code-block:: bash 266 | 267 | $ kubectl get pod -v 6 # 或者 7,8,9 不同的level,数值越大,得到的信息越详细 268 | 269 | 270 | ``--watch`` 持续监听kubectl操作,API level 271 | 272 | .. code-block:: bash 273 | 274 | $ kubectl get pods --watch -v 6 275 | 276 | 277 | 278 | Pod with init containers 279 | --------------------------- 280 | 281 | .. code-block:: yaml 282 | 283 | apiVersion: v1 284 | kind: Pod 285 | metadata: 286 | name: pod-with-init-containers 287 | spec: 288 | initContainers: 289 | - name: init-service 290 | image: busybox 291 | command: ["sh", "-c", "echo waiting for sercice; sleep 2"] 292 | - name: init-database 293 | image: busybox 294 | command: ["sh", "-c", "echo waiting for database; sleep 2"] 295 | containers: 296 | - name: app-container 297 | image: nginx 298 | 299 | 300 | 301 | 302 | Pod Lifecycle 303 | -------------------- 304 | 305 | 306 | Pod Health 307 | ------------- 308 | 309 | LivenessProbes 310 | ~~~~~~~~~~~~~~~~~~ 311 | 312 | - Runs a diagnostic check on a container 313 | - Per container setting 314 | - On failure, the kubelet restarts the container 315 | - Container Restart Policy 316 | - Give Kubernetes a better understanding of our applications 317 | 318 | readinessProbes 319 | ~~~~~~~~~~~~~~~~~~~~~~ 320 | 321 | - Runs a diagnostic check on a container 322 | - Per container setting 323 | - Won't receive traffic from a service until it succeeds 324 | - On failure, remove Pod from load balancing 325 | - Applications that temporarily can't respond to a request 326 | - Prevents users from seeing errors 327 | 328 | type of Diagnostic Checks for Probes 329 | 330 | - Exec 331 | - tcpSocket 332 | - httpGet 333 | -------------------------------------------------------------------------------- /source/api-server-pod/static-pod.rst: -------------------------------------------------------------------------------- 1 | Static Pod 2 | ============= 3 | 4 | 5 | 参考 https://kubernetes.io/docs/tasks/configure-pod-container/static-pod/ 6 | 7 | 8 | What is Static Pods? 
9 | ------------------------- 10 | 11 | - Managed by the kubelet on Node 12 | - Static Pod manifests, ``staticPodPath`` in kubelet's configuration, by default is ``/etc/kubernetes/manifests`` 13 | - kubelet configuration file: ``/var/lib/kubelet/config.yaml`` 14 | - pod can be 'seen' through API server, but can not be managed by API server 15 | 16 | 17 | Control plane 的几个static pod 18 | 19 | .. code-block:: bash 20 | 21 | vagrant@k8s-master:~$ sudo ls /etc/kubernetes/manifests/ 22 | etcd.yaml kube-apiserver.yaml kube-controller-manager.yaml kube-scheduler.yaml 23 | vagrant@k8s-master:~$ 24 | 25 | 26 | -------------------------------------------------------------------------------- /source/conf.py: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder. 2 | # 3 | # This file only contains a selection of the most common options. For a full 4 | # list see the documentation: 5 | # https://www.sphinx-doc.org/en/master/usage/configuration.html 6 | 7 | # -- Path setup -------------------------------------------------------------- 8 | 9 | # If extensions (or modules to document with autodoc) are in another directory, 10 | # add these directories to sys.path here. If the directory is relative to the 11 | # documentation root, use os.path.abspath to make it absolute, like shown here. 12 | # 13 | # import os 14 | # import sys 15 | # sys.path.insert(0, os.path.abspath('.')) 16 | 17 | from datetime import date 18 | 19 | # -- Project information ----------------------------------------------------- 20 | 21 | project = 'Learn Kubernetes from Scratch' 22 | copyright = str(date.today().year) + ", Peng Xiao. All rights reserved." 23 | author = 'Peng Xiao' 24 | 25 | # The full version, including alpha/beta/rc tags 26 | release = '1.0' 27 | 28 | 29 | # -- General configuration --------------------------------------------------- 30 | 31 | # Add any Sphinx extension module names here, as strings. They can be 32 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 33 | # ones. 34 | extensions = [ 35 | "sphinxemoji.sphinxemoji", "sphinx_contributors", "sphinx_copybutton", 36 | "sphinxcontrib.pdfembed" 37 | ] 38 | sphinxemoji_style = 'twemoji' 39 | 40 | 41 | # Add any paths that contain templates here, relative to this directory. 42 | templates_path = ['_templates'] 43 | 44 | # List of patterns, relative to source directory, that match files and 45 | # directories to ignore when looking for source files. 46 | # This pattern also affects html_static_path and html_extra_path. 47 | exclude_patterns = [] 48 | 49 | 50 | # -- Options for HTML output ------------------------------------------------- 51 | 52 | # The theme to use for HTML and HTML Help pages. See the documentation for 53 | # a list of builtin themes. 54 | # 55 | # html_theme = 'sphinx_rtd_theme' 56 | html_theme = "furo" 57 | html_title = "Learn Kubernetes from Scratch" 58 | html_theme_options = {} 59 | html_theme_options["announcement"] = ( 60 | "🙏🙏🙏 如果大家发现文章中的错误,欢迎提PR或者issue指正 " 61 | "文档源码地址." 62 | ) 63 | # Add any paths that contain custom static files (such as style sheets) here, 64 | # relative to this directory. They are copied after the builtin static files, 65 | # so a file named "default.css" will overwrite the builtin "default.css". 
66 | html_static_path = ['_static'] 67 | html_css_files = [ 68 | "css/custom.css", 69 | ] -------------------------------------------------------------------------------- /source/controller-deployment.rst: -------------------------------------------------------------------------------- 1 | Controller and Deployment 2 | ============================ 3 | 4 | 5 | .. toctree:: 6 | :maxdepth: 2 7 | :caption: Contents: 8 | 9 | controller-deployment/controller-manager 10 | controller-deployment/labels 11 | controller-deployment/deployment 12 | controller-deployment/daemonset 13 | controller-deployment/job 14 | -------------------------------------------------------------------------------- /source/controller-deployment/controller-manager.rst: -------------------------------------------------------------------------------- 1 | Controller Manager 2 | ==================== 3 | 4 | https://kubernetes.io/docs/concepts/architecture/controller/ 5 | 6 | A controller tracks at least one Kubernetes resource type. These objects have a spec field that represents the ``desired state``. 7 | The controller(s) for that resource are responsible for making the current state come closer to that desired state. 8 | 9 | 10 | - kube controller Manager 11 | - cloud controller Manager 12 | 13 | 14 | .. image:: ../_static/controller-manager-on-master.png 15 | :alt: controller-manager 16 | 17 | 18 | Controllers 19 | -------------- 20 | 21 | Pod Controllers 22 | 23 | - ReplicaSet 24 | - Deployment 25 | - DaemonSet 26 | - StatefulSet 27 | - Job 28 | - CronJob 29 | 30 | 31 | Other Controllers 32 | 33 | - Node 34 | - Service 35 | - Endpoint 36 | 37 | 38 | System Pods 39 | ------------------ 40 | 41 | 42 | .. code-block:: bash 43 | 44 | $ kubectl get all -A 45 | 46 | $ kubectl get deployment coredns --namespace kube-system 47 | 48 | $ kubectl get daemonset --namespace kube-system 49 | 50 | -------------------------------------------------------------------------------- /source/controller-deployment/daemonset.rst: -------------------------------------------------------------------------------- 1 | DaemonSet 2 | ================== 3 | 4 | https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ 5 | 6 | 确保所有或者部分Kubernetes集群节点上运行一个pod。当有新节点加入时,pod也会运行在上面。 7 | 8 | 常见例子: 9 | 10 | - kube-proxy 网络相关 11 | - log collectors 12 | - metric servers 13 | - Resource monitoring agent 14 | - storage daemons 15 | 16 | 17 | 语法 18 | --------- 19 | 20 | .. code-block:: yaml 21 | 22 | apiVersion: apps/v1 23 | kind: DaemonSet 24 | metadata: 25 | name: hello-ds 26 | spec: 27 | selector: 28 | matchLabels: 29 | app: hello-world 30 | template: 31 | metadata: 32 | labels: 33 | app: hello-world 34 | spec: 35 | containers: 36 | - name: hello-world 37 | image: nginx:1.14 38 | 39 | 可以指定Node,通过 ``nodeSelector`` 40 | 41 | .. 
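For the selector to match anything, the chosen nodes must carry the label first. A sketch that pairs with the manifest below:

.. code-block:: bash

   # give a worker the label that the DaemonSet's nodeSelector (node: hello-world) expects
   $ kubectl label node k8s-worker1 node=hello-world

..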
code-block:: yaml 42 | 43 | apiVersion: apps/v1 44 | kind: DaemonSet 45 | metadata: 46 | name: hello-ds 47 | spec: 48 | selector: 49 | matchLabels: 50 | app: hello-world 51 | template: 52 | metadata: 53 | labels: 54 | app: hello-world 55 | spec: 56 | nodeSelector: 57 | node: hello-world 58 | containers: 59 | - name: hello-world 60 | image: nginx:1.14 61 | 62 | 63 | Update Strategy 64 | ----------------------------------- 65 | 66 | - RollingUpdate 67 | - OnDelete -------------------------------------------------------------------------------- /source/controller-deployment/deployment.rst: -------------------------------------------------------------------------------- 1 | Deployment 2 | ============================ 3 | 4 | https://kubernetes.io/docs/concepts/workloads/controllers/deployment/ 5 | 6 | You describe a ``desired state`` in a Deployment, and the Deployment Controller changes the actual state 7 | to the desired state at a controlled rate. You can define Deployments to create new ``ReplicaSets``, 8 | or to remove existing Deployments and adopt all their resources with new Deployments. 9 | 10 | 11 | Create Deployment 12 | ---------------------- 13 | 14 | Imperatively 15 | ~~~~~~~~~~~~~~~ 16 | 17 | .. code-block:: bash 18 | 19 | $ kubectl create deployment web --image=nginx:1.14.2 20 | $ kubectl scale deployment web --replicas=5 21 | 22 | 23 | Declaratively 24 | ~~~~~~~~~~~~~~~~~ 25 | 26 | .. code-block:: yaml 27 | 28 | apiVersion: apps/v1 29 | kind: Deployment 30 | metadata: 31 | labels: 32 | app: web 33 | name: web 34 | spec: 35 | replicas: 1 36 | selector: 37 | matchLabels: 38 | app: web 39 | template: 40 | metadata: 41 | labels: 42 | app: web 43 | spec: 44 | containers: 45 | - image: nginx:1.14.2 46 | name: nginx 47 | 48 | 49 | ReplicaSets and Failures 50 | ---------------------------- 51 | 52 | Pod Failures 53 | ~~~~~~~~~~~~~~~~~~ 54 | 55 | Rescheduled and a new Pod is created 56 | 57 | 58 | Node Failures 59 | ~~~~~~~~~~~~~~~~~~~ 60 | 61 | - Transient failure 62 | - permanent failure 63 | 64 | kube-contorller-manager 有一个timeout的设置 65 | 66 | pod-eviction-timeout (默认5min) Node如果失联超过5分钟,就会触发在其上运行的Pod的终止和重建。 67 | 68 | 69 | Update Deployment 70 | ---------------------- 71 | 72 | Update Strategy 73 | ~~~~~~~~~~~~~~~~~~~ 74 | 75 | https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy 76 | 77 | ``RollingUpdate`` (Default) 78 | 79 | A new ReplicaSet starts scaling up and the old ReplicaSet starts scaling down 80 | 81 | 创建一个deployment 82 | 83 | .. code-block:: bash 84 | 85 | $ kubectl create deployment web --image=nginx:1.14 --replicas 3 86 | 87 | Update image 88 | 89 | .. code-block:: bash 90 | 91 | $ kubectl set image deployment/web nginx=nginx:1.14.2 92 | 93 | 94 | Rolling Back 95 | 96 | 查看rollout history 97 | 98 | .. code-block:: bash 99 | 100 | $ kubectl rollout history deployment web 101 | 102 | 查看revision history 103 | 104 | .. code-block:: bash 105 | 106 | $ kubectl rollout history deployment web --revision=1 107 | 108 | rollout 109 | 110 | .. code-block:: bash 111 | 112 | $ kubectl rollout undo deployment web --to-revision=1 113 | 114 | ``Recreate`` 115 | 116 | Terminates all pods in the current ReplicaSet, set prior to scaling up the new ReplicaSet 117 | 118 | (used when applications don't support running different versions concurrently) 119 | 120 | 121 | 122 | Restarting a Deployment 123 | ~~~~~~~~~~~~~~~~~~~~~~~~~~ 124 | 125 | .. 
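``kubectl rollout restart`` works by stamping a restart annotation into the pod template, so the Deployment simply performs an ordinary rolling update with fresh pods. A sketch for watching it finish:

.. code-block:: bash

   # blocks until the new ReplicaSet is fully rolled out (or the rollout fails)
   $ kubectl rollout status deployment hello-world

..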
code-block:: bash 126 | 127 | $ kubectl rollout restart deployment hello-world 128 | 129 | 130 | Scale Deployment 131 | ---------------------- 132 | 133 | .. code-block:: bash 134 | 135 | $ kubectl scale deployment web --replicas 5 136 | -------------------------------------------------------------------------------- /source/controller-deployment/job.rst: -------------------------------------------------------------------------------- 1 | Job and CronJob 2 | =================== 3 | 4 | .. warning:: 5 | 6 | 注意API的版本,可能会随着Kubernetes的版本更新而改变 7 | 8 | .. code-block:: bash 9 | 10 | $ kubectl api-resources | grep job 11 | cronjobs cj batch/v1 true CronJob 12 | jobs batch/v1 true Job 13 | 14 | 15 | Job 16 | ------- 17 | 18 | Job: https://kubernetes.io/docs/concepts/workloads/controllers/job/ 19 | 20 | 一次性运行的Pod,一般为执行某个命令或者脚本,执行结束后pod的生命随之结束 21 | 22 | 23 | create job 24 | 25 | .. code-block:: bash 26 | 27 | $ kubectl create job my-job --image=busybox -- sh -c "sleep 50" 28 | 29 | 30 | .. code-block:: yaml 31 | 32 | apiVersion: batch/v1 33 | kind: Job 34 | metadata: 35 | name: my-job 36 | spec: 37 | template: 38 | spec: 39 | containers: 40 | - name: my-job 41 | image: busybox 42 | command: ["sh", "-c", "sleep 50"] 43 | restartPolicy: Never 44 | 45 | .. code-block:: bash 46 | 47 | $ kubectl get pods 48 | NAME READY STATUS RESTARTS AGE 49 | my-job-z679f 1/1 Running 0 10s 50 | 51 | $ kubectl get pods 52 | NAME READY STATUS RESTARTS AGE 53 | my-job-z679f 0/1 Completed 0 63s 54 | 55 | 56 | CronJob 57 | --------- 58 | 59 | 计划任务 60 | 61 | CronJobs: https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/ 62 | 63 | create CronJob 64 | 65 | .. code-block:: yaml 66 | 67 | apiVersion: batch/v1 68 | kind: CronJob 69 | metadata: 70 | name: hello 71 | spec: 72 | schedule: "*/1 * * * *" 73 | jobTemplate: 74 | spec: 75 | template: 76 | spec: 77 | containers: 78 | - name: hello 79 | image: busybox:1.28 80 | imagePullPolicy: IfNotPresent 81 | command: 82 | - /bin/sh 83 | - -c 84 | - date; echo Hello from the Kubernetes cluster 85 | restartPolicy: OnFailure 86 | -------------------------------------------------------------------------------- /source/controller-deployment/labels.rst: -------------------------------------------------------------------------------- 1 | Labels 2 | =============== 3 | 4 | 5 | - Used to organize resources: Pod, Nodes and more 6 | - Label Selectors are used to select/query Objects 7 | 8 | 9 | adding and editing labels 10 | ----------------------------------- 11 | 12 | - imperatively with kubectl 13 | - Declaratively in a Manifest in YAML 14 | 15 | .. code-block:: yaml 16 | 17 | apiVersion: v1 18 | kind: Pod 19 | metadata: 20 | name: nginx-pod-1 21 | labels: 22 | app: v1 23 | tier: PROD 24 | spec: 25 | containers: 26 | - name: nginx 27 | image: nginx 28 | --- 29 | apiVersion: v1 30 | kind: Pod 31 | metadata: 32 | name: nginx-pod-2 33 | labels: 34 | app: v1 35 | tier: ACC 36 | spec: 37 | containers: 38 | - name: nginx 39 | image: nginx 40 | --- 41 | apiVersion: v1 42 | kind: Pod 43 | metadata: 44 | name: nginx-pod-3 45 | labels: 46 | app: v1 47 | tier: TEST 48 | spec: 49 | containers: 50 | - name: nginx 51 | image: nginx 52 | 53 | edit labels 54 | 55 | 56 | .. 
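The commands below attach and modify labels imperatively; when verifying the result, ``-L`` prints the chosen label keys as extra columns. A small sketch:

.. code-block:: bash

   # show the tier and app labels as columns next to each pod
   $ kubectl get pods -L tier,app

..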
code-block:: bash 57 | 58 | $ kubectl label pod nginx-pod-1 tier=PROD app=v1 59 | $ kubectl label pod nginx-pod-2 tier=ACC app=v1 60 | $ kubectl label pod nginx-pod-3 tier=TEST app=v1 61 | 62 | # update label 63 | $ kubectl label pod nginx-pod-1 tier=ACC app=v1 --overwrite # overwrite 64 | $ kubectl label pod nginx-pod-1 tier- # delete label 65 | 66 | 67 | Querying Using Labels and Selectors 68 | ---------------------------------------- 69 | 70 | .. code-block:: bash 71 | 72 | $ kubectl get pods --show-labels 73 | $ kubectl get pods --selector tier=PROD 74 | $ kubectl get pods -l 'tier in (PROD, TEST)' 75 | $ kubectl get pods -l 'tier notin (PROD, TEST)' 76 | 77 | 78 | How Kubernetes Uses Labels 79 | ---------------------------------- 80 | 81 | - Controllers and Services match pods using Selectors 82 | - Pod Scheduling, scheduling to specific Node 83 | -------------------------------------------------------------------------------- /source/exam.rst: -------------------------------------------------------------------------------- 1 | CKA Exam Tips 2 | =============== 3 | 4 | 关于考试的一些技巧。 5 | 6 | 考试大纲(必读) 7 | -------------------- 8 | 9 | https://github.com/cncf/curriculum 10 | 11 | 12 | 考试相关(必读) 13 | ------------------------ 14 | 15 | https://www.cncf.io/certification/cka/ 16 | 17 | https://docs.linuxfoundation.org/tc-docs/certification/tips-cka-and-ckad 18 | 19 | https://docs.linuxfoundation.org/tc-docs/certification/faq-cka-ckad-cks 20 | 21 | 22 | kubectl Cheat Sheet 23 | ----------------------- 24 | 25 | 考试的时候可以打开放一边 26 | 27 | https://kubernetes.io/docs/reference/kubectl/cheatsheet/ 28 | 29 | 30 | https://collabnix.github.io/kubelabs/ 31 | 32 | 一些工具的使用 33 | --------------- 34 | 35 | vi/vim编辑器 36 | 37 | json处理 38 | ------------ 39 | 40 | ``jsonpath`` 或者使用 ``jq`` 41 | 42 | .. code-block:: bash 43 | 44 | vagrant@k8s-master:~$ kubectl get pods --all-namespaces -o json | jq '.items[].spec.containers[].image' 45 | "rancher/mirrored-flannelcni-flannel:v0.18.1" 46 | "rancher/mirrored-flannelcni-flannel:v0.18.1" 47 | "rancher/mirrored-flannelcni-flannel:v0.18.1" 48 | "k8s.gcr.io/coredns/coredns:v1.8.6" 49 | "k8s.gcr.io/coredns/coredns:v1.8.6" 50 | "k8s.gcr.io/etcd:3.5.3-0" 51 | "k8s.gcr.io/kube-apiserver:v1.24.3" 52 | "k8s.gcr.io/kube-controller-manager:v1.24.3" 53 | "k8s.gcr.io/kube-proxy:v1.24.3" 54 | "k8s.gcr.io/kube-proxy:v1.24.3" 55 | "k8s.gcr.io/kube-proxy:v1.24.3" 56 | "k8s.gcr.io/kube-scheduler:v1.24.3" 57 | "k8s.gcr.io/metrics-server/metrics-server:v0.6.1" 58 | vagrant@k8s-master:~$ kubectl get pods --all-namespaces -o jsonpath='{.items[*].spec.containers[*].image}' 59 | rancher/mirrored-flannelcni-flannel:v0.18.1 rancher/mirrored-flannelcni-flannel:v0.18.1 rancher/mirrored-flannelcni-flannel:v0.18.1 k8s.gcr.io/coredns/coredns:v1.8.6 k8s.gcr.io/coredns/coredns:v1.8.6 k8s.gcr.io/etcd:3.5.3-0 k8s.gcr.io/kube-apiserver:v1.24.3 k8s.gcr.io/kube-controller-manager:v1.24.3 k8s.gcr.io/kube-proxy:v1.24.3 k8s.gcr.io/kube-proxy:v1.24.3 k8s.gcr.io/kube-proxy:v1.24.3 k8s.gcr.io/kube-scheduler:v1.24.3 k8s.gcr.io/metrics-server/metrics-server:v0.6.1vagrant@k8s-master:~$ 60 | vagrant@k8s-master:~$ 61 | vagrant@k8s-master:~$ 62 | 63 | 64 | 通过dry-run快速生成yaml 65 | -------------------------- 66 | 67 | 68 | .. 
code-block:: bash 69 | 70 | vagrant@k8s-master:~$ kubectl run nginx --image=nginx --dry-run=client -oyaml > pod.yaml 71 | vagrant@k8s-master:~$ more pod.yaml 72 | apiVersion: v1 73 | kind: Pod 74 | metadata: 75 | creationTimestamp: null 76 | labels: 77 | run: nginx 78 | name: nginx 79 | spec: 80 | containers: 81 | - image: nginx 82 | name: nginx 83 | resources: {} 84 | dnsPolicy: ClusterFirst 85 | restartPolicy: Always 86 | status: {} 87 | 88 | 可以定义变量节省命令输入时间 89 | 90 | .. code-block:: bash 91 | 92 | $ export dry="--dry-run=client -o yaml" 93 | $ kubectl run nginx --image=nginx $dry > pod.yaml 94 | 95 | 96 | 快速强制删除Pod 97 | ----------------- 98 | 99 | 100 | 直接就强制删除pod (SIGKILL) 可以节省时间。 101 | 102 | .. code-block:: bash 103 | 104 | $ kubectl delete pod --grace-period=0 --force 105 | 106 | 同样也可以定义变量节省时间 107 | 108 | 109 | 设置alias,节省敲命令的时间 110 | -------------------------------- 111 | 112 | .. image:: _static/kube-alias.jpeg 113 | :width: 600 114 | :alt: k8s-alias 115 | -------------------------------------------------------------------------------- /source/index.rst: -------------------------------------------------------------------------------- 1 | .. Learn Kubernetes from Scratch documentation master file, created by 2 | sphinx-quickstart on Thu Apr 28 12:33:17 2022. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Learn Kubernetes from Scratch 7 | ======================================== 8 | 9 | .. note:: 10 | 11 | 🎓🎓🎓 本文档的目的是帮助大家快速入门Kubernetes,并通过 ``Certified Kubernetes Administrator (CKA)`` 的认证考试。 12 | 13 | 目前课程基于的版本是Kubernetes ``v1.30.x`` (2024年7月份更新) ,并且会持续更新。 14 | 15 | .. note:: 16 | 17 | 视频课程地址 https://www.udemy.com/course/k8s-chinese/?referralCode=4D8B7AFDBFAF9A8E4F81 18 | 19 | 20 | 感谢以下对此文档做出贡献的同学(排名不分高低和先后): 21 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 22 | 23 | .. contributors:: xiaopeng163/learn-k8s-from-scratch 24 | :avatars: 25 | :exclude: xiaopeng163,dependabot[bot], dependabot-preview[bot] 26 | :order: DESC 27 | 28 | 29 | Before Start You Need Know 30 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 31 | 32 | - 一定的Linux命令行基础 33 | - 了解Docker和容器 34 | - 一定的网络知识 35 | - 一定的加密相关知识(非对称加密,HTTPS等) 36 | 37 | 目录 38 | ------ 39 | 40 | .. toctree:: 41 | :maxdepth: 2 42 | 43 | introduction 44 | k8s-install 45 | api-server-pod 46 | controller-deployment 47 | scheduling 48 | stroage 49 | network 50 | security 51 | maintaining 52 | logging-monitoring 53 | troubleshooting 54 | exam 55 | about 56 | 57 | Indices and tables 58 | ================== 59 | 60 | * :ref:`genindex` 61 | * :ref:`modindex` 62 | * :ref:`search` 63 | -------------------------------------------------------------------------------- /source/introduction.rst: -------------------------------------------------------------------------------- 1 | Introduction 2 | =============== 3 | 4 | .. note:: 5 | 6 | 容器技术? 如果对容器技术和Docker没有了解,建议先学习一下,或者参考我的另外一个关于Docker的免费在线文档 7 | 8 | https://dockertips.readthedocs.io/en/latest/docker-install/docker-intro.html 9 | 10 | 11 | 12 | 13 | 14 | Container Orchestration 15 | -------------------------- 16 | 17 | .. image:: _static/introduction/container-orchestration.PNG 18 | :alt: container-orchestration 19 | 20 | 21 | k8s? 22 | ------- 23 | 24 | 25 | Kubernetes is an open-source container orchestration system for automating software deployment, scaling, and management. 26 | Google originally designed Kubernetes, but the Cloud Native Computing Foundation now maintains the project. 
- Wikipedia 27 | 28 | 29 | By the way, if you’re wondering where the name “Kubernetes” came from, it is a Greek word, meaning helmsman or pilot. The abbreviation K8s is derived by replacing the eight letters of “ubernete” with the digit 8. 30 | 31 | 32 | 33 | Kubernetes Architecture 34 | ----------------------------- 35 | 36 | .. image:: _static/introduction/kubernetes_architecture.jpg 37 | :alt: kubernetes_architecture 38 | 39 | 40 | Control Plane 41 | ~~~~~~~~~~~~~~~~~~~~~ 42 | 43 | Master节点主要有4个主要组件: 44 | 45 | - API Server 跟集群进行交互的接口(kubectl) 46 | - etcd, 集群数据存储 47 | - Scheduler,根据不同的因素决定最终的容器运行在哪个节点node上 48 | - Controller Manager,管理不同的controller的 49 | 50 | Worker Node 51 | ~~~~~~~~~~~~~~~~ 52 | 53 | Worker节点的三个主要组件: 54 | 55 | - kubelet 运行在每一个node上的agent,负责确保最终的pod创建启动停止销毁等,直接和container runtime交互 56 | - kubeproxy 运行在每一个node上的network agent,负责网络通信部分,pod之间,pod和外界之间等 57 | - container runtime 具体的容器运行时,比如docker,containerd等 -------------------------------------------------------------------------------- /source/k8s-install.rst: -------------------------------------------------------------------------------- 1 | Kubernetes Installation 2 | =========================== 3 | 4 | Kubernetes考试版本查看 https://training.linuxfoundation.org/certification/certified-kubernetes-administrator-cka/# 5 | 6 | Kubernetes的集群搭建方法有很多种,其中最常见的有: 7 | 8 | - Minikube 9 | - kubeadm 10 | - Kubernetes on cloud (AWS,Azure,GCP等) 11 | 12 | 最简单的是第三种,基本就是在云上点一下鼠标就完成了,其次是minikube,一条命令搞定,稍微麻烦一点的是kubeadm。 13 | 14 | 15 | 个人比较推荐的是 ``kubeadm``, 原因是在CKA的考试中,kubeadm相关的内容是必考内容之一,包括: 16 | 17 | - Use Kubeadm to install a basic cluster 18 | - Perform a version upgrade on a Kubernetes cluster using Kubeadm 19 | 20 | 21 | .. note:: 22 | 23 | 有些朋友可能听说过 https://labs.play-with-k8s.com/ 这个免费的在线k8s环境,个人并不推荐,速度非常慢,而且经常会因为资源问题卡死。 24 | 25 | .. toctree:: 26 | :maxdepth: 2 27 | :caption: Contents: 28 | 29 | k8s-install/minikube 30 | k8s-install/kubeadm 31 | k8s-install/kubeadm-cn 32 | k8s-install/verify 33 | k8s-install/vagrant 34 | -------------------------------------------------------------------------------- /source/k8s-install/mac-arm.rst: -------------------------------------------------------------------------------- 1 | Mac ARM 2 | ============ 3 | 4 | 5 | 推荐大家使用lima来创建管理虚拟机,参考视频如下 6 | 7 | https://www.youtube.com/watch?v=aV4l85XHFGA 8 | 9 | 10 | 11 | 创建三台VM 12 | ------------ 13 | 14 | .. code-block:: sh 15 | 16 | limactl start --name=k8s-master template://ubuntu 17 | limactl start --name=k8s-worker1 template://ubuntu 18 | limactl start --name=k8s-worker2 template://ubuntu 19 | 20 | 查看VM 21 | ---------- 22 | 23 | .. code-block:: sh 24 | 25 | limactl list 26 | 27 | .. code-block:: sh 28 | 29 | NAME STATUS SSH ARCH CPUS MEMORY DISK DIR 30 | k8s-master Running 127.0.0.1:64931 aarch64 4 4GiB 100GiB /Users/XS69ND/.lima/k8s-master 31 | k8s-worker1 Running 127.0.0.1:64945 aarch64 4 4GiB 100GiB /Users/XS69ND/.lima/k8s-worker1 32 | k8s-worker2 Running 127.0.0.1:64965 aarch64 4 4GiB 100GiB /Users/XS69ND/.lima/k8s-worker2 33 | 34 | k8s-master install software 35 | ---------------------------------- 36 | 37 | 38 | .. 
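The script below is meant to run as root inside each VM. With lima, ``limactl shell`` is the easiest way in; a sketch (the script path is illustrative):

.. code-block:: bash

   # open a shell inside the k8s-master VM, then run the setup script as root
   $ limactl shell k8s-master
   $ sudo bash setup.sh

..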
code-block:: bash 39 | 40 | #!/bin/bash 41 | 42 | echo "[TASK 1] Disable and turn off SWAP" 43 | sed -i '/swap/d' /etc/fstab 44 | swapoff -a 45 | 46 | echo "[TASK 2] Stop and Disable firewall" 47 | systemctl disable --now ufw >/dev/null 2>&1 48 | 49 | echo "[TASK 3] Enable and Load Kernel modules" 50 | cat >>/etc/modules-load.d/containerd.conf<>/etc/sysctl.d/kubernetes.conf</dev/null 2>&1 64 | 65 | echo "[TASK 5] Install containerd runtime" 66 | mkdir -p /etc/apt/keyrings 67 | curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg 68 | echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \ 69 | $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null 70 | apt -qq update >/dev/null 2>&1 71 | apt install -qq -y containerd.io >/dev/null 2>&1 72 | containerd config default >/etc/containerd/config.toml 73 | systemctl restart containerd 74 | systemctl enable containerd >/dev/null 2>&1 75 | 76 | echo "[TASK 6] Add apt repo for kubernetes" 77 | curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - >/dev/null 2>&1 78 | apt-add-repository "deb http://apt.kubernetes.io/ kubernetes-xenial main" >/dev/null 2>&1 79 | 80 | echo "[TASK 7] Install Kubernetes components (kubeadm, kubelet and kubectl)" 81 | apt install -qq -y kubeadm=1.28.0-00 kubelet=1.28.0-00 kubectl=1.28.0-00 >/dev/null 2>&1 82 | 83 | 84 | -------------------------------------------------------------------------------- /source/k8s-install/minikube.rst: -------------------------------------------------------------------------------- 1 | Minikube 2 | =========== 3 | 4 | Minikube是一个可以快速搭建Kubernetes的工具 5 | 6 | Minukube 文档 https://minikube.sigs.k8s.io/docs/start/ 7 | 8 | 下载: https://github.com/kubernetes/minikube/releases 9 | 10 | Pre-Requirements 11 | -------------------- 12 | 13 | Minukube搭建k8s环境是通过创建虚拟机或者Docker容器的方式实现的: 14 | 15 | - 虚拟机,就是创建启动几个虚拟机,然后安装k8s 16 | - 通过docker创建几个容器,然后把k8s安装和启动在这些容器里 17 | 18 | 这些不同的方式是通过不同的Driver实现的,具体Minukube所支持的Driver可以通过这个link查看 19 | 20 | https://minikube.sigs.k8s.io/docs/drivers/ 21 | 22 | 23 | 对于Windows和Mac的机器,建议使用虚拟化的方式进行(也就是说要保证你的机器可以成功的创建和启动虚拟机),要满足以下条件: 24 | 25 | - BIOS开启虚拟化 26 | - 要有安装一个虚拟化的 ``hypervisor``: 27 | - VirtualBox 28 | - VMware workstation/ VMware Fusion 29 | - 或者其它 30 | 31 | 32 | 33 | Install Steps for Windows 34 | ---------------------------- 35 | 36 | 打开powershell,运行 37 | 38 | .. code-block:: powershell 39 | 40 | New-Item -Path 'c:\' -Name 'minikube' -ItemType Directory -Force 41 | Invoke-WebRequest -OutFile 'c:\minikube\minikube.exe' -Uri 'https://github.com/kubernetes/minikube/releases/latest/download/minikube-windows-amd64.exe' -UseBasicParsing 42 | 43 | 为了能直接在命令行中使用minikube命令,而不是每次都输入 c:\minikube\minikube.exe,有两种方式: 44 | 45 | 添加环境变量 46 | ~~~~~~~~~~~~~ 47 | 48 | 添加环境变量,以管理员身份打开powershell运行 49 | 50 | .. code-block:: powershell 51 | 52 | $oldPath = [Environment]::GetEnvironmentVariable('Path', [EnvironmentVariableTarget]::Machine) 53 | if ($oldPath.Split(';') -inotcontains 'C:\minikube'){ ` 54 | [Environment]::SetEnvironmentVariable('Path', $('{0};C:\minikube' -f $oldPath), [EnvironmentVariableTarget]::Machine) ` 55 | } 56 | 57 | 添加Alias 58 | ~~~~~~~~~~~ 59 | 60 | 如果不想修改环境变量,也可以通过设置Alias实现,非管理员打开powershell 61 | 62 | 运行 63 | 64 | .. 
code-block:: powershell 65 | 66 | notepad.exe $PROFILE 67 | 68 | 这个会用notepad打开一个powershell的profile文件(如果是第一次运行,那么会提示你要新建一个文件,点击确定) 69 | 70 | 71 | 把以下内容贴到文件中,保存关闭文件,然后重启打开powershell 72 | 73 | .. code-block:: powershell 74 | 75 | Set-Alias -Name minikube -Value C:\minikube\minikube.exe 76 | 77 | 就可以直接在命令行中使用minikube命令了 78 | 79 | .. code-block:: powershell 80 | 81 | PS C:\Users\Peng Xiao> minikube 82 | minikube provisions and manages local Kubernetes clusters optimized for development workflows. 83 | 84 | Basic Commands: 85 | start Starts a local Kubernetes cluster 86 | status Gets the status of a local Kubernetes cluster 87 | stop Stops a running local Kubernetes cluster 88 | delete Deletes a local Kubernetes cluster 89 | dashboard Access the Kubernetes dashboard running within the minikube cluster 90 | pause pause Kubernetes 91 | unpause unpause Kubernetes 92 | 93 | Images Commands: 94 | docker-env Configure environment to use minikube's Docker daemon 95 | podman-env Configure environment to use minikube's Podman service 96 | cache Add, delete, or push a local image into minikube 97 | image Manage images 98 | 99 | Configuration and Management Commands: 100 | addons Enable or disable a minikube addon 101 | config Modify persistent configuration values 102 | profile Get or list the current profiles (clusters) 103 | update-context Update kubeconfig in case of an IP or port change 104 | 105 | Networking and Connectivity Commands: 106 | service Returns a URL to connect to a service 107 | tunnel Connect to LoadBalancer services 108 | 109 | Advanced Commands: 110 | mount Mounts the specified directory into minikube 111 | ssh Log into the minikube environment (for debugging) 112 | kubectl Run a kubectl binary matching the cluster version 113 | node Add, remove, or list additional nodes 114 | cp Copy the specified file into minikube 115 | 116 | Troubleshooting Commands: 117 | ssh-key Retrieve the ssh identity key path of the specified node 118 | ssh-host Retrieve the ssh host key of the specified node 119 | ip Retrieves the IP address of the specified node 120 | logs Returns logs to debug a local Kubernetes cluster 121 | update-check Print current and latest version number 122 | version Print the version of minikube 123 | options Show a list of global command-line options (applies to all commands). 124 | 125 | Other Commands: 126 | completion Generate command completion for a shell 127 | 128 | Use "minikube --help" for more information about a given command. 129 | PS C:\Users\Peng Xiao> 130 | 131 | minikube start 132 | ~~~~~~~~~~~~~~~~ 133 | 134 | 135 | 以VirtualBox驱动和 v1.24.0版本的Kubernetes为例 136 | 137 | .. code-block:: powershell 138 | 139 | minikube start --driver=virtualbox --kubernetes-version=v1.24.0 140 | 141 | 142 | kubectl 143 | ~~~~~~~~~~ 144 | 145 | 可以通过minikube来运行kubectl 146 | 147 | .. code-block:: powershell 148 | 149 | minikube kubectl -- 150 | 151 | 为了方便,也可以把下面的alias加到powershell的 PROFILE里 152 | 153 | 154 | .. code-block:: powershell 155 | 156 | function kubectl { minikube kubectl -- $args } 157 | doskey kubectl=minikube kubectl $* 158 | 159 | 160 | Install Steps for MacOS 161 | ---------------------------- 162 | 163 | 164 | x86芯片 165 | ~~~~~~~~~~~~~ 166 | 167 | .. code-block:: bash 168 | 169 | curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube-darwin-amd64 170 | sudo install minikube-darwin-amd64 /usr/local/bin/minikube 171 | 172 | M1 ARM芯片 173 | ~~~~~~~~~~~~~~ 174 | 175 | .. 
code-block:: bash 176 | 177 | curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube-darwin-arm64 178 | sudo install minikube-darwin-arm64 /usr/local/bin/minikube 179 | 180 | 181 | 启动 182 | ~~~~~~ 183 | 184 | 如果是x86芯片,可以使用VirtualBox 185 | 186 | 187 | .. code-block:: bash 188 | 189 | minikube start --driver=virtualbox --kubernetes-version=v1.24.0 190 | 191 | 192 | 如果是ARM芯片,可以使用docker 193 | 194 | 195 | .. code-block:: bash 196 | 197 | minikube start --driver=docker --alsologtostderr --kubernetes-version=v1.24.0 198 | 199 | 200 | 设置Alias 201 | 202 | .. code-block:: bash 203 | 204 | alias kubectl="minikube kubectl --" 205 | -------------------------------------------------------------------------------- /source/k8s-install/vagrant.rst: -------------------------------------------------------------------------------- 1 | Vagrant Setup 2 | ================ 3 | 4 | 5 | Vagrant入门系列 6 | 7 | YouTube: https://www.youtube.com/playlist?list=PLfQqWeOCIH4B6YAEXMr6cx4AfnKNBLbZO 8 | 9 | 10 | 环境快速搭建 11 | -------------- 12 | 13 | .. code-block:: bash 14 | 15 | $ git clone https://github.com/xiaopeng163/learn-k8s-from-scratch 16 | $ cd learn-k8s-from-scratch/lab/kubeadm-3-nodes 17 | $ vagrant up 18 | -------------------------------------------------------------------------------- /source/k8s-install/verify.rst: -------------------------------------------------------------------------------- 1 | kubeadm集群验证 2 | ================= 3 | 4 | 三节点环境搭建完成后,可以通过下面的方式快速验证一下环境是否搭建成功。 5 | 6 | .. code-block:: bash 7 | 8 | kubectl get nodes -o wide 9 | 10 | .. code-block:: bash 11 | 12 | NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME 13 | k8s-master Ready control-plane 38m v1.29.2 10.211.55.4 Ubuntu 22.04.2 LTS 5.15.0-76-generic containerd://1.6.28 14 | k8s-worker1 Ready 21m v1.29.2 10.211.55.5 Ubuntu 22.04.2 LTS 5.15.0-97-generic containerd://1.6.28 15 | k8s-worker2 Ready 21m v1.29.2 10.211.55.6 Ubuntu 22.04.2 LTS 5.15.0-76-generic containerd://1.6.28 16 | 17 | 18 | 创建pod 19 | --------- 20 | 21 | 创建一个nginx的pod,pod能成功过running 22 | 23 | .. code-block:: bash 24 | 25 | kubectl run web --image nginx 26 | 27 | .. code-block:: bash 28 | 29 | pod/web created 30 | 31 | .. code-block:: bash 32 | 33 | kubectl get pods -o wide 34 | 35 | .. code-block:: bash 36 | 37 | NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES 38 | web 1/1 Running 0 63s 10.244.1.2 k8s-worker1 39 | 40 | 创建service 41 | ------------- 42 | 43 | 给nginx pod创建一个service, 通过curl能访问这个service的cluster ip地址。 44 | 45 | .. code-block:: bash 46 | 47 | kubectl expose pod web --port=80 --name=web-service 48 | 49 | .. code-block:: bash 50 | 51 | service/web-service exposed 52 | 53 | .. code-block:: bash 54 | 55 | kubectl get service 56 | 57 | .. code-block:: bash 58 | 59 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 60 | kubernetes ClusterIP 10.96.0.1 443/TCP 3h53m 61 | web-service ClusterIP 10.98.102.238 80/TCP 4s 62 | 63 | .. code-block:: bash 64 | 65 | curl 10.98.102.238 66 | 67 | 68 | 69 | Welcome to nginx! 70 | 75 | 76 | 77 |

<h1>Welcome to nginx!</h1>
78 | <p>If you see this page, the nginx web server is successfully installed and
79 | working. Further configuration is required.</p>
80 |
81 | <p>For online documentation and support please refer to
82 | <a href="http://nginx.org/">nginx.org</a>.<br/>
83 | Commercial support is available at
84 | <a href="http://nginx.com/">nginx.com</a>.</p>
85 |
86 | <p><em>Thank you for using nginx.</em></p>

87 | 88 | 89 | vagrant@k8s-master:~$ 90 | 91 | 92 | 环境清理 93 | ----------- 94 | 95 | .. code-block:: bash 96 | 97 | $ kubectl delete service web-service 98 | $ kubectl delete pod web 99 | -------------------------------------------------------------------------------- /source/logging-monitoring.rst: -------------------------------------------------------------------------------- 1 | Logging and Monitoring 2 | ========================= 3 | 4 | .. toctree:: 5 | :maxdepth: 2 6 | :caption: Contents: 7 | 8 | logging/logging 9 | logging/event 10 | logging/jsonpath 11 | logging/monitoring -------------------------------------------------------------------------------- /source/logging/event.rst: -------------------------------------------------------------------------------- 1 | Events 2 | ========== 3 | 4 | Event global 5 | --------------- 6 | 7 | .. code-block:: bash 8 | 9 | kubectl get events 10 | kubectl get events --field-selector type=Warning,reason=Failed 11 | kubectl get events --watch & ( run `fg` and ctrl +c to break it) 12 | 13 | Event per resource 14 | ---------------------- 15 | 16 | Use kubectl describe. 17 | 18 | .. code-block:: bash 19 | 20 | kubectl describe pods nginx 21 | 22 | -------------------------------------------------------------------------------- /source/logging/jsonpath.rst: -------------------------------------------------------------------------------- 1 | JsonPath 2 | ============ 3 | 4 | some examples 5 | 6 | .. code-block:: bash 7 | 8 | # get all pod names 9 | $ kubectl get pods -o jsonpath='{.items[*].metadata.name}' 10 | 11 | # get all container image name in use by all pods in all namespaces 12 | $ kubectl get pods --all-namespaces -o jsonpath='{.items[*].spec.containers[*].image}' 13 | 14 | # add a new line to the result 15 | $ kubectl get pods --all-namespaces -o jsonpath='{.items[*].spec.containers[*].image}{"\n"}' 16 | 17 | # ?() define filter 18 | # @ - the current object 19 | $ kubectl get nodes -o jsonpath="{.items[*].status.addresses[?(@.type=='InternalIP')].address}" 20 | 21 | # sorting 22 | $ kubectl get pods -A -o jsonpath='{.items[*].metadata.name}{"\n"}' --sort-by=.metadata.name 23 | 24 | -------------------------------------------------------------------------------- /source/logging/logging.rst: -------------------------------------------------------------------------------- 1 | Logging 2 | =========== 3 | 4 | 参考 https://kubernetes.io/docs/concepts/cluster-administration/logging/ 5 | 6 | Accessing Log Data - Pods and Containers 7 | -------------------------------------------- 8 | 9 | kubectl logs -> API Server -> kubelet -> container logs 10 | 11 | .. code-block:: bash 12 | 13 | $ kubectl logs $POD_NAME 14 | $ kubectl logs $POD_NAME -c $CONTAINER_NAME 15 | $ kubectl logs $POD_NAME --all-containers 16 | $ kubectl logs --selector app=demo 17 | $ kubectl logs -f $POD_NAME # follow latest logs 18 | $ kubectl logs $POD_NAME --tail 5 # get last 5 entries logs 19 | 20 | 如果api server不可用,则需要手动去container所在节点查看container Log 21 | 22 | .. code-block:: bash 23 | 24 | crictl --runtime-endpoint unix:///run/containerd/containerd.sock logs $CONTAINER_ID 25 | 26 | 或者 27 | 28 | .. code-block:: bash 29 | 30 | tail /var/log/containers/$CONTAINER_NAME_$CONTAINER_ID 31 | 32 | Accessing Log Data - Nodes 33 | ------------------------------ 34 | 35 | Node上有两个关键的组件,一个是``kubelet``, 一个是 ``kube-proxy`` 36 | 37 | .. 
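Of the two, only the kubelet runs under systemd (see the block below); kube-proxy normally runs as a DaemonSet pod in ``kube-system``, so its logs are read like any other pod's. A sketch using the label kubeadm puts on those pods:

.. code-block:: bash

   # kubeadm labels its kube-proxy pods with k8s-app=kube-proxy
   $ kubectl logs --namespace kube-system -l k8s-app=kube-proxy

..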
code-block:: bash 38 | 39 | systemctl status kubelet.service # check service status 40 | 41 | journalctl -u kubelet.service 42 | journalctl -u kubelet.service | grep -i ERROR 43 | journalctl -u kubelet.service --since today --no-pager 44 | 45 | Accessing Log Data - Control Plane 46 | --------------------------------------- 47 | 48 | Run as Pods 49 | 50 | .. code-block:: bash 51 | 52 | $ kubectl logs -n kube-system $POD_NAME 53 | 54 | 55 | Run from systemd 56 | 57 | .. code-block:: bash 58 | 59 | systemctl status kubelet.service # check service status 60 | 61 | journalctl -u kubelet.service 62 | 63 | centralized logging solutions 64 | ---------------------------------- 65 | 66 | .. image:: ../_static/logging/fluentd.png 67 | :alt: fluentd 68 | -------------------------------------------------------------------------------- /source/logging/monitoring.rst: -------------------------------------------------------------------------------- 1 | Monitoring 2 | =================== 3 | 4 | - Observe 5 | - Measure Changes 6 | - Resource Limits 7 | 8 | Kubernetes Metrics Server 9 | ----------------------------- 10 | 11 | https://kubernetes-sigs.github.io/metrics-server/ 12 | 13 | collects resource metris from kubelets for resources like Pods, Nodes. 14 | 15 | Metric like CPU, Memory 16 | 17 | .. code-block:: bash 18 | 19 | $ kubectl top pods 20 | $ kubectl top nodes 21 | 22 | 23 | Deploy Metrics Server 24 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~ 25 | 26 | -------------------------------------------------------------------------------- /source/maintaining.rst: -------------------------------------------------------------------------------- 1 | Maintaining Kubernetes Clusters 2 | ================================== 3 | 4 | 5 | 6 | .. toctree:: 7 | :maxdepth: 2 8 | :caption: Contents: 9 | 10 | maintaining/etcd 11 | maintaining/upgrade 12 | maintaining/HA 13 | maintaining/cert-renew 14 | -------------------------------------------------------------------------------- /source/maintaining/HA.rst: -------------------------------------------------------------------------------- 1 | Highly Available Clusters 2 | ============================= 3 | 4 | 5 | 更多信息: https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability/ 6 | 7 | Option1 8 | ------------ 9 | 10 | .. image:: ../_static/maintaining/k8s-ha1.jpg 11 | :alt: ha1 12 | 13 | 14 | Option2 15 | ------------- 16 | 17 | .. image:: ../_static/maintaining/k8s-ha2.jpg 18 | :alt: ha2 19 | -------------------------------------------------------------------------------- /source/maintaining/cert-renew.rst: -------------------------------------------------------------------------------- 1 | Cluster Cert Renew 2 | ==================== 3 | 4 | 5 | k8s集群内部通信证书的有效期为1年,到期后需要更新证书。如果你遇到了以下问题,那么你可能需要更新证书: 6 | 7 | 8 | x509: certificate has expired or is not yet valid 9 | 10 | 11 | 本文档将指导您如何更新k8s集群内部通信证书。 12 | 13 | 14 | 查看证书有效期 15 | ------------------ 16 | 17 | .. code-block:: bash 18 | 19 | $ sudo kubeadm certs check-expiration 20 | [check-expiration] Reading configuration from the cluster... 
21 | [check-expiration] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml' 22 | 23 | CERTIFICATE EXPIRES RESIDUAL TIME CERTIFICATE AUTHORITY EXTERNALLY MANAGED 24 | admin.conf Mar 04, 2025 15:35 UTC 341d ca no 25 | apiserver Mar 04, 2025 15:35 UTC 341d ca no 26 | apiserver-etcd-client Mar 04, 2025 15:35 UTC 341d etcd-ca no 27 | apiserver-kubelet-client Mar 04, 2025 15:35 UTC 341d ca no 28 | controller-manager.conf Mar 04, 2025 15:35 UTC 341d ca no 29 | etcd-healthcheck-client Mar 04, 2025 15:35 UTC 341d etcd-ca no 30 | etcd-peer Mar 04, 2025 15:35 UTC 341d etcd-ca no 31 | etcd-server Mar 04, 2025 15:35 UTC 341d etcd-ca no 32 | front-proxy-client Mar 04, 2025 15:35 UTC 341d front-proxy-ca no 33 | scheduler.conf Mar 04, 2025 15:35 UTC 341d ca no 34 | super-admin.conf Mar 04, 2025 15:35 UTC 341d ca no 35 | 36 | CERTIFICATE AUTHORITY EXPIRES RESIDUAL TIME EXTERNALLY MANAGED 37 | ca Mar 02, 2034 15:35 UTC 9y no 38 | etcd-ca Mar 02, 2034 15:35 UTC 9y no 39 | front-proxy-ca Mar 02, 2034 15:35 UTC 9y no 40 | 41 | 42 | 更新证书 43 | ----------- 44 | 45 | 更新之前最好备份一下现有证书,以及etcd snapshot,以防万一。 46 | 47 | 把这个目录的文件全部备份一下 `/etc/kubernetes/pki/` 48 | 49 | .. code-block:: bash 50 | 51 | $ sudo kubeadm certs renew all 52 | [renew] Reading configuration from the cluster... 53 | [renew] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml' 54 | 55 | certificate embedded in the kubeconfig file for the admin to use and for kubeadm itself renewed 56 | certificate for serving the Kubernetes API renewed 57 | certificate the apiserver uses to access etcd renewed 58 | certificate for the API server to connect to kubelet renewed 59 | certificate embedded in the kubeconfig file for the controller manager to use renewed 60 | certificate for liveness probes to healthcheck etcd renewed 61 | certificate for etcd nodes to communicate with each other renewed 62 | certificate for serving etcd renewed 63 | certificate for the front proxy client renewed 64 | certificate embedded in the kubeconfig file for the scheduler manager to use renewed 65 | certificate embedded in the kubeconfig file for the super-admin renewed 66 | 67 | Done renewing certificates. You must restart the kube-apiserver, kube-controller-manager, kube-scheduler and etcd, so that they can use the new certificates. 68 | 69 | 执行完之后,需要重启kube-apiserver, kube-controller-manager, kube-scheduler和etcd。 70 | 71 | 暴力方法就是直接重启集群的所有节点。比较温和的方法是按照我们前面讲的集群upgrade的方法,依次重启集群的所有节点,重启节点之前需进行节点的 `drain` 操作。 -------------------------------------------------------------------------------- /source/maintaining/etcd.rst: -------------------------------------------------------------------------------- 1 | etcd backup and restore operations 2 | ===================================== 3 | 4 | 5 | etcd 6 | ------- 7 | 8 | - key vaule datastore 9 | - stores cluster state data and objects 10 | 11 | 12 | to protect data: 13 | 14 | - backup and restore 15 | - HA 16 | 17 | etcdctl install 18 | ------------------- 19 | 20 | 获取到当前的etcd版本。例如当前实验环境,etcd version是 3.5.3 21 | 22 | .. code-block:: bash 23 | 24 | $ kubectl get pods -A | grep etcd 25 | kube-system etcd-k8s-master 1/1 Running 21 (26m ago) 47d 26 | $ kubectl exec -it --namespace kube-system etcd-k8s-master -- sh 27 | sh-5.1# 28 | sh-5.1# etcd --version 29 | etcd Version: 3.5.3 30 | Git SHA: 0452feec7 31 | Go Version: go1.16.15 32 | Go OS/Arch: linux/amd64 33 | sh-5.1# exit 34 | exit 35 | $ 36 | 37 | GitHub下载对应版本的etcd 38 | 39 | .. 
code-block:: bash 40 | 41 | export RELEASE=3.5.3 42 | wget https://github.com/etcd-io/etcd/releases/download/v${RELEASE}/etcd-v${RELEASE}-linux-amd64.tar.gz 43 | tar -zxvf etcd-v${RELEASE}-linux-amd64.tar.gz 44 | cd etcd-v${RELEASE}-linux-amd64 45 | sudo cp etcdctl /usr/local/bin 46 | 47 | Check the version: 48 | 49 | .. code-block:: bash 50 | 51 | $ etcdctl version 52 | etcdctl version: 3.5.3 53 | API version: 3.5 54 | 55 | etcd documentation: https://etcd.io/docs/ 56 | 57 | 58 | Backing up etcd 59 | ------------------- 60 | 61 | An etcd backup is taken by creating a snapshot with the ``etcdctl`` command line; the resulting backup file should be copied to a safe location outside the cluster as soon as possible. 62 | 63 | In a kubeadm-built cluster, etcd runs in a Pod and stores its data in ``/var/lib/etcd``, a directory that is ``hostPath``-mounted 64 | from the master node. 65 | 66 | 67 | .. code-block:: bash 68 | 69 | $ sudo apt-get install jq 70 | $ kubectl get pod --namespace kube-system etcd-k8s-master -o jsonpath='{.spec.containers[0].volumeMounts}' | jq 71 | [ 72 | { 73 | "mountPath": "/var/lib/etcd", 74 | "name": "etcd-data" 75 | }, 76 | { 77 | "mountPath": "/etc/kubernetes/pki/etcd", 78 | "name": "etcd-certs" 79 | } 80 | ] 81 | $ sudo tree /var/lib/etcd/ 82 | /var/lib/etcd/ 83 | └── member 84 | ├── snap 85 | │   ├── 0000000000000016-00000000001bc619.snap 86 | │   ├── 0000000000000016-00000000001bed2a.snap 87 | │   ├── 0000000000000016-00000000001c143b.snap 88 | │   ├── 0000000000000016-00000000001c3b4c.snap 89 | │   ├── 0000000000000016-00000000001c625d.snap 90 | │   └── db 91 | └── wal 92 | ├── 0.tmp 93 | ├── 000000000000000f-000000000015c31b.wal 94 | ├── 0000000000000010-0000000000173561.wal 95 | ├── 0000000000000011-000000000018a661.wal 96 | ├── 0000000000000012-00000000001a1b93.wal 97 | └── 0000000000000013-00000000001b8e6c.wal 98 | 99 | 3 directories, 12 files 100 | 101 | 102 | .. code-block:: bash 103 | 104 | $ sudo ETCDCTL_API=3 etcdctl --endpoints=https://127.0.0.1:2379 \ 105 | --cacert=/etc/kubernetes/pki/etcd/ca.crt \ 106 | --cert=/etc/kubernetes/pki/etcd/server.crt \ 107 | --key=/etc/kubernetes/pki/etcd/server.key \ 108 | snapshot save /var/lib/dat-backup.db 109 | 110 | # verify the snapshot 111 | $ sudo ETCDCTL_API=3 etcdctl --write-out=table \ 112 | snapshot status /var/lib/dat-backup.db 113 | 114 | 115 | Restoring etcd with etcdctl 116 | ----------------------------- 117 | 118 | 119 |
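A note on what the restore actually does: ``etcdctl snapshot restore`` never touches the live data -- it materializes the snapshot into a fresh data directory (``./default.etcd`` unless ``--data-dir`` is given). The steps below swap that directory into ``/var/lib/etcd`` and then stop the etcd container, after which the kubelet recreates the static Pod on top of the restored data:

..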
code-block:: bash 120 | 121 | $ sudo ETCDCTL_API=3 etcdctl snapshot restore /var/lib/dat-backup.db 122 | 123 | # back up the pre-restore data, in case the restore fails 124 | $ sudo mv /var/lib/etcd /var/lib/etcd.OLD 125 | 126 | # move the restored data into place 127 | $ sudo mv ./default.etcd /var/lib/etcd 128 | 129 | # stop the etcd container 130 | # find the container ID first 131 | sudo crictl --runtime-endpoint unix:///run/containerd/containerd.sock ps 132 | # stop it; the kubelet will recreate the etcd static Pod on the restored data 133 | sudo crictl --runtime-endpoint unix:///run/containerd/containerd.sock stop <etcd-container-id> 134 | 135 | -------------------------------------------------------------------------------- /source/maintaining/upgrade.rst: -------------------------------------------------------------------------------- 1 | Upgrading an existing Cluster 2 | ============================== 3 | 4 | upgrade kubeadm-based Cluster 5 | ---------------------------------- 6 | 7 | - Only one minor version at a time 8 | 9 | - 1.21 -> 1.22 |:white_check_mark:| 10 | - 1.21 -> 1.23 |:x:| 11 | 12 | - Always read the changelog first: https://github.com/kubernetes/kubernetes/tree/master/CHANGELOG 13 | 14 | Check the current version: 15 | 16 | - kubectl version --short 17 | - kubectl get nodes 18 | - kubeadm version 19 | 20 | Upgrade Control Plane 21 | -------------------------- 22 | 23 | - update kubeadm package 24 | - Drain the control plane/master node 25 | - kubeadm upgrade plan 26 | - kubeadm upgrade apply 27 | - Uncordon the control plane/master node 28 | - update kubelet and kubectl 29 | 30 | Using Ubuntu as the example: 31 | 32 | .. code-block:: bash 33 | 34 | # update kubeadm 35 | sudo apt-mark unhold kubeadm 36 | sudo apt-get update 37 | sudo apt-cache policy kubeadm 38 | sudo apt-get install -y kubeadm=$TARGET_VERSION 39 | sudo apt-mark hold kubeadm 40 | 41 | # drain master node 42 | kubectl drain k8s-master --ignore-daemonsets 43 | 44 | sudo kubeadm upgrade plan 45 | sudo kubeadm upgrade apply v$TARGET_VERSION 46 | 47 | # uncordon 48 | kubectl uncordon k8s-master 49 | 50 | # update kubelet and kubectl 51 | sudo apt-mark unhold kubelet kubectl 52 | sudo apt-get update 53 | sudo apt-get install -y kubelet=$TARGET_VERSION kubectl=$TARGET_VERSION 54 | sudo apt-mark hold kubelet kubectl 55 | 56 | 57 | 58 | Upgrade worker node 59 | -------------------------- 60 | 61 | Using Ubuntu as the example: 62 | 63 | .. code-block:: bash 64 | 65 | # go to master node 66 | kubectl drain k8s-worker1 --ignore-daemonsets 67 | 68 | # update kubeadm 69 | sudo apt-mark unhold kubeadm 70 | sudo apt-get update 71 | sudo apt-get install -y kubeadm=$TARGET_VERSION 72 | sudo apt-mark hold kubeadm 73 | 74 | sudo kubeadm upgrade node 75 | 76 | # update kubelet and kubectl 77 | sudo apt-mark unhold kubelet 78 | sudo apt-get update 79 | sudo apt-get install -y kubelet=$TARGET_VERSION 80 | sudo apt-mark hold kubelet 81 | 82 | # go to master node, uncordon this node 83 | kubectl uncordon k8s-worker1 84 | 85 | 86 | Worker Node Maintenance 87 | ---------------------------- 88 | 89 | For OS updates and hardware upgrades: 90 | 91 | Drain/Cordon the Node. -------------------------------------------------------------------------------- /source/network.rst: -------------------------------------------------------------------------------- 1 | Networking 2 | ============ 3 | 4 | https://kubernetes.io/docs/concepts/cluster-administration/networking/ 5 | 6 | https://matthewpalmer.net/kubernetes-app-developer/articles/kubernetes-networking-guide-beginners.html 7 | 8 | - container-to-container communications 9 | - Pod-to-Pod communications 10 | - Pod-to-Service communications 11 | - External-to-Service communications 12 | 13 |
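Each of these paths can be exercised by hand with a throwaway client Pod; a quick sketch (``xiaopeng163/net-box`` is the client image used in later labs; the placeholder addresses are whatever ``kubectl get pods -o wide`` and ``kubectl get svc`` report):

.. code-block:: bash

   kubectl run probe --image=xiaopeng163/net-box --command -- sh -c "sleep 3600"
   kubectl exec probe -- ping -c 1 <another-pod-ip>       # Pod-to-Pod
   kubectl exec probe -- curl -s <service-cluster-ip>     # Pod-to-Service

..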
image:: _static/network/networking-overview.png 14 | :width: 800 15 | :alt: k8s-network 16 | 17 | 18 | Kubernetes Networking Model 19 | 20 | - All pods can communicate with each other on all Nodes 21 | - Agents on a Node can communicate with all pods on that Node 22 | - No Network Address Translation(NAT) 23 | 24 | Kubernetes Network Topology 25 | 26 | - Node Network 27 | - Pod Network 28 | - Cluster Network (used by Services) 29 | 30 | 31 | .. toctree:: 32 | :maxdepth: 2 33 | :caption: Contents: 34 | 35 | network/pod-network 36 | network/cluster-dns 37 | network/service 38 | network/service-auto 39 | network/ingress 40 | network/resource 41 | -------------------------------------------------------------------------------- /source/network/cluster-dns.rst: -------------------------------------------------------------------------------- 1 | Cluster DNS 2 | ============= 3 | 4 | https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ 5 | 6 | DNS is available as a Service in a Cluster, Pods are configured to use this DNS. 7 | 8 | 9 | Cluster DNS configuration 10 | ------------------------------- 11 | 12 | 13 | - 1 deployment (1 replicaset) with 2 pods running on master node. 14 | 15 | - 1 kube-dns service 16 | 17 | .. code-block:: bash 18 | 19 | vagrant@k8s-master:~$ kubectl get all -A -o wide | grep dns 20 | kube-system pod/coredns-6d4b75cb6d-26qqw 1/1 Running 15 (4d23h ago) 34d 10.244.0.36 k8s-master 21 | kube-system pod/coredns-6d4b75cb6d-lhkng 1/1 Running 15 (4d23h ago) 34d 10.244.0.37 k8s-master 22 | kube-system service/kube-dns ClusterIP 10.96.0.10 53/UDP,53/TCP,9153/TCP 34d k8s-app=kube-dns 23 | kube-system deployment.apps/coredns 2/2 2 2 34d coredns k8s.gcr.io/coredns/coredns:v1.8.6 k8s-app=kube-dns 24 | kube-system replicaset.apps/coredns-6d4b75cb6d 2 2 2 34d coredns k8s.gcr.io/coredns/coredns:v1.8.6 k8s-app=kube-dns,pod-template-hash=6d4b75cb6d 25 | vagrant@k8s-master:~$ 26 | 27 | 28 | .. code-block:: bash 29 | 30 | $ kubectl describe configmaps coredns --namespace kube-system 31 | Name: coredns 32 | Namespace: kube-system 33 | Labels: 34 | Annotations: 35 | 36 | Data 37 | ==== 38 | Corefile: 39 | ---- 40 | .:53 { 41 | errors 42 | health { 43 | lameduck 5s 44 | } 45 | ready 46 | kubernetes cluster.local in-addr.arpa ip6.arpa { 47 | pods insecure 48 | fallthrough in-addr.arpa ip6.arpa 49 | ttl 30 50 | } 51 | prometheus :9153 52 | forward . /etc/resolv.conf { 53 | max_concurrent 1000 54 | } 55 | cache 30 56 | loop 57 | reload 58 | loadbalance 59 | } 60 | 61 | 62 | BinaryData 63 | ==== 64 | 65 | Events: 66 | 67 | 68 | Pod DNS settings 69 | --------------------- 70 | 71 | 72 | .. code-block:: bash 73 | 74 | $ kubectl run client --image=xiaopeng163/net-box --command -- sh -c "sleep 100000" 75 | $ kubectl exec -it client -- cat /etc/resolv.conf 76 | search default.svc.cluster.local svc.cluster.local cluster.local kpn 77 | nameserver 10.96.0.10 78 | options ndots:5 79 | 80 | 81 | Pod A/AAAA records 82 | 83 | In general a Pod has the following DNS resolution: 84 | 85 | ``pod-ip-address.my-namespace.pod.cluster-domain.example`` 86 | 87 | 88 | .. 
code-block:: bash 89 | 90 | $ nslookup 10-244-1-194.default.pod.cluster.local 10.96.0.10 91 | Server: 10.96.0.10 92 | Address: 10.96.0.10#53 93 | 94 | Name: 10-244-1-194.default.pod.cluster.local 95 | Address: 10.244.1.194 96 | -------------------------------------------------------------------------------- /source/network/ingress.rst: -------------------------------------------------------------------------------- 1 | Ingress 2 | ========= 3 | 4 | 5 | What is Ingress? 6 | -------------------- 7 | 8 | 参考 https://kubernetes.io/docs/concepts/services-networking/ingress/ 9 | 10 | .. image:: ../_static/network/ingress-overview.svg 11 | :alt: ingress-overview 12 | 13 | .. image:: ../_static/network/kubernetes-ingress.png 14 | :alt: ingress-k8s 15 | 16 | 17 | 18 | 简单说,就是接收请求,并根据一定的路由规则,把请求转发到相应的Service上去。 19 | 20 | 两个要求: 21 | 22 | - Ingress Resource,就是一系列的路由规则 routing rules 23 | - Ingress Controller, 控制实现这些路由规则。(https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/) 24 | 25 | Ingress Controller 26 | --------------------- 27 | 28 | Nginx Ingress Controller: https://github.com/kubernetes/ingress-nginx/blob/main/docs/deploy/index.md 29 | 30 | 创建 Nginx ingress Controller 31 | 32 | .. code-block:: bash 33 | 34 | $ kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.2.1/deploy/static/provider/cloud/deploy.yaml 35 | 36 | 37 | .. note:: 38 | 39 | ``ingress-nginx`` 版本的选择要依据kubernetes的版本, 请参考表格https://github.com/kubernetes/ingress-nginx#support-versions-table 40 | 例如 k8s ``v1.24.x`` 应该选择ingress-nginx ``v1.3.0`` ,但是在课程录制的时候 ``v1.3.0`` 并没有发布,所以课程演示中我们使用的是 ``v1.2.1`` 41 | 42 | Type of Ingress 43 | -------------------------- 44 | 45 | Exposing a Single Service with Ingress 46 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 47 | 48 | Create a web server deployment and service 49 | 50 | .. code-block:: bash 51 | 52 | $ kubectl create deployment demo --image=httpd --port=80 53 | $ kubectl expose deployment demo 54 | 55 | Create ingress resource: 56 | 57 | .. code-block:: yaml 58 | 59 | apiVersion: networking.k8s.io/v1 60 | kind: Ingress 61 | metadata: 62 | name: demo-localhost 63 | namespace: default 64 | spec: 65 | ingressClassName: nginx 66 | rules: 67 | - host: demo.localdev.me 68 | http: 69 | paths: 70 | - backend: 71 | service: 72 | name: demo 73 | port: 74 | number: 80 75 | path: / 76 | pathType: Prefix 77 | 78 | 找到demo这个service的cluster IP(10.100.162.241),以及 ingress-nginx-controller这个service的ClusterIP(10.101.55.153, 79 | 80 | .. code-block:: bash 81 | 82 | kubectl get svc -A 83 | NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 84 | default demo ClusterIP 10.100.162.241 80/TCP 19m 85 | default kubernetes ClusterIP 10.96.0.1 443/TCP 37d 86 | ingress-nginx ingress-nginx-controller LoadBalancer 10.101.55.153 80:32765/TCP,443:32009/TCP 16m 87 | ingress-nginx ingress-nginx-controller-admission ClusterIP 10.110.135.56 443/TCP 16m 88 | kube-system kube-dns ClusterIP 10.96.0.10 53/UDP,53/TCP,9153/TCP 37d 89 | 90 | - 直接访问demo的clusterIP是可以的 91 | 92 | .. code-block:: bash 93 | 94 | $ curl 10.100.162.241 95 |
<html><body><h1>It works!</h1></body></html>
96 | 97 | - But accessing the ingress-nginx-controller's ClusterIP directly does not work: 98 | 99 | .. code-block:: bash 100 | 101 | $ curl 10.101.55.153 102 | <html> 103 | <head><title>404 Not Found</title></head> 104 | <body> 105 | <center><h1>404 Not Found</h1></center> 106 | <hr><center>nginx</center> 107 | </body> 108 | </html> 109 | 110 | Access must go through the host name instead; the prerequisite, of course, is that the host name is mapped to the ingress-nginx-controller's ClusterIP in the system hosts file. 111 | 112 | .. code-block:: bash 113 | 114 | $ curl demo.localdev.me 115 |
<html><body><h1>It works!</h1></body></html>
116 | $ more /etc/hosts | grep demo 117 | 10.101.55.153 demo.localdev.me 118 | 119 | Exposing Multiple Services with Ingress (Simple fanout ) 120 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 121 | 122 | .. image:: ../_static/network/ingress-fanout.png 123 | :alt: ingress-k8s-fanout 124 | 125 | 创建两个deployment,两个Service 126 | 127 | .. code-block:: bash 128 | 129 | $ kubectl create deployment web1 --image=gcr.io/google-samples/hello-app:1.0 --port=8080 --replicas=2 130 | $ kubectl expose deployment web1 --port 9001 --target-port 8080 131 | $ kubectl create deployment web2 --image=gcr.io/google-samples/hello-app:2.0 --port=8080 --replicas=2 132 | $ kubectl expose deployment web2 --port 9002 --target-port 8080 133 | 134 | $ kubectl get deployments.apps 135 | NAME READY UP-TO-DATE AVAILABLE AGE 136 | web1 2/2 2 2 2m3s 137 | web2 2/2 2 2 111s 138 | $ kubectl get svc 139 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 140 | kubernetes ClusterIP 10.96.0.1 443/TCP 39d 141 | web1 ClusterIP 10.99.37.121 9001/TCP 89s 142 | web2 ClusterIP 10.102.94.47 9002/TCP 81s 143 | 144 | 创建Ingress 145 | 146 | .. code-block:: yaml 147 | 148 | apiVersion: networking.k8s.io/v1 149 | kind: Ingress 150 | metadata: 151 | name: ingress-multiple 152 | spec: 153 | ingressClassName: nginx 154 | rules: 155 | - host: api.example.com 156 | http: 157 | paths: 158 | - path: /v1 159 | pathType: Prefix 160 | backend: 161 | service: 162 | name: web1 163 | port: 164 | number: 9001 165 | - path: /v2 166 | pathType: Prefix 167 | backend: 168 | service: 169 | name: web2 170 | port: 171 | number: 9002 172 | defaultBackend: 173 | service: 174 | name: web1 175 | port: 176 | number: 9001 177 | 178 | .. code-block:: bash 179 | 180 | $ kubectl get ingress 181 | NAME CLASS HOSTS ADDRESS PORTS AGE 182 | ingress-multiple nginx api.example.com 80 23m 183 | $ kubectl describe ingress ingress-multiple 184 | Name: ingress-multiple 185 | Labels: 186 | Namespace: default 187 | Address: 188 | Ingress Class: nginx 189 | Default backend: web1:9001 (10.244.1.212:8080,10.244.2.204:8080) 190 | Rules: 191 | Host Path Backends 192 | ---- ---- -------- 193 | api.example.com 194 | /v1 web1:9001 (10.244.1.212:8080,10.244.2.204:8080) 195 | /v2 web2:9002 (10.244.1.213:8080,10.244.2.205:8080) 196 | Annotations: 197 | Events: 198 | Type Reason Age From Message 199 | ---- ------ ---- ---- ------- 200 | Normal Sync 23m nginx-ingress-controller Scheduled for sync 201 | 202 | 设置hosts文件 203 | 204 | .. code-block:: bash 205 | 206 | $ more /etc/hosts 207 | 208 | 10.104.170.176 api.example.com 209 | 210 | 访问 211 | 212 | .. code-block:: bash 213 | 214 | $ curl api.example.com 215 | Hello, world! 216 | Version: 1.0.0 217 | Hostname: web1-7f6c665f7d-nmg8d 218 | $ curl api.example.com/v1 219 | Hello, world! 220 | Version: 1.0.0 221 | Hostname: web1-7f6c665f7d-472c2 222 | $ curl api.example.com/v2 223 | Hello, world! 224 | Version: 2.0.0 225 | Hostname: web2-8c85c8cd8-xw6f7 226 | $ 227 | 228 | 229 | Name Based Virtual Hosts with Ingress 230 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 231 | 232 | .. image:: ../_static/network/ingress-virtual-host.png 233 | :alt: ingress-k8s-vh 234 | 235 | .. 
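With name-based virtual hosting the controller routes on the HTTP ``Host`` header rather than on the path. Once the Ingress below is applied, both hosts can be tested without editing /etc/hosts by setting the header explicitly; a small sketch (the IP is your controller's address, e.g. the 10.104.170.176 used in the fanout example):

.. code-block:: bash

   # the Host header decides which backend answers; substitute your controller IP
   curl -H "Host: v1.api.example.com" http://10.104.170.176
   curl -H "Host: v2.api.example.com" http://10.104.170.176

..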
code-block:: yaml 236 | 237 | apiVersion: networking.k8s.io/v1 238 | kind: Ingress 239 | metadata: 240 | name: ingress-multiple 241 | spec: 242 | ingressClassName: nginx 243 | rules: 244 | - host: v1.api.example.com 245 | http: 246 | paths: 247 | - path: / 248 | pathType: Prefix 249 | backend: 250 | service: 251 | name: web1 252 | port: 253 | number: 9001 254 | - host: v2.api.example.com 255 | http: 256 | paths: 257 | - path: / 258 | pathType: Prefix 259 | backend: 260 | service: 261 | name: web2 262 | port: 263 | number: 9002 264 | 265 | 266 | Using TLS certificates for HTTPs Ingress 267 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 268 | 269 | 270 | 生成key 271 | 272 | .. code-block:: bash 273 | 274 | $ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj "/CN=api.example.com" 275 | Generating a RSA private key 276 | ...........+++++ 277 | ................................+++++ 278 | writing new private key to 'tls.key' 279 | ----- 280 | $ ls 281 | tls.crt tls.key 282 | 283 | 284 | Create secret: 285 | 286 | .. code-block:: bash 287 | 288 | $ kubectl create secret tls test-tls --key="tls.key" --cert="tls.crt" 289 | secret/test-tls created 290 | $ kubectl get secrets 291 | NAME TYPE DATA AGE 292 | test-tls kubernetes.io/tls 2 6s 293 | 294 | 295 | ingress-virtual-host.png 296 | 297 | .. code-block:: yaml 298 | 299 | apiVersion: networking.k8s.io/v1 300 | kind: Ingress 301 | metadata: 302 | name: ingress-https 303 | spec: 304 | ingressClassName: nginx 305 | tls: 306 | - hosts: 307 | - api.example.com 308 | secretName: test-tls 309 | rules: 310 | - host: api.example.com 311 | http: 312 | paths: 313 | - path: / 314 | pathType: Prefix 315 | backend: 316 | service: 317 | name: web1 318 | port: 319 | number: 9001 320 | 321 | 322 | 创建ingress。 323 | 324 | 修改hosts文件并测试 325 | 326 | .. code-block:: bash 327 | 328 | $ sudo more /etc/hosts | grep api 329 | 10.104.170.176 api.example.com 330 | $ curl https://api.example.com --insecure 331 | Hello, world! 332 | Version: 1.0.0 333 | Hostname: web1-7f6c665f7d-472c2 334 | -------------------------------------------------------------------------------- /source/network/pod-network.rst: -------------------------------------------------------------------------------- 1 | Pod Networking 2 | ================== 3 | 4 | Basic 5 | --------- 6 | 7 | - Pod share a network namespace 8 | - Containers in a Pod communicate over localhost 9 | 10 | 这一节需要安装的一些包(以Ubuntu为例) 11 | 12 | - ``bridge-utils`` 13 | - ``net-tools`` 14 | 15 | 在集群的所有节点上安装 16 | 17 | .. code-block:: bash 18 | 19 | $ sudo apt install bridge-utils net-tools 20 | 21 | 22 | 23 | .. image:: ../_static/network/pod-network.PNG 24 | :alt: pod-network 25 | 26 | 27 | 28 | Container to Container in Pod 29 | -------------------------------- 30 | 31 | 创建测试Pod 32 | 33 | .. code-block:: bash 34 | 35 | kubectl apply -f https://raw.githubusercontent.com/xiaopeng163/learn-k8s-from-scratch/master/source/_code/network/container-to-container.yml 36 | 37 | 38 | .. literalinclude:: ../_code/network/container-to-container.yml 39 | :language: yaml 40 | :linenos: 41 | 42 | 获取到pod所在节点和pod的IP 43 | 44 | .. code-block:: bash 45 | 46 | kubectl get pods -o wide 47 | NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES 48 | my-pod 2/2 Running 0 9s 10.244.1.4 k8s-worker1 49 | 50 | 这个pod的两个container共享一个network namespace,所以他们可以通过localhost进行通信,ip地址是10.244.1.4 51 | 52 | 可以在k8s-worker1节点上通过以下命令查看验证 53 | 54 | .. 
code-block:: bash 55 | 56 | # get the container id 57 | $ sudo ctr --namespace=k8s.io container ls | grep net-box 58 | 4bcad54d44ec02d7a55cc997e435217dff7c6e3ffa7e65b8940817da50d61b33 docker.io/xiaopeng163/net-box:latest io.containerd.runc.v2 59 | c1c0044f5f11ee4418240ec3a8e4e3d0e8a50257b4d4ab050dcd04b32f91ecd4 docker.io/xiaopeng163/net-box:latest io.containerd.runc.v2 60 | 61 | # get the network namespace for each container 62 | $ sudo ctr --namespace=k8s.io container info 4bcad54d44ec02d7a55cc997e435217dff7c6e3ffa7e65b8940817da50d61b33 | jq '.Spec.linux.namespaces' 63 | [ 64 | { 65 | "type": "pid" 66 | }, 67 | { 68 | "type": "ipc", 69 | "path": "/proc/14188/ns/ipc" 70 | }, 71 | { 72 | "type": "uts", 73 | "path": "/proc/14188/ns/uts" 74 | }, 75 | { 76 | "type": "mount" 77 | }, 78 | { 79 | "type": "network", 80 | "path": "/proc/14188/ns/net" 81 | }, 82 | { 83 | "type": "cgroup" 84 | } 85 | ] 86 | $ sudo ctr --namespace=k8s.io container info c1c0044f5f11ee4418240ec3a8e4e3d0e8a50257b4d4ab050dcd04b32f91ecd4 | jq '.Spec.linux.namespaces' 87 | [ 88 | { 89 | "type": "pid" 90 | }, 91 | { 92 | "type": "ipc", 93 | "path": "/proc/14188/ns/ipc" 94 | }, 95 | { 96 | "type": "uts", 97 | "path": "/proc/14188/ns/uts" 98 | }, 99 | { 100 | "type": "mount" 101 | }, 102 | { 103 | "type": "network", 104 | "path": "/proc/14188/ns/net" 105 | }, 106 | { 107 | "type": "cgroup" 108 | } 109 | ] 110 | 111 | 112 | # check the ipaddress for the network namespace 113 | $ sudo nsenter -t 14188 -n ip addr 114 | 1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 115 | link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 116 | inet 127.0.0.1/8 scope host lo 117 | valid_lft forever preferred_lft forever 118 | inet6 ::1/128 scope host 119 | valid_lft forever preferred_lft forever 120 | 2: eth0@if5: mtu 1450 qdisc noqueue state UP group default 121 | link/ether b6:87:92:e4:40:43 brd ff:ff:ff:ff:ff:ff link-netnsid 0 122 | inet 10.244.1.4/24 brd 10.244.1.255 scope global eth0 123 | valid_lft forever preferred_lft forever 124 | inet6 fe80::b487:92ff:fee4:4043/64 scope link 125 | valid_lft forever preferred_lft forever 126 | 127 | 结论就是两个container可以通过localhost进行通信,它们在同一个network namespace中。 128 | 129 | 清理测试Pod 130 | 131 | .. code-block:: bash 132 | 133 | kubectl delete -f https://raw.githubusercontent.com/xiaopeng163/learn-k8s-from-scratch/master/source/_code/network/container-to-container.yml 134 | 135 | 136 | Pod to Pod (single node) 137 | ----------------------------- 138 | 139 | 创建测试Pod 140 | 141 | .. code-block:: bash 142 | 143 | kubectl apply -f https://raw.githubusercontent.com/xiaopeng163/learn-k8s-from-scratch/master/source/_code/network/pod-to-pod-single-node.yml 144 | 145 | .. literalinclude:: ../_code/network/pod-to-pod-single-node.yml 146 | :language: yaml 147 | :linenos: 148 | 149 | 150 | Pod to Pod (multi-Node) 151 | ----------------------------- 152 | 153 | .. 
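The two single-container Pods in the manifest below will normally be scheduled onto different workers, which makes them a handy cross-node test. A verification sketch (the Pod names come from the manifest; the IP is whatever ``kubectl get pods -o wide`` reports for mypod2):

.. code-block:: bash

   kubectl get pods -o wide                        # note each Pod's IP and node
   kubectl exec mypod1 -- ping -c 3 <mypod2-ip>    # should succeed across nodes

..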
code-block:: yaml 154 | 155 | apiVersion: v1 156 | kind: Pod 157 | metadata: 158 | name: mypod1 159 | spec: 160 | containers: 161 | - name: pod1 162 | image: xiaopeng163/net-box 163 | command: ["sh", "-c", "while true; do echo $(date) >> /tmp/index.html; sleep 60; done"] 164 | --- 165 | apiVersion: v1 166 | kind: Pod 167 | metadata: 168 | name: mypod2 169 | spec: 170 | containers: 171 | - name: pod2 172 | image: xiaopeng163/net-box 173 | command: ["sh", "-c", "while true; do echo $(date) >> /tmp/index.html; sleep 60; done"] 174 | 175 | 176 | References 177 | ------------------- 178 | 179 | https://kubernetes.io/docs/concepts/cluster-administration/networking/ 180 | 181 | https://medium.com/@anilkreddyr/kubernetes-with-flannel-understanding-the-networking-part-2-78b53e5364c7 182 | -------------------------------------------------------------------------------- /source/network/resource.rst: -------------------------------------------------------------------------------- 1 | Resource 2 | ============== 3 | 4 | A Visual Guide to Kubernetes Networking Fundamentals 5 | ----------------------------------------------------------- 6 | 7 | 8 | https://github.com/xiaopeng163/learn-k8s-from-scratch/blob/master/source/_static/pdf/k8s-network.pdf 9 | 10 | .. :pdfembed:`src:../_static/pdf/k8s-network.pdf, height:1600, width:1000, align:middle` -------------------------------------------------------------------------------- /source/network/service-auto.rst: -------------------------------------------------------------------------------- 1 | Service Discovery 2 | ==================== 3 | 4 | Kubernetes 服务的自动发现。 5 | 6 | 7 | 源码 https://github.com/xiaopeng163/flask-redis 8 | 9 | .. code-block:: yaml 10 | 11 | apiVersion: apps/v1 12 | kind: Deployment 13 | metadata: 14 | name: redis-deployment 15 | spec: 16 | replicas: 1 17 | selector: 18 | matchLabels: 19 | app: redis-server 20 | template: 21 | metadata: 22 | labels: 23 | app: redis-server 24 | spec: 25 | containers: 26 | - name: redis-server 27 | image: redis:latest 28 | command: 29 | - redis-server 30 | - --requirepass 31 | - redis 32 | ports: 33 | - containerPort: 6379 34 | --- 35 | apiVersion: v1 36 | kind: Service 37 | metadata: 38 | name: redis 39 | spec: 40 | selector: 41 | app: redis-server 42 | ports: 43 | - protocol: TCP 44 | port: 6379 45 | targetPort: 6379 46 | 47 | --- 48 | apiVersion: apps/v1 49 | kind: Deployment 50 | metadata: 51 | name: flask-deployment 52 | spec: 53 | replicas: 1 54 | selector: 55 | matchLabels: 56 | app: flask-app 57 | template: 58 | metadata: 59 | labels: 60 | app: flask-app 61 | spec: 62 | containers: 63 | - name: flask-app 64 | image: xiaopeng163/flask-redis 65 | ports: 66 | - containerPort: 5000 67 | env: 68 | - name: REDIS_HOST 69 | value: ???????????????????????? 70 | --- 71 | apiVersion: v1 72 | kind: Service 73 | metadata: 74 | name: flask-service 75 | spec: 76 | selector: 77 | app: flask-app 78 | ports: 79 | - protocol: TCP 80 | port: 8000 81 | targetPort: 5000 82 | 83 | 84 | 85 | 86 | DNS 87 | ------ 88 | 89 | (国内无法访问google的朋友,请替换下面image为 `xiaopeng163/hello-app:1.0`) 90 | 91 | .. code-block:: bash 92 | 93 | $ kubectl create deployment demo --image=gcr.io/google-samples/hello-app:1.0 --port=8080 94 | $ kubectl expose deployment demo 95 | 96 | 这时不仅有一个clusterIP可供整个cluster访问,同时一个DNS域名也被注册了(类似前面讲过的POD)。 97 | 98 | 我们可以在集群节点上访问以下域名,(当然要指定DNS server是coredns的地址) 99 | 100 | .. 
code-block:: bash 101 | 102 | $ kubectl get svc --namespace kube-system 103 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 104 | kube-dns ClusterIP 10.96.0.10 <none> 53/UDP,53/TCP,9153/TCP 37d 105 | $ nslookup demo.default.svc.cluster.local 10.96.0.10 106 | Server: 10.96.0.10 107 | Address: 10.96.0.10#53 108 | 109 | Name: demo.default.svc.cluster.local 110 | Address: 10.105.71.223 111 | 112 | That is, ``<service-name>.<namespace>.svc.cluster.local`` 113 | 114 | If we now create another pod, from inside it we can reach the demo Service and the Deployment behind it: 115 | 116 | .. code-block:: bash 117 | 118 | $ kubectl run client --image=xiaopeng163/net-box --command -- sh -c "sleep 100000" 119 | $ kubectl exec -it client -- sh 120 | /omd # nslookup demo.default.svc.cluster.local 121 | ;; Truncated, retrying in TCP mode. 122 | Server: 10.96.0.10 123 | Address: 10.96.0.10#53 124 | 125 | Name: demo.default.svc.cluster.local 126 | Address: 10.105.71.223 127 | 128 | /omd # curl demo.default.svc.cluster.local:8080 129 | Hello, world! 130 | Version: 1.0.0 131 | Hostname: demo-557f884dd8-7n55c 132 | /omd # 133 | 134 | 135 | ENV 136 | ------ 137 | 138 | Some environment variables are also registered inside the cluster. If we now create another Pod: 139 | 140 | .. code-block:: bash 141 | 142 | $ kubectl run client --image=xiaopeng163/net-box --command -- sh -c "sleep 100000" 143 | $ kubectl exec -it client -- sh 144 | /omd # env | grep DEMO 145 | DEMO_SERVICE_HOST=10.105.71.223 146 | DEMO_PORT_8080_TCP_ADDR=10.105.71.223 147 | DEMO_PORT_8080_TCP_PORT=8080 148 | DEMO_PORT_8080_TCP_PROTO=tcp 149 | DEMO_PORT=tcp://10.105.71.223:8080 150 | DEMO_SERVICE_PORT=8080 151 | DEMO_PORT_8080_TCP=tcp://10.105.71.223:8080 152 | -------------------------------------------------------------------------------- /source/network/service.rst: -------------------------------------------------------------------------------- 1 | Services 2 | ========= 3 | 4 | https://kubernetes.io/docs/concepts/services-networking/service/ 5 | 6 | - Persistent endpoint access for clients 7 | - Adds persistence on top of ephemeral Pods 8 | - Networking abstraction providing a persistent virtual IP and DNS 9 | - Load balances to the backend Pods 10 | - Automatically updated during pod controller operations 11 | 12 | 13 | How Services Work 14 | --------------------- 15 | 16 | Services match Pods using Labels and Selectors 17 | 18 | Creates and registers Endpoints in the Service (Pod IP and port pair) 19 | 20 | Implemented by kube-proxy on each Node, in iptables 21 | 22 | Kube-proxy watches the API Server and Endpoints 23 | 24 | .. image:: ../_static/network/service.gif 25 | :alt: pod-network 26 | 27 | 28 | Lab 29 | ----- 30 | 31 | Create a Deployment (if you cannot reach google from China, replace the image below with `xiaopeng163/hello-app:1.0`) 32 | 33 | .. code-block:: yaml 34 | 35 | apiVersion: apps/v1 36 | kind: Deployment 37 | metadata: 38 | name: hello-world 39 | spec: 40 | replicas: 1 41 | selector: 42 | matchLabels: 43 | app: hello-world 44 | template: 45 | metadata: 46 | labels: 47 | app: hello-world 48 | spec: 49 | containers: 50 | - name: hello-world 51 | image: gcr.io/google-samples/hello-app:1.0 52 | ports: 53 | - containerPort: 8080 54 | 55 | Service Types 56 | --------------- 57 | 58 | ClusterIP (Default) 59 | ~~~~~~~~~~~~~~~~~~~~~~ 60 | 61 | When an application doesn't need to be accessed from outside the cluster, the Service is assigned an internal cluster IP. 62 | 63 | ..
code-block:: bash 64 | 65 | $ kubectl expose deployment hello-world --port=80 --target-port=8080 66 | $ kubectl get svc 67 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 68 | hello-world ClusterIP 10.107.138.203 80/TCP 119s 69 | kubernetes ClusterIP 10.96.0.1 443/TCP 36d 70 | $ curl 10.107.138.203 71 | Hello, world! 72 | Version: 1.0.0 73 | Hostname: hello-world-55594b4d48-nmtvd 74 | $ curl 10.107.138.203 75 | Hello, world! 76 | Version: 1.0.0 77 | Hostname: hello-world-55594b4d48-st7fx 78 | 79 | yaml format 80 | 81 | .. code-block:: bash 82 | 83 | $ kubectl get svc hello-world -o yaml 84 | 85 | 86 | How it works (deep dive) 87 | 88 | .. code-block:: bash 89 | 90 | $ sudo iptables -t nat -L KUBE-SERVICES -n | column -t 91 | Chain KUBE-SERVICES (2 references) 92 | target prot opt source destination 93 | KUBE-SVC-JD5MR3NA4I4DYORP tcp -- 0.0.0.0/0 10.96.0.10 /* kube-system/kube-dns:metrics cluster IP */ tcp dpt:9153 94 | KUBE-SVC-NPX46M4PTMTKRN6Y tcp -- 0.0.0.0/0 10.96.0.1 /* default/kubernetes:https cluster IP */ tcp dpt:443 95 | KUBE-SVC-DZ6LTOHRG6HQWHYE tcp -- 0.0.0.0/0 10.107.138.203 /* default/hello-world cluster IP */ tcp dpt:80 96 | KUBE-SVC-TCOU7JCQXEZGVUNU udp -- 0.0.0.0/0 10.96.0.10 /* kube-system/kube-dns:dns cluster IP */ udp dpt:53 97 | KUBE-SVC-ERIFXISQEP7F7OF4 tcp -- 0.0.0.0/0 10.96.0.10 /* kube-system/kube-dns:dns-tcp cluster IP */ tcp dpt:53 98 | KUBE-NODEPORTS all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service nodeports; NOTE: this must be the last rule in this chain */ ADDRTYPE match dst-type LOCAL 99 | 100 | $ sudo iptables -t nat -L KUBE-SVC-DZ6LTOHRG6HQWHYE -n | column -t 101 | Chain KUBE-SVC-DZ6LTOHRG6HQWHYE (1 references) 102 | target prot opt source destination 103 | KUBE-MARK-MASQ tcp -- !10.244.0.0/16 10.107.138.203 /* default/hello-world cluster IP */ tcp dpt:80 104 | KUBE-SEP-W2IFVPZJILTBGJON all -- 0.0.0.0/0 0.0.0.0/0 /* default/hello-world -> 10.244.1.199:8080 */ statistic mode random probability 0.50000000000 105 | KUBE-SEP-QWI4LEXVO5GRYADO all -- 0.0.0.0/0 0.0.0.0/0 /* default/hello-world -> 10.244.2.190:8080 */ 106 | 107 | 108 | cleanup and delete the service 109 | 110 | NodePort 111 | ~~~~~~~~~~~ 112 | 113 | This makes the service accessible on a static port on each Node in the cluster. 114 | 115 | .. code-block:: bash 116 | 117 | $ kubectl expose deployment hello-world --target-port=8080 --type=NodePort 118 | service/hello-world exposed 119 | $ kubectl get svc 120 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 121 | hello-world NodePort 10.111.34.177 8080:30583/TCP 5s 122 | kubernetes ClusterIP 10.96.0.1 443/TCP 36d 123 | 124 | $ curl 127.0.0.1:30583 125 | Hello, world! 126 | Version: 1.0.0 127 | Hostname: hello-world-7c649d8c6f-pqbdt 128 | 129 | 130 | iptables rules 131 | 132 | .. 
code-block:: bash 133 | 134 | $ sudo iptables -t nat -L PREROUTING | column -t 135 | Chain PREROUTING (policy ACCEPT) 136 | target prot opt source destination 137 | KUBE-SERVICES all -- anywhere anywhere /* kubernetes service portals */ 138 | $ sudo iptables -t nat -L KUBE-SERVICES -n | column -t 139 | Chain KUBE-SERVICES (2 references) 140 | target prot opt source destination 141 | KUBE-SVC-NPX46M4PTMTKRN6Y tcp -- 0.0.0.0/0 10.96.0.1 /* default/kubernetes:https cluster IP */ tcp dpt:443 142 | KUBE-SVC-DZ6LTOHRG6HQWHYE tcp -- 0.0.0.0/0 10.111.34.177 /* default/hello-world cluster IP */ tcp dpt:8080 143 | KUBE-SVC-TCOU7JCQXEZGVUNU udp -- 0.0.0.0/0 10.96.0.10 /* kube-system/kube-dns:dns cluster IP */ udp dpt:53 144 | KUBE-SVC-ERIFXISQEP7F7OF4 tcp -- 0.0.0.0/0 10.96.0.10 /* kube-system/kube-dns:dns-tcp cluster IP */ tcp dpt:53 145 | KUBE-SVC-JD5MR3NA4I4DYORP tcp -- 0.0.0.0/0 10.96.0.10 /* kube-system/kube-dns:metrics cluster IP */ tcp dpt:9153 146 | KUBE-NODEPORTS all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service nodeports; NOTE: this must be the last rule in this chain */ ADDRTYPE match dst-type LOCAL 147 | $ sudo iptables -t nat -L KUBE-NODEPORTS -n | column -t 148 | Chain KUBE-NODEPORTS (1 references) 149 | target prot opt source destination 150 | KUBE-EXT-DZ6LTOHRG6HQWHYE tcp -- 0.0.0.0/0 0.0.0.0/0 /* default/hello-world */ tcp dpt:30583 151 | $ sudo iptables -t nat -L KUBE-EXT-DZ6LTOHRG6HQWHYE -n | column -t 152 | Chain KUBE-EXT-DZ6LTOHRG6HQWHYE (1 references) 153 | target prot opt source destination 154 | KUBE-MARK-MASQ all -- 0.0.0.0/0 0.0.0.0/0 /* masquerade traffic for default/hello-world external destinations */ 155 | KUBE-SVC-DZ6LTOHRG6HQWHYE all -- 0.0.0.0/0 0.0.0.0/0 156 | $ sudo iptables -t nat -L KUBE-SVC-DZ6LTOHRG6HQWHYE -n | column -t 157 | Chain KUBE-SVC-DZ6LTOHRG6HQWHYE (2 references) 158 | target prot opt source destination 159 | KUBE-MARK-MASQ tcp -- !10.244.0.0/16 10.111.34.177 /* default/hello-world cluster IP */ tcp dpt:8080 160 | KUBE-SEP-KNPMKP4TJWLYHY4M all -- 0.0.0.0/0 0.0.0.0/0 /* default/hello-world -> 10.244.1.200:8080 */ statistic mode random probability 0.50000000000 161 | KUBE-SEP-ZYVBQARSCNBBR4HH all -- 0.0.0.0/0 0.0.0.0/0 /* default/hello-world -> 10.244.2.192:8080 */ 162 | $ 163 | 164 | 165 | LoadBalancer 166 | ~~~~~~~~~~~~~ 167 | 168 | The service becomes accessible externally through a cloud provider's load balancer functionality. GCP, AWS, Azure, and OpenStack offer this functionality. 169 | 170 | Azure kubernetes cluster 171 | 172 | .. code-block:: bash 173 | 174 | $ kubectl get nodes -o wide 175 | NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME 176 | aks-services-38564575-vmss000003 Ready agent 4m56s v1.21.9 10.224.0.4 Ubuntu 18.04.6 LTS 5.4.0-1083-azure containerd://1.4.13+azure-3 177 | aks-workspaces-33629094-vmss000003 Ready agent 5m10s v1.21.9 10.224.0.5 Ubuntu 18.04.6 LTS 5.4.0-1083-azure containerd://1.4.13+azure-3 178 | 179 | create Deoployment,(国内无法访问google的朋友,请替换下面image为 `xiaopeng163/hello-app:1.0`) 180 | 181 | .. code-block:: yaml 182 | 183 | apiVersion: apps/v1 184 | kind: Deployment 185 | metadata: 186 | name: hello-world 187 | spec: 188 | replicas: 1 189 | selector: 190 | matchLabels: 191 | app: hello-world 192 | template: 193 | metadata: 194 | labels: 195 | app: hello-world 196 | spec: 197 | containers: 198 | - name: hello-world 199 | image: gcr.io/google-samples/hello-app:1.0 200 | ports: 201 | - containerPort: 8080 202 | 203 | .. 
code-block:: bash 204 | 205 | $ kubectl get deployments.apps 206 | NAME READY UP-TO-DATE AVAILABLE AGE 207 | hello-world 2/2 2 2 5m50s 208 | $ kubectl get pods -o wide 209 | NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES 210 | hello-world-54575d5b77-cp75f 1/1 Running 0 6m4s 10.244.1.5 aks-services-38564575-vmss000003 211 | hello-world-54575d5b77-g24fm 1/1 Running 0 6m4s 10.244.1.6 aks-services-38564575-vmss000003 212 | $ 213 | 214 | create Service 215 | 216 | .. code-block:: bash 217 | 218 | $ kubectl expose deployment hello-world --port=80 --target-port=8080 --type=LoadBalancer 219 | service/hello-world exposed 220 | $ kubectl get service 221 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 222 | hello-world LoadBalancer 10.0.202.198 23.97.235.50 80:31468/TCP 29s 223 | 224 | 打开浏览器访问 http://23.97.235.50/ 225 | 226 | -------------------------------------------------------------------------------- /source/scheduling.rst: -------------------------------------------------------------------------------- 1 | Scheduling 2 | ============== 3 | 4 | Scheduling 就是找到一个合适的node去运行pod的过程,这个寻找主要是通过 ``kube-scheduler`` 实现的. 5 | 6 | 以创建Pod为例 7 | 8 | - 通过api server获取node信息 9 | - node选择: 10 | - 过滤 (各种filter过滤,硬件的限制) 11 | - Scoring (过滤出的节点,进一步评分筛选) 12 | - Bind (选择出一个节点,最后和API object绑定) 13 | - 更新pod的信息,包括在哪个node上 14 | - 被选节点的kubelet通过监控api-server得知自己被选择创建一个pod 15 | - kubelet驱动container runtime创建container并启动 16 | 17 | https://www.alibabacloud.com/blog/getting-started-with-kubernetes-%7C-scheduling-process-and-scheduler-algorithms_596299 18 | 19 | 20 | .. toctree:: 21 | :maxdepth: 2 22 | :caption: Contents: 23 | 24 | scheduling/node-selector 25 | scheduling/affinity 26 | scheduling/taints 27 | scheduling/cordoning 28 | scheduling/manual-pod 29 | -------------------------------------------------------------------------------- /source/scheduling/affinity.rst: -------------------------------------------------------------------------------- 1 | Affinity and Anti-Affinity 2 | ============================= 3 | 4 | 5 | nodeAffinity 6 | -------------- 7 | 8 | Uses labels on Nodes to make a scheduling decision with ``matchExpressions`` 9 | 10 | https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity/ 11 | 12 | requiredDuringSchedulingIgnoredDuringExecution 13 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 14 | 15 | .. code-block:: yaml 16 | 17 | apiVersion: v1 18 | kind: Pod 19 | metadata: 20 | name: nginx 21 | spec: 22 | affinity: 23 | nodeAffinity: 24 | requiredDuringSchedulingIgnoredDuringExecution: 25 | nodeSelectorTerms: 26 | - matchExpressions: 27 | - key: disktype 28 | operator: In 29 | values: 30 | - ssd 31 | containers: 32 | - name: nginx 33 | image: nginx 34 | imagePullPolicy: IfNotPresent 35 | 36 | preferredDuringSchedulingIgnoredDuringExecution 37 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 38 | 39 | .. 
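Unlike the ``required`` form above, ``preferred`` is a soft constraint: the scheduler favors Nodes matching the expression (weighted 1-100), but will still place the Pod on a non-matching Node if no match is available, as in the manifest below.

..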
code-block:: yaml 40 | 41 | apiVersion: v1 42 | kind: Pod 43 | metadata: 44 | name: nginx 45 | spec: 46 | affinity: 47 | nodeAffinity: 48 | preferredDuringSchedulingIgnoredDuringExecution: 49 | - weight: 1 50 | preference: 51 | matchExpressions: 52 | - key: disktype 53 | operator: In 54 | values: 55 | - ssd 56 | containers: 57 | - name: nginx 58 | image: nginx 59 | imagePullPolicy: IfNotPresent 60 | 61 | podAffinity and podAntiAffinity 62 | ------------------------------------ 63 | 64 | schedule pods onto the same or different node as some other pod 65 | 66 | https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ 67 | -------------------------------------------------------------------------------- /source/scheduling/cordoning.rst: -------------------------------------------------------------------------------- 1 | Cordoning 2 | ============= 3 | 4 | 5 | Cordoning 6 | ----------- 7 | 8 | Cordoning marks a node as unschedulable; once marked, no new Pods will be scheduled onto it. 9 | Pods already running on the node are not affected. 10 | 11 | 12 | .. code-block:: bash 13 | 14 | $ kubectl cordon <node-name> 15 | 16 | When a node is about to be maintained, it is usually cordoned first. 17 | 18 | drain 19 | --------- 20 | 21 | ``drain`` gracefully evicts the Pods running on a node. 22 | 23 | 24 | .. code-block:: bash 25 | 26 | $ kubectl drain <node-name> --ignore-daemonsets 27 | 28 | uncordon 29 | ------------ 30 | 31 | Marks a node as schedulable again. 32 | -------------------------------------------------------------------------------- /source/scheduling/manual-pod.rst: -------------------------------------------------------------------------------- 1 | Manual Scheduling 2 | =================== 3 | 4 | Manual scheduling means directly specifying the Node a Pod runs on. 5 | 6 | 7 | For example, the Pod below is pinned to the k8s-worker1 node. 8 | 9 | .. code-block:: yaml 10 | 11 | apiVersion: v1 12 | kind: Pod 13 | metadata: 14 | name: web 15 | spec: 16 | nodeName: 'k8s-worker1' 17 | containers: 18 | - name: nginx-container 19 | image: nginx:latest 20 | 21 | 22 | Two questions (``nodeName`` bypasses the kube-scheduler): 23 | 24 | - 1. Will a tainted node accept such a pod? Yes. 25 | - 2. Will a cordoned node accept such a pod? Yes. 26 | 27 | 28 | -------------------------------------------------------------------------------- /source/scheduling/node-selector.rst: -------------------------------------------------------------------------------- 1 | Node Selector 2 | ============================= 3 | 4 | ``nodeSelector`` Assign pods to Nodes using Labels and Selectors 5 | 6 | - Apply Labels to Nodes 7 | - Scheduler will assign Pods to a Node with a matching Label 8 | 9 | Put simply, it is key-value matching. 10 | 11 | 12 | Pod with nodeSelector 13 | ------------------------- 14 | 15 | .. code-block:: yaml 16 | 17 | apiVersion: v1 18 | kind: Pod 19 | metadata: 20 | name: web 21 | spec: 22 | containers: 23 | - name: hello-world 24 | image: nginx 25 | nodeSelector: 26 | hardware: local_gpu 27 | 28 | Status will be Pending; the events can be inspected with ``kubectl describe pod web``: 29 | 30 | .. code-block:: bash 31 | 32 | $ kubectl get pods -o wide 33 | NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES 34 | web 0/1 Pending 0 8s <none> <none> <none> <none> 35 | 36 | 37 | Add the label to the Node: 38 | 39 | .. code-block:: bash 40 | 41 | $ kubectl label nodes k8s-worker1 hardware=local_gpu 42 | 43 | 44 | 45 | -------------------------------------------------------------------------------- /source/scheduling/taints.rst: -------------------------------------------------------------------------------- 1 | Taints and Tolerations 2 | ========================= 3 | 4 | https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ 5 | 6 | Taints 7 | -------- 8 | 9 | Node affinity gives Pods the ability to select Nodes.
10 | 11 | Taints are the opposite -- they let a Node repel specific Pods. That is, they are applied to Nodes. 12 | 13 | Tolerations 14 | -------------- 15 | 16 | Tolerations are applied to pods. 17 | Tolerations allow the scheduler to schedule pods with matching taints 18 | 19 | 20 | Demo 21 | -------- 22 | 23 | Add a taint to a Node; a taint consists of a key, a value, and an effect. 24 | For example, the one below means no Pod can be scheduled onto this node unless it has a matching toleration. 25 | 26 | .. code-block:: bash 27 | 28 | $ kubectl taint nodes k8s-worker1 key1=value1:NoSchedule # to add 29 | $ kubectl taint nodes k8s-worker1 key1=value1:NoSchedule- # to delete 30 | 31 | 32 | create a deployment with replica=3 (no pod will be scheduled on k8s-worker1) 33 | 34 | .. code-block:: bash 35 | 36 | $ kubectl taint nodes k8s-worker1 color=red:NoSchedule 37 | $ kubectl create deployment web --image=nginx --replicas=3 38 | 39 | 40 | add Tolerations 41 | 42 | .. code-block:: yaml 43 | 44 | apiVersion: apps/v1 45 | kind: Deployment 46 | metadata: 47 | name: web 48 | spec: 49 | replicas: 3 50 | selector: 51 | matchLabels: 52 | app: web 53 | template: 54 | metadata: 55 | labels: 56 | app: web 57 | spec: 58 | containers: 59 | - image: nginx 60 | name: nginx 61 | tolerations: 62 | - key: "color" 63 | operator: "Equal" 64 | value: "red" 65 | effect: "NoSchedule" 66 | -------------------------------------------------------------------------------- /source/security.rst: -------------------------------------------------------------------------------- 1 | Security 2 | ============== 3 | 4 | - Kubernetes Security Fundamentals 5 | - Managing Certificates and kubeconfig files 6 | - Managing Role Based Access Controls 7 | 8 | Prerequisites: 9 | 10 | - Difference between Authentication and Authorization: 11 | 12 | - Authentication: who are you? 13 | - Authorization: what are you allowed to do? 14 | 15 | - Background on PKI, digital signatures and certificates: 16 | 17 | - https://learn-cryptography.readthedocs.io/zh/latest/digital-signature/ 18 | - Learn-cryptography YouTube playlist https://www.youtube.com/playlist?list=PLfQqWeOCIH4AZt3TiSRP4UuL_Y3gxYPAW 19 | - Learn-cryptography Bilibili playlist https://www.bilibili.com/video/BV1WF411x7mN?spm_id_from=333.999.section.playall 20 | 21 | - One-way vs. mutual (two-way) SSL authentication 22 | 23 | All Kubernetes clusters have two categories of users: 24 | 25 | - service accounts managed by Kubernetes, used by programs (Pods) connecting to the API Server 26 | - normal users, e.g. humans connecting to the API Server via kubectl 27 | 28 | 29 | .. toctree:: 30 | :maxdepth: 2 31 | :caption: Contents: 32 | 33 | security/kubeconfig 34 | security/RBAC 35 | security/service_account 36 | -------------------------------------------------------------------------------- /source/security/RBAC.rst: -------------------------------------------------------------------------------- 1 | Role Based Access Control 2 | ============================= 3 | 4 | 5 | API Objects 6 | ---------------- 7 | 8 | - Role and ClusterRole 9 | - RoleBinding and ClusterRoleBinding 10 | 11 | 12 | Roles 13 | ---------- 14 | 15 | A Role defines what operations may be performed on which k8s resources; Roles are namespaced. 16 | 17 | 18 | .. code-block:: bash 19 | 20 | $ kubectl create role demorole --verb=get,list --resource=pods --namespace ns1 21 | 22 | $ kubectl create role demorole --verb=* --resource=pods --namespace ns1 23 | 24 | ClusterRoles 25 | ---------------- 26 | 27 | For cluster-level resources such as Nodes and PersistentVolumes (across namespaces). 28 | 29 |
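Before writing your own, it can help to look at the ClusterRoles a cluster already ships with; a quick sketch (built-ins such as cluster-admin, view and edit should appear on any kubeadm cluster):

.. code-block:: bash

   kubectl get clusterroles | head
   kubectl describe clusterrole view | head -20

..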
code-block:: bash 30 | 31 | $ kubectl create clusterrole democlusterrole --verb=get,list --resource=nodes 32 | 33 | RoleBinding/ClusterRoleBinding 34 | ------------------------------------------- 35 | 36 | Role/ClusterRole 定义了可以做什么。 37 | 38 | RoleBindingClusterRoleBinding是定义了谁可以做在这个Role/ClusterRole定义 39 | 40 | RoleBinding 41 | ~~~~~~~~~~~~~~~~~~ 42 | 43 | .. code-block:: bash 44 | 45 | $ kubectl create rolebinding demorolebinding --role=demorole --user=demouser --namespace ns1 46 | 47 | ClusterRoleBinding 48 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~ 49 | 50 | 51 | .. code-block:: bash 52 | 53 | $ kubectl create clusterrolebinding democlusterrolebinding --clusterrole=democlusterrole --user=demouser 54 | 55 | 56 | 使用场景 57 | ~~~~~~~~~~~~ 58 | 59 | Role/RoleBinding 一般用于单个namespace去定义权限 60 | 61 | CLusterRole/ClusterRoleBinding 一般用于所有的namespace 62 | 63 | Test 64 | ------------ 65 | 66 | 67 | Role and RoleBinding 68 | ~~~~~~~~~~~~~~~~~~~~~~~ 69 | 70 | .. code-block:: bash 71 | 72 | # 以管理员身份创建一些资源 73 | $ kubectl config use-context kubernetes-admin@kubernetes 74 | $ kubectl create namespace ns1 75 | $ kubectl create deployment web1 --namespace=ns1 --image=gcr.io/google-samples/hello-app:1.0 --port=8080 --replicas=2 76 | 77 | # test 78 | $ kubectl auth can-i list pod 79 | yes 80 | $ kubectl auth can-i list pod --as demouser 81 | no 82 | 83 | # create role and role binding 84 | $ kubectl create role demorole --verb=get,list --resource=pods --namespace ns1 85 | role.rbac.authorization.k8s.io/demorole created 86 | $ kubectl create rolebinding demorolebinding --role=demorole --user=demouser --namespace ns1 87 | rolebinding.rbac.authorization.k8s.io/demorolebinding created 88 | 89 | # test 90 | $ kubectl auth can-i list pod --as demouser 91 | no 92 | $ kubectl auth can-i list pod --as demouser --namespace ns1 93 | yes 94 | $ kubectl get pods --namespace ns1 --as demouser 95 | NAME READY STATUS RESTARTS AGE 96 | web1-7f6c665f7d-65h6v 1/1 Running 0 9m38s 97 | web1-7f6c665f7d-n54t5 1/1 Running 0 9m38s 98 | $ kubectl auth can-i delete pod --as demouser --namespace ns1 99 | no 100 | $ kubectl auth can-i list node --as demouser --namespace ns1 101 | Warning: resource 'nodes' is not namespace scoped 102 | no 103 | $ kubectl auth can-i list deployment --as demouser --namespace ns1 104 | no 105 | 106 | 107 | ClusterRole and ClusterRoleBinding 108 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 109 | 110 | .. 
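The imperative commands below also have a declarative form; a minimal sketch of the same ClusterRole/ClusterRoleBinding pair as YAML (names match the test that follows):

.. code-block:: yaml

   apiVersion: rbac.authorization.k8s.io/v1
   kind: ClusterRole
   metadata:
     name: democlusterrole
   rules:
     - apiGroups: [""]
       resources: ["nodes"]
       verbs: ["list"]
   ---
   apiVersion: rbac.authorization.k8s.io/v1
   kind: ClusterRoleBinding
   metadata:
     name: democlusterrolebinding
   roleRef:
     apiGroup: rbac.authorization.k8s.io
     kind: ClusterRole
     name: democlusterrole
   subjects:
     - apiGroup: rbac.authorization.k8s.io
       kind: User
       name: demouser

..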
code-block:: bash 111 | 112 | $ kubectl create clusterrole democlusterrole --verb=list --resource=node 113 | clusterrole.rbac.authorization.k8s.io/democlusterrole created 114 | $ kubectl create clusterrolebinding democlusterrolebinding --clusterrole=democlusterrole --user=demouser 115 | clusterrolebinding.rbac.authorization.k8s.io/democlusterrolebinding created 116 | $ kubectl auth can-i list node 117 | Warning: resource 'nodes' is not namespace scoped 118 | yes 119 | $ kubectl auth can-i list node --as demouser 120 | Warning: resource 'nodes' is not namespace scoped 121 | yes 122 | $ 123 | 124 | 125 | 126 | 127 | 128 | 129 | 130 | 131 | 132 | 133 | 134 | -------------------------------------------------------------------------------- /source/security/kubeconfig.rst: -------------------------------------------------------------------------------- 1 | Certificates and kubeconfig files 2 | ===================================== 3 | 4 | 5 | Certificates and PKI 6 | ---------------------- 7 | 8 | kubeadm-based cluster will: 9 | 10 | - create self-signed Certificate Authority (in /etc/kubernetes/pki) 11 | 12 | - ca.key private key 13 | - ca.crt CA Certificate, copied to every cluster node so the Nodes trust certificates signed by this CA (it is also embedded in the kubeconfig files) 14 | 15 | - Generates Certificates for System Components 16 | - kubernetes-admin User created 17 | 18 | 19 | Create Certificate 20 | ----------------------- 21 | 22 | Create a new certificate for a new user: 23 | 24 | - Create a private key with openssl 25 | - Create a Certificate signing request with openssl 26 | - Create and submit ``CertificateSigning`` Request object 27 | - Approve the ``CertificateSigning`` Request 28 | - Retrieve the Certificate 29 | 30 | 31 | .. code-block:: bash 32 | 33 | $ # create a private key 34 | $ openssl genrsa -out demouser.key 2048 35 | 36 | # generate CSR 37 | $ # CN (Common Name) is your username, O (Organization) is the Group 38 | $ openssl req -new -key demouser.key -out demouser.csr -subj "/CN=demouser" 39 | 40 | # the CertificateSigning Request needs to be base64 encoded 41 | $ cat demouser.csr | base64 | tr -d "\n" 42 | 43 | 44 | Create ``CertificateSigning`` Request object 45 | 46 | .. code-block:: yaml 47 | 48 | apiVersion: certificates.k8s.io/v1 49 | kind: CertificateSigningRequest 50 | metadata: 51 | name: demouser 52 | spec: 53 | groups: 54 | - system:authenticated 55 | request: put the base64 encoded csr here 56 | signerName: kubernetes.io/kube-apiserver-client 57 | usages: 58 | - client auth 59 | 60 | Approve ``CertificateSigning`` Request object 61 | 62 | .. code-block:: bash 63 | 64 | $ # approve the CSR 65 | $ kubectl certificate approve demouser 66 | 67 | # retrieve the certificate from the CSR object, and decode it from base64 68 | $ kubectl get certificatesigningrequests demouser -o jsonpath='{.status.certificate}' | base64 --decode > demouser.crt 69 | 70 | $ # check certificate 71 | $ openssl x509 -in demouser.crt -text 72 | 73 | 74 | 75 | kubeconfig files 76 | ----------------------- 77 | 78 | 79 | - Users 80 | - Clusters 81 | - Contexts 82 | 83 | ``/etc/kubernetes/admin.conf`` 84 | 85 | update kubeconfig file 86 | ---------------------------------- 87 | 88 |
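If you would rather not touch the default config while experimenting, the same ``kubectl config`` commands can target a separate file; a sketch (the server address is the API endpoint used throughout this lab):

.. code-block:: bash

   # write to a scratch kubeconfig instead of ~/.kube/config
   export KUBECONFIG=$HOME/demo-kubeconfig
   kubectl config set-cluster kubernetes --server=https://192.168.56.10:6443 \
     --certificate-authority=/etc/kubernetes/pki/ca.crt --embed-certs=true
   unset KUBECONFIG    # back to the default file

..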
code-block:: bash 89 | 90 | $ # add demo user 91 | $ kubectl config set-credentials demouser --client-key=demouser.key --client-certificate=demouser.crt --embed-certs=true 92 | 93 | $ # check 94 | $ kubectl config get-users 95 | NAME 96 | demouser 97 | kubernetes-admin 98 | 99 | # create contesxt 100 | $ kubectl config get-contexts 101 | CURRENT NAME CLUSTER AUTHINFO NAMESPACE 102 | * kubernetes-admin@kubernetes kubernetes kubernetes-admin default 103 | $ kubectl config set-context demo --user=demouser --cluster=kubernetes 104 | Context "demo" created. 105 | $ kubectl config get-contexts 106 | CURRENT NAME CLUSTER AUTHINFO NAMESPACE 107 | demo kubernetes demouser 108 | * kubernetes-admin@kubernetes kubernetes kubernetes-admin default 109 | 110 | $ # change context 111 | $ kubectl config use-context demo 112 | Switched to context "demo". 113 | $ kubectl config get-contexts 114 | CURRENT NAME CLUSTER AUTHINFO NAMESPACE 115 | * demo kubernetes demouser 116 | kubernetes-admin@kubernetes kubernetes kubernetes-admin default 117 | $ kubectl get nodes 118 | Error from server (Forbidden): nodes is forbidden: User "demouser" cannot list resource "nodes" in API group "" at the cluster scope 119 | 120 | 121 | $ kubectl get nodes -v 6 122 | I0711 20:58:04.364228 65356 loader.go:372] Config loaded from file: /home/vagrant/.kube/config 123 | I0711 20:58:04.383605 65356 round_trippers.go:553] GET https://192.168.56.10:6443/api/v1/nodes?limit=500 403 Forbidden in 14 milliseconds 124 | I0711 20:58:04.384119 65356 helpers.go:222] server response object: [{ 125 | "kind": "Status", 126 | "apiVersion": "v1", 127 | "metadata": {}, 128 | "status": "Failure", 129 | "message": "nodes is forbidden: User \"demouser\" cannot list resource \"nodes\" in API group \"\" at the cluster scope", 130 | "reason": "Forbidden", 131 | "details": { 132 | "kind": "nodes" 133 | }, 134 | "code": 403 135 | }] 136 | Error from server (Forbidden): nodes is forbidden: User "demouser" cannot list resource "nodes" in API group "" at the cluster scope 137 | 138 | 139 | 清理 140 | -------- 141 | 142 | .. code-block:: bash 143 | 144 | $ kubectl config use-context kubernetes-admin@kubernetes 145 | $ kubectl config delete-context demo 146 | $ kubectl config delete-user demouser 147 | -------------------------------------------------------------------------------- /source/security/service_account.rst: -------------------------------------------------------------------------------- 1 | ServiceAccount 2 | =================== 3 | 4 | All Kubernetes clusters have two categories of users: 5 | 6 | - service accounts managed by Kubernete, 程序(pod)连接API Server 7 | - normal users. 普通用户,比如通过kubectl连接API Server 8 | 9 | 10 | Create ServiceAccount 11 | ------------------------- 12 | 13 | 14 | .. code-block:: bash 15 | 16 | $ kubectl create serviceaccount demosa 17 | $ kubectl describe serviceaccounts demosa 18 | Name: demosa 19 | Namespace: default 20 | Labels: 21 | Annotations: 22 | Image pull secrets: 23 | Mountable secrets: 24 | Tokens: 25 | Events: 26 | vagrant@k8s-master:~$ 27 | 28 | Check ServiceAccount in pod 29 | ----------------------------- 30 | 31 | Create pod 32 | 33 | .. code-block:: yaml 34 | 35 | apiVersion: v1 36 | kind: Pod 37 | metadata: 38 | name: client 39 | spec: 40 | serviceAccount: demosa 41 | containers: 42 | - name: client 43 | image: xiaopeng163/net-box:latest 44 | command: 45 | - sh 46 | - -c 47 | - "sleep 1000000" 48 | 49 | 查看service account的token, 50 | 51 | .. 
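A side note before looking at the mounted token: since Kubernetes 1.24 a ServiceAccount no longer gets a long-lived Secret token automatically; the token below is projected into the Pod at runtime. A standalone, time-bound token can also be requested on demand; a sketch:

.. code-block:: bash

   # requires Kubernetes >= 1.24 (TokenRequest API)
   kubectl create token demosa --duration=1h

To see where the token is mounted, describe the Pod:

..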
code-block:: 52 | 53 | $ kubectl describe pod client 54 | 55 | 或者通过jsonpath过滤 56 | 57 | .. code-block:: bash 58 | 59 | $ kubectl get pods client -o jsonpath='{.spec.containers[0].volumeMounts}' | python3 -m json.tool 60 | [ 61 | { 62 | "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", 63 | "name": "kube-api-access-tvr98", 64 | "readOnly": true 65 | } 66 | ] 67 | 68 | .. code-block:: bash 69 | 70 | $ kubectl exec -it client -- sh 71 | /omd # 72 | /omd # 73 | /omd # cd /var/run/secrets/kubernetes.io/serviceaccount 74 | /run/secrets/kubernetes.io/serviceaccount # ls -l 75 | total 0 76 | lrwxrwxrwx 1 root root 13 Jul 16 14:15 ca.crt -> ..data/ca.crt 77 | lrwxrwxrwx 1 root root 16 Jul 16 14:15 namespace -> ..data/namespace 78 | lrwxrwxrwx 1 root root 12 Jul 16 14:15 token -> ..data/token 79 | /run/secrets/kubernetes.io/serviceaccount # 80 | 81 | ServiceAccount Authentication 82 | -------------------------------- 83 | 84 | 85 | .. code-block:: bash 86 | 87 | $ kubectl exec -it client -- sh 88 | /omd # cd /var/run/secrets/kubernetes.io/serviceaccount 89 | /run/secrets/kubernetes.io/serviceaccount # TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) 90 | /run/secrets/kubernetes.io/serviceaccount # CACERT=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt 91 | /run/secrets/kubernetes.io/serviceaccount # 92 | /run/secrets/kubernetes.io/serviceaccount # curl --cacert $CACERT -X GET https://kubernetes.default.svc.cluster.local/api 93 | { 94 | "kind": "Status", 95 | "apiVersion": "v1", 96 | "metadata": {}, 97 | "status": "Failure", 98 | "message": "forbidden: User \"system:anonymous\" cannot get path \"/api\"", 99 | "reason": "Forbidden", 100 | "details": {}, 101 | "code": 403 102 | }/run/secrets/kubernetes.io/serviceaccount # 103 | /run/secrets/kubernetes.io/serviceaccount # curl --cacert $CACERT --header "Authorization: Bearer $TOKEN" -X GET https://kubernetes.default.svc.cluster.local/api 104 | { 105 | "kind": "APIVersions", 106 | "versions": [ 107 | "v1" 108 | ], 109 | "serverAddressByClientCIDRs": [ 110 | { 111 | "clientCIDR": "0.0.0.0/0", 112 | "serverAddress": "192.168.56.10:6443" 113 | } 114 | 115 | 但是此时service account并没有访问集群资源的权限。 116 | 117 | .. code-block:: bash 118 | 119 | /run/secrets/kubernetes.io/serviceaccount # curl --cacert $CACERT --header "Authorization: Bearer $TOKEN" -X GET https://kubernetes.default.svc.cluster.local/api/v1/namespaces/default/pods?limit=500 120 | { 121 | "kind": "Status", 122 | "apiVersion": "v1", 123 | "metadata": {}, 124 | "status": "Failure", 125 | "message": "pods is forbidden: User \"system:serviceaccount:default:demosa\" cannot list resource \"pods\" in API group \"\" in the namespace \"default\"", 126 | "reason": "Forbidden", 127 | "details": { 128 | "kind": "pods" 129 | }, 130 | "code": 403 131 | }/run/secrets/kubernetes.io/serviceaccount # 132 | 133 | ServiceAccount Authorization 134 | -------------------------------- 135 | 136 | .. 
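Authorization for a ServiceAccount is checked just like for a normal user, by impersonating the subject name ``system:serviceaccount:<namespace>:<name>``. A quick way to dump everything a subject may do is the ``--list`` form; a sketch:

.. code-block:: bash

   kubectl auth can-i --list --as=system:serviceaccount:default:demosa

..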
code-block:: bash 137 | 138 | $ kubectl auth can-i list pods --as=system:serviceaccount:default:demosa 139 | no 140 | $ kubectl get pods -v 6 --as=system:serviceaccount:default:demosa 141 | I0716 14:46:05.735051 61770 loader.go:372] Config loaded from file: /home/vagrant/.kube/config 142 | I0716 14:46:05.761522 61770 round_trippers.go:553] GET https://192.168.56.10:6443/api/v1/namespaces/default/pods?limit=500 403 Forbidden in 20 milliseconds 143 | I0716 14:46:05.762209 61770 helpers.go:222] server response object: [{ 144 | "kind": "Status", 145 | "apiVersion": "v1", 146 | "metadata": {}, 147 | "status": "Failure", 148 | "message": "pods is forbidden: User \"system:serviceaccount:default:demosa\" cannot list resource \"pods\" in API group \"\" in the namespace \"default\"", 149 | "reason": "Forbidden", 150 | "details": { 151 | "kind": "pods" 152 | }, 153 | "code": 403 154 | }] 155 | Error from server (Forbidden): pods is forbidden: User "system:serviceaccount:default:demosa" cannot list resource "pods" in API group "" in the namespace "default" 156 | 157 | RBAC 158 | ~~~~~~~ 159 | 160 | .. code-block:: bash 161 | 162 | $ kubectl create role demorole --verb=get,list --resource=pods 163 | $ kubectl create rolebinding demorolebinding --role=demorole --serviceaccount=default:demosa 164 | rolebinding.rbac.authorization.k8s.io/demorolebinding created 165 | $ kubectl auth can-i list pods --as=system:serviceaccount:default:demosa 166 | yes 167 | 168 | 现在我们可以进入到一个绑定此service account的pod进行测试了 169 | 170 | .. code-block:: bash 171 | 172 | 173 | $ kubectl exec -it client -- sh 174 | /omd # cd /var/run/secrets/kubernetes.io/serviceaccount 175 | /run/secrets/kubernetes.io/serviceaccount # TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) 176 | /run/secrets/kubernetes.io/serviceaccount # CACERT=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt 177 | /run/secrets/kubernetes.io/serviceaccount # 178 | /run/secrets/kubernetes.io/serviceaccount # curl --cacert $CACERT --header "Authorization: Bearer $TOKEN" -X GET https://kubernetes.default.svc.cluster.local/api/v1/namespaces/default/pods?limit=500 179 | { 180 | "kind": "PodList", 181 | "apiVersion": "v1", 182 | "metadata": { 183 | "resourceVersion": "1625465" 184 | }, 185 | .... 186 | .... 187 | 188 | 189 | -------------------------------------------------------------------------------- /source/storage/configmap.rst: -------------------------------------------------------------------------------- 1 | ConfigMap 2 | ============= 3 | 4 | https://kubernetes.io/docs/concepts/configuration/configmap/ 5 | 6 | Key value pairs exposed into a pod used application configuration settings 7 | 8 | Decouple application and pod configurations 9 | 10 | 11 | Create ConfigMaps 12 | ---------------------- 13 | 14 | .. code-block:: bash 15 | 16 | kubectl create configmap mysql-cfg \ 17 | --from-literal=MYSQL_ROOT_PASSWORD=root \ 18 | --from-literal=MYSQL_USER=demo \ 19 | --from-literal=MYSQL_PASSWORD=demo 20 | 21 | from a file 22 | 23 | .. code-block:: bash 24 | 25 | kubectl create configmap mysql-cfg --from-file=appconfig 26 | 27 | from yaml file 28 | 29 | .. code-block:: yaml 30 | 31 | apiVersion: v1 32 | kind: ConfigMap 33 | metadata: 34 | name: appconfig 35 | data: 36 | MYSQL_ROOT_PASSWORD: root 37 | MYSQL_USER: demo 38 | MYSQL_PASSWORD: demo 39 | 40 | 41 | Using ConfigMaps 42 | -------------------- 43 | 44 | 45 | Environment Variables 46 | ~~~~~~~~~~~~~~~~~~~~~~~~~ 47 | 48 | 49 | .. 
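The first Pod below pulls each key out of the ConfigMap individually with ``configMapKeyRef``; the ``envFrom`` variant that follows injects every key of the ConfigMap at once.

..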
Using ConfigMaps
--------------------


Environment Variables
~~~~~~~~~~~~~~~~~~~~~~~~~

.. code-block:: yaml

   apiVersion: v1
   kind: Pod
   metadata:
     name: mysql
   spec:
     containers:
     - name: mysql
       image: mysql:8.0
       env:
       - name: MYSQL_ROOT_PASSWORD
         valueFrom:
           configMapKeyRef:
             name: mysql-cfg
             key: MYSQL_ROOT_PASSWORD
       - name: MYSQL_USER
         valueFrom:
           configMapKeyRef:
             name: mysql-cfg
             key: MYSQL_USER
       - name: MYSQL_PASSWORD
         valueFrom:
           configMapKeyRef:
             name: mysql-cfg
             key: MYSQL_PASSWORD

or

.. code-block:: yaml

   apiVersion: v1
   kind: Pod
   metadata:
     name: mysql
   spec:
     containers:
     - name: mysql
       image: mysql:8.0
       envFrom:
       - configMapRef:
           name: appconfig


Volumes
~~~~~~~~~~~

.. code-block:: yaml

   apiVersion: v1
   kind: Pod
   metadata:
     name: pod-env
   spec:
     volumes:
     - name: appconfig
       configMap:
         name: appconfig
     containers:
     - name: busybox
       image: busybox
       command: ["sh", "-c", "while true; do echo $(date) >> /tmp/index.html; sleep 10; done"]
       volumeMounts:
       - name: appconfig
         mountPath: "/etc/appconfig"

--------------------------------------------------------------------------------
/source/storage/env.rst:
--------------------------------------------------------------------------------
Environment Variables
========================

Ways of configuring an application in a pod:

- Command-line arguments
- Environment variables
- ConfigMaps


Environment Variables inside Pods
-----------------------------------

- User defined
- System defined


.. warning::

   Environment variables can't be updated once the pod is created.


Defining Environment Variables
---------------------------------

.. code-block:: yaml

   apiVersion: v1
   kind: Pod
   metadata:
     name: pod-env
   spec:
     containers:
     - name: producer
       image: busybox
       command: ["sh", "-c", "while true; do echo $NAME >> /tmp/index.html; sleep 10; done"]
       env:
       - name: NAME
         value: Hello World

Apply the file and check the result:

.. code-block:: bash

   vagrant@k8s-master:~$ kubectl apply -f pod-env.yml
   pod/pod-env created
   vagrant@k8s-master:~$ kubectl get pods
   NAME      READY   STATUS    RESTARTS   AGE
   pod-env   1/1     Running   0          17s
   vagrant@k8s-master:~$ kubectl exec pod-env -- more /tmp/index.html
   Hello World
   Hello World
   Hello World
   Hello World


Another example, for a MySQL container:

.. code-block:: yaml

   apiVersion: v1
   kind: Pod
   metadata:
     name: mysql
   spec:
     containers:
     - name: mysql
       image: mysql:8.0
       env:
       - name: MYSQL_ROOT_PASSWORD
         value: root
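
Beyond literal values, Kubernetes can also inject a pod's own metadata as environment variables through the Downward API. This is not covered above, but a minimal sketch looks like this (the ``fieldRef`` paths are standard fields):

.. code-block:: yaml

   apiVersion: v1
   kind: Pod
   metadata:
     name: pod-downward
   spec:
     containers:
     - name: busybox
       image: busybox
       command: ["sh", "-c", "echo running as $POD_NAME on $NODE_NAME; sleep 3600"]
       env:
       # populated by the kubelet from the pod's own metadata at startup
       - name: POD_NAME
         valueFrom:
           fieldRef:
             fieldPath: metadata.name
       - name: NODE_NAME
         valueFrom:
           fieldRef:
             fieldPath: spec.nodeName
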
--------------------------------------------------------------------------------
/source/storage/nfs.rst:
--------------------------------------------------------------------------------
NFS Server setup
=========================


.. list-table:: Kubeadm lab hosts (+ NFS Server)
   :header-rows: 1

   * - hostname
     - IP
     - system
     - memory
   * - k8s-master
     - 192.168.56.10
     - Ubuntu 20.04 LTS
     - 4GB
   * - k8s-worker1
     - 192.168.56.11
     - Ubuntu 20.04 LTS
     - 2GB
   * - k8s-worker2
     - 192.168.56.12
     - Ubuntu 20.04 LTS
     - 2GB
   * - nfs-server
     - 192.168.56.20
     - Ubuntu 20.04 LTS
     - 2GB


The steps below use Ubuntu as an example.

NFS server setup
-----------------------

.. code-block:: bash

   # install the NFS server and create directories for our exports

   sudo apt-get install -y nfs-kernel-server
   sudo mkdir -p /export/volumes
   sudo mkdir -p /export/volumes/pod

   # configure the NFS export

   sudo bash -c 'echo "/export/volumes *(rw,no_root_squash,no_subtree_check)" > /etc/exports'
   cat /etc/exports
   sudo systemctl restart nfs-kernel-server.service


NFS client test
-------------------

Install the NFS client.

.. warning::

   Note: the NFS client must be installed on every node of the Kubernetes cluster.

.. code-block:: bash

   $ sudo apt-get install -y nfs-common


.. code-block:: bash

   $ sudo mount -t nfs nfs-server-address:/export/volumes /mnt/
   $ mount | grep nfs
   $ sudo umount /mnt
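
Before moving on to PV/PVC, note that a pod can also mount the export directly with the ``nfs`` volume type, with no PersistentVolume involved. A minimal sketch against this lab's server address:

.. code-block:: yaml

   apiVersion: v1
   kind: Pod
   metadata:
     name: nfs-direct
   spec:
     volumes:
     - name: nfs-vol
       nfs:                  # mounted by the kubelet on whichever node runs the pod
         server: 192.168.56.20
         path: /export/volumes
     containers:
     - name: busybox
       image: busybox
       command: ["sh", "-c", "ls /data; sleep 3600"]
       volumeMounts:
       - name: nfs-vol
         mountPath: /data
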
--------------------------------------------------------------------------------
/source/storage/pv-pvc.rst:
--------------------------------------------------------------------------------
Persistent Volumes and Persistent Volume Claims
====================================================

https://kubernetes.io/docs/concepts/storage/persistent-volumes/


Persistent Volumes
----------------------------

A PersistentVolume (PV) is a piece of storage in the cluster that has been provisioned by an administrator
or dynamically provisioned using Storage Classes.


Types of Persistent Volumes:

https://kubernetes.io/docs/concepts/storage/persistent-volumes/#types-of-persistent-volumes


Persistent Volume Claims
----------------------------

A request for storage by a user:

- Size
- Access mode (ReadWriteOnce, ReadWriteMany, ReadOnlyMany); access modes apply at the node level, not the pod level
- Storage Class

The cluster maps the PersistentVolumeClaim to a matching PersistentVolume.


Static Provisioning Workflow
--------------------------------

- create a PersistentVolume
- create a PersistentVolumeClaim
- define the volume in the pod spec


Storage Lifecycle
------------------

- Binding (PVC created, match PVC to PV)
- Using (pod lifetime)
- Reclaim (PVC deleted; the PV is reclaimed according to its reclaim policy: delete or retain)

  - delete: once the PVC is deleted, the PV is cleaned up as well; its data is removed and the capacity becomes available for the next PVC
  - retain: after the PVC is deleted, the data on the PV is kept and the PV moves to the Released state; it cannot immediately be claimed again, and an administrator must clean it up and delete it manually

Define a Persistent Volume
-----------------------------------

.. code-block:: yaml

   apiVersion: v1
   kind: PersistentVolume
   metadata:
     name: pv-nfs
   spec:
     capacity:
       storage: 4Gi
     accessModes:
       - ReadWriteMany
     persistentVolumeReclaimPolicy: Retain
     nfs:
       server: 192.168.56.20
       path: "/export/volumes/pod"


.. code-block:: bash

   vagrant@k8s-master:~$ kubectl apply -f pv.yml
   persistentvolume/pv-nfs created
   vagrant@k8s-master:~$ kubectl get persistentvolume
   NAME     CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS   REASON   AGE
   pv-nfs   4Gi        RWX            Retain           Available                                   5s

Define PersistentVolumeClaim
--------------------------------

.. code-block:: yaml

   apiVersion: v1
   kind: PersistentVolumeClaim
   metadata:
     name: pvc-nfs
   spec:
     accessModes:
       - ReadWriteMany
     resources:
       requests:
         storage: 1Gi


Use the PersistentVolumeClaim in a Pod
----------------------------------------

.. code-block:: yaml

   apiVersion: apps/v1
   kind: Deployment
   metadata:
     name: web
   spec:
     replicas: 1
     selector:
       matchLabels:
         app: web
     template:
       metadata:
         labels:
           app: web
       spec:
         volumes:
         - name: webcontent
           persistentVolumeClaim:
             claimName: pvc-nfs
         containers:
         - image: nginx
           name: nginx
           ports:
           - containerPort: 80
           volumeMounts:
           - name: webcontent
             mountPath: "/usr/share/nginx/html/web-app"

Check the volume mount:

.. code-block:: bash

   $ kubectl get pods -o wide
   NAME                  READY   STATUS    RESTARTS   AGE   IP             NODE          NOMINATED NODE   READINESS GATES
   web-9996cd57b-988n7   1/1     Running   0          25m   10.244.2.170   k8s-worker2   <none>           <none>

On node k8s-worker2 you can see the volume mounted for the pod:

.. code-block:: bash

   vagrant@k8s-worker2:~$ mount | grep nfs
   192.168.56.20:/export/volumes/pod on /var/lib/kubelet/pods/1c17d9be-239c-44b4-8f35-99ad0c7976d2/volumes/kubernetes.io~nfs/pv-nfs type nfs4 (rw,relatime,vers=4.2,rsize=262144,wsize=262144,namlen=255,hard,proto=tcp,timeo=600,retrans=2,sec=sys,clientaddr=192.168.56.12,local_lock=none,addr=192.168.56.20)

Create a file on the NFS server, under this path:

.. code-block:: bash

   vagrant@nfs-server:/export/volumes/pod$ pwd
   /export/volumes/pod
   vagrant@nfs-server:/export/volumes/pod$ ls
   index.html
   vagrant@nfs-server:/export/volumes/pod$ more index.html
   hello k8s
   vagrant@nfs-server:/export/volumes/pod$

Create a service:

.. code-block:: bash

   $ kubectl expose deployment web --port=80 --type=NodePort
   $ kubectl get service
   NAME         TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE
   kubernetes   ClusterIP   10.96.0.1      <none>        443/TCP        25d
   web          NodePort    10.97.45.206   <none>        80:32615/TCP   6s

Open a browser and visit http://<node-ip>:32615/web-app/ (with NodePort, any cluster node's IP works).

You should see "hello k8s".
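
Because the PV uses ``persistentVolumeReclaimPolicy: Retain``, deleting the claim in the cleanup below leaves the PV in the Released state rather than making it Available again. A common manual cleanup (an illustrative admin step, not part of the original walkthrough) is to clear the PV's ``claimRef`` once the old data has been dealt with:

.. code-block:: bash

   # after the PVC is gone, the PV shows STATUS=Released
   kubectl get pv pv-nfs

   # remove the stale claim reference so the PV becomes Available again
   kubectl patch pv pv-nfs -p '{"spec":{"claimRef": null}}'
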
clean
-----------

.. code-block:: bash

   $ kubectl delete service web
   $ kubectl delete deployments.apps web
   $ kubectl delete persistentvolumeclaims pvc-nfs
   $ kubectl delete persistentvolume pv-nfs
--------------------------------------------------------------------------------
/source/storage/secrets.rst:
--------------------------------------------------------------------------------
Secrets
=========

https://kubernetes.io/docs/concepts/configuration/secret/

- Store sensitive information as objects
- Retrieve them for later use
- passwords, API tokens, keys and certificates
- safer, more flexible


Properties of Secrets
-------------------------

- base64 encoded
- Encryption at rest can be configured
- Stored in etcd
- Namespaced
- A pod cannot start if it references an unavailable Secret


Create Secrets
-------------------


from kubectl
~~~~~~~~~~~~~~~

.. code-block:: bash

   kubectl create secret generic mysql-secret \
     --from-literal=MYSQL_ROOT_PASSWORD=root \
     --from-literal=MYSQL_USER=demo \
     --from-literal=MYSQL_PASSWORD=demo

from yaml
~~~~~~~~~~~~~

or from YAML; note that the secret values are base64-encoded:

.. code-block:: yaml

   apiVersion: v1
   kind: Secret
   metadata:
     name: mysql-secret
   type: Opaque
   data:
     MYSQL_PASSWORD: ZGVtbw==
     MYSQL_ROOT_PASSWORD: cm9vdA==
     MYSQL_USER: ZGVtbw==


encode (use ``-n`` so the trailing newline is not encoded along with the value):

.. code-block:: bash

   vagrant@k8s-master:~$ echo -n root | base64
   cm9vdA==
   vagrant@k8s-master:~$ echo -n demo | base64
   ZGVtbw==

decode:

.. code-block:: bash

   $ echo ZGVtbw== | base64 --decode
   demo

from config file
~~~~~~~~~~~~~~~~~~~~

.. code-block:: yaml

   apiVersion: v1
   kind: Secret
   metadata:
     name: mysecret
   type: Opaque
   stringData:
     config.yaml: |
       MYSQL_ROOT_PASSWORD: root
       MYSQL_PASSWORD: demo
       MYSQL_USER: demo



Using Secrets in Pods
------------------------

Environment Variables
~~~~~~~~~~~~~~~~~~~~~~~

.. code-block:: yaml

   apiVersion: v1
   kind: Pod
   metadata:
     name: mysql
   spec:
     containers:
     - name: mysql
       image: mysql:8.0
       env:
       - name: MYSQL_ROOT_PASSWORD
         valueFrom:
           secretKeyRef:
             name: mysql-secret
             key: MYSQL_ROOT_PASSWORD
       - name: MYSQL_USER
         valueFrom:
           secretKeyRef:
             name: mysql-secret
             key: MYSQL_USER
       - name: MYSQL_PASSWORD
         valueFrom:
           secretKeyRef:
             name: mysql-secret
             key: MYSQL_PASSWORD

or

.. code-block:: yaml

   apiVersion: v1
   kind: Pod
   metadata:
     name: mysql
   spec:
     containers:
     - name: mysql
       image: mysql:8.0
       envFrom:
       - secretRef:
           name: mysql-secret
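
To verify what a container will actually receive, you can read a key back from the cluster and decode it, combining the jsonpath filtering and base64 decoding shown earlier:

.. code-block:: bash

   # print a single key from the Secret and decode it
   kubectl get secret mysql-secret -o jsonpath='{.data.MYSQL_USER}' | base64 --decode
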
Volumes or Files
~~~~~~~~~~~~~~~~~~

.. code-block:: yaml

   apiVersion: v1
   kind: Pod
   metadata:
     name: pod-env
   spec:
     volumes:
     - name: appconfig
       secret:
         secretName: mysql-secret
     containers:
     - name: busybox
       image: busybox
       command: ["sh", "-c", "while true; do echo $(date) >> /tmp/index.html; sleep 10; done"]
       volumeMounts:
       - name: appconfig
         mountPath: "/etc/appconfig"

Types of Secrets
------------------

- updatable (the default)
- immutable


--------------------------------------------------------------------------------
/source/storage/volume.rst:
--------------------------------------------------------------------------------
Volumes
=====================

https://kubernetes.io/docs/concepts/storage/volumes/

Volumes in Kubernetes largely carry over the volume concept from Docker. Kubernetes divides volumes into two categories: persistent PersistentVolumes and non-persistent ordinary volumes.

- An ordinary volume only exists so that the containers of a single pod can share data; it has the same lifecycle as the pod, so it offers no real persistence.
- A PersistentVolume is a resource object for storing data persistently. It can exist independently of any pod, and its lifecycle is decoupled from pods; consequently a PersistentVolume should not be tied to any particular host node, since that would inevitably constrain pod scheduling.

emptyDir
------------

https://kubernetes.io/docs/concepts/storage/volumes/#emptydir

An emptyDir volume is first created when a Pod is assigned to a node, and exists as long as that Pod is running on that node.
As the name says, the emptyDir volume is initially empty. All containers in the Pod can read and write the same files in the emptyDir volume,
though that volume can be mounted at the same or different paths in each container. When a Pod is removed from a node for any reason, the data in the emptyDir is deleted permanently.


.. code-block:: yaml

   apiVersion: v1
   kind: Pod
   metadata:
     name: multicontainer-pod
   spec:
     containers:
     - name: producer
       image: busybox
       command: ["sh", "-c", "while true; do echo $(hostname) $(date) >> /var/log/index.html; sleep 10; done"]
       volumeMounts:
       - name: webcontent
         mountPath: /var/log
     - name: consumer
       image: nginx
       ports:
       - containerPort: 80
       volumeMounts:
       - name: webcontent
         mountPath: /usr/share/nginx/html
     volumes:
     - name: webcontent
       emptyDir: {}


hostPath
---------------

https://kubernetes.io/docs/concepts/storage/volumes/#hostpath

A hostPath volume mounts a file or directory from the host node's filesystem into your Pod.
This is not something that most Pods will need, but it offers a powerful escape hatch for some applications.


.. code-block:: yaml

   apiVersion: v1
   kind: Pod
   metadata:
     name: multicontainer-pod
   spec:
     containers:
     - name: producer
       image: busybox
       command: ["sh", "-c", "while true; do echo $(hostname) $(date) >> /var/log/index.html; sleep 10; done"]
       volumeMounts:
       - name: webcontent
         mountPath: /var/log
     - name: consumer
       image: nginx
       ports:
       - containerPort: 80
       volumeMounts:
       - name: webcontent
         mountPath: /usr/share/nginx/html
     volumes:
     - name: webcontent
       hostPath:
         path: /tmp
         type: Directory
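
The emptyDir type shown earlier also supports a memory-backed (tmpfs) variant, useful for scratch data that should never touch disk. A minimal sketch; ``medium`` and ``sizeLimit`` are standard emptyDir fields:

.. code-block:: yaml

   apiVersion: v1
   kind: Pod
   metadata:
     name: tmpfs-demo
   spec:
     containers:
     - name: busybox
       image: busybox
       command: ["sh", "-c", "df -h /scratch; sleep 3600"]
       volumeMounts:
       - name: scratch
         mountPath: /scratch
     volumes:
     - name: scratch
       emptyDir:
         medium: Memory    # backed by tmpfs; usage counts against the container's memory
         sizeLimit: 128Mi  # the pod is evicted if the volume grows beyond this
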
--------------------------------------------------------------------------------
/source/stroage.rst:
--------------------------------------------------------------------------------
Storage
==========

.. note::

   Before starting this chapter, it helps to have a basic understanding of Docker volumes: https://dockertips.readthedocs.io/en/latest/docker-volume.html

Storage API objects:

- Volume
- Persistent Volume
- Persistent Volume Claim
- Storage Class

Configuration:

- Environment Variables
- Secrets
- ConfigMap

.. toctree::
   :maxdepth: 2
   :caption: Contents:

   storage/volume
   storage/nfs
   storage/pv-pvc
   storage/env
   storage/secrets
   storage/configmap

--------------------------------------------------------------------------------
/source/troubleshooting.rst:
--------------------------------------------------------------------------------
Troubleshooting
====================

.. toctree::
   :maxdepth: 2
   :caption: Contents:

   troubleshooting/tools
   troubleshooting/app
   troubleshooting/control-plane
   troubleshooting/nodes

--------------------------------------------------------------------------------
/source/troubleshooting/app.rst:
--------------------------------------------------------------------------------
Application Failure
=======================

The manifest below ships two deliberate faults to practice on: the image tag ``1.o`` (instead of ``1.0``), and a Service selector ``app: helloworld`` that does not match the pod label ``app: hello-world``.

.. code-block:: yaml

   apiVersion: apps/v1
   kind: Deployment
   metadata:
     name: hello-world
   spec:
     replicas: 1
     selector:
       matchLabels:
         app: hello-world
     template:
       metadata:
         labels:
           app: hello-world
       spec:
         containers:
         - name: hello-world
           image: gcr.io/google-samples/hello-app:1.o
           ports:
           - containerPort: 8080
   ---
   apiVersion: v1
   kind: Service
   metadata:
     name: hello-world
   spec:
     ports:
     - port: 80
       protocol: TCP
       targetPort: 8080
     selector:
       app: helloworld

--------------------------------------------------------------------------------
/source/troubleshooting/control-plane.rst:
--------------------------------------------------------------------------------
Troubleshooting Control Plane
=================================

- Server online
- Network reachability
- systemd
- container runtime
- kubelet
- static pod manifests

static pod config: /var/lib/kubelet/config.yaml ``staticPodPath``


control plane
-------------------

.. code-block:: bash

   # check kube-system pods
   kubectl get pods --namespace kube-system

   # query the container runtime directly
   crictl --runtime-endpoint unix:///run/containerd/containerd.sock ps

   # check the static pod configuration
   sudo more /var/lib/kubelet/config.yaml

   sudo ls -l /etc/kubernetes/manifests


Workloads
--------------

Check the resources with describe, get, events, and logs.
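
When the API server itself is down, ``kubectl`` is of no use and you have to go through the container runtime. A typical sequence, where the container ID is a placeholder to be taken from the first command's output:

.. code-block:: bash

   # find the kube-apiserver container ID (including exited containers)
   sudo crictl --runtime-endpoint unix:///run/containerd/containerd.sock ps -a | grep kube-apiserver

   # read its logs; replace <container-id> with the ID from the previous command
   sudo crictl --runtime-endpoint unix:///run/containerd/containerd.sock logs <container-id>
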
--------------------------------------------------------------------------------
/source/troubleshooting/nodes.rst:
--------------------------------------------------------------------------------
Troubleshooting Nodes
=========================

- Server online
- Network reachability
- systemd
- container runtime
- kubelet
- kube-proxy


kubelet
----------

.. code-block:: bash

   # get status
   systemctl status kubelet.service --no-pager

   # start on system boot
   systemctl enable kubelet.service

   # start kubelet
   systemctl start kubelet.service

   # journal log
   sudo journalctl -u kubelet.service --no-pager

   # systemd service unit config
   /etc/systemd/system/kubelet.service.d/10-kubeadm.conf

   # kubelet config
   /var/lib/kubelet/config.yaml

--------------------------------------------------------------------------------
/source/troubleshooting/tools.rst:
--------------------------------------------------------------------------------
Troubleshooting Tools
==========================

- kubectl logs
- kubectl events
- systemctl
- journalctl
- system logs
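
A few concrete invocations of the tools above; these are standard commands, and ``<pod-name>`` is a placeholder:

.. code-block:: bash

   # recent cluster events, oldest first
   kubectl get events --sort-by=.metadata.creationTimestamp

   # logs of the previous (crashed) container instance of a pod
   kubectl logs <pod-name> --previous

   # node-level kubelet logs for the current boot
   sudo journalctl -u kubelet.service -b --no-pager

--------------------------------------------------------------------------------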