├── .gitignore ├── LICENSE ├── README.rst ├── code ├── docker │ ├── api │ │ ├── requirements.txt │ │ └── test.py │ ├── best-practise │ │ ├── 1-how-to-chose-image │ │ │ ├── Dockerfile │ │ │ ├── app.py │ │ │ ├── requirements.txt │ │ │ └── run.sh │ │ ├── 2-build-with-cache │ │ │ ├── .dockerignore │ │ │ ├── Dockerfile │ │ │ ├── app.py │ │ │ ├── requirements.txt │ │ │ └── run.sh │ │ ├── 3-multi-stage-build │ │ │ ├── Dockerfile │ │ │ ├── Dockerfile.multi-stage │ │ │ └── app.go │ │ ├── 5-dockerignore-file │ │ │ ├── .dockerignore │ │ │ ├── Dockerfile │ │ │ ├── README.md │ │ │ ├── app.py │ │ │ ├── requirements.txt │ │ │ └── run.sh │ │ ├── 6-no-root │ │ │ ├── Dockerfile │ │ │ └── docker-entrypoint.sh │ │ └── 7-good-bad-dockerfile │ │ │ ├── bad │ │ │ ├── Dockerfile │ │ │ └── yourscript.py │ │ │ └── good │ │ │ ├── Dockerfile │ │ │ ├── requirements.txt │ │ │ └── yourscript.py │ ├── docker-compose │ │ ├── flask-redis │ │ │ ├── Dockerfile │ │ │ ├── README.rst │ │ │ ├── app.py │ │ │ ├── docker-compose-v3.yml │ │ │ ├── docker-compose.yml │ │ │ ├── install.sh │ │ │ └── requirements.txt │ │ └── lb-scaling │ │ │ ├── Dockerfile │ │ │ ├── README.rst │ │ │ ├── app.py │ │ │ ├── docker-compose.yml │ │ │ └── requirements.txt │ ├── flask-hello-world │ │ ├── Dockerfile │ │ ├── app.py │ │ └── requirements.txt │ └── flask-redis │ │ ├── Dockerfile │ │ ├── README.rst │ │ ├── app.py │ │ ├── install.sh │ │ └── requirements.txt └── kubernetes │ ├── flask-redis │ ├── nginx-deploy.yml │ ├── nginx-rc.yml │ ├── redis.yml │ └── web.yml │ └── mysql_replication │ ├── mysql-master-rc.yaml │ ├── mysql-master-service.yaml │ ├── mysql-slave-rc.yaml │ └── mysql-slave-service.yaml ├── docs ├── Makefile ├── make.bat └── source │ ├── conf.py │ ├── coreos.rst │ ├── docker.rst │ ├── docker │ ├── _image │ │ ├── cnm-model.jpg │ │ ├── docker-compose.png │ │ ├── docker-flannel.png │ │ ├── docker-overlay.png │ │ ├── docker-swarm-visual.png │ │ ├── docker-turtles-communication.jpg │ │ ├── ovs-gre-docker.png │ │ └── two-container-network.png │ ├── bridged-network.rst │ ├── create-new-bridge.rst │ ├── customize-bridge.rst │ ├── docker-base-image.rst │ ├── docker-calico.rst │ ├── docker-cli.rst │ ├── docker-compose-lb-scale.rst │ ├── docker-compose.rst │ ├── docker-contiv.rst │ ├── docker-engine.rst │ ├── docker-etcd.rst │ ├── docker-flannel.rst │ ├── docker-machine-aws.rst │ ├── docker-machine.rst │ ├── docker-network.rst │ ├── docker-ovs.rst │ ├── docker-swarm-lb-scale.rst │ ├── docker-swarm-service.rst │ ├── docker-swarm-topo.rst │ ├── docker-swarm.rst │ ├── host-network.rst │ ├── netns.rst │ └── port-mapping.rst │ ├── index.rst │ ├── kubernetes.rst │ ├── kubernetes │ ├── kubeadm.rst │ ├── kubernetes-aws-tectonic.rst │ ├── kubernetes-aws.rst │ ├── minikube.rst │ └── stepbystep.rst │ └── lab-environment.rst ├── lab ├── docker │ ├── multi-node │ │ └── vagrant │ │ │ ├── Vagrantfile │ │ │ └── setup.sh │ └── single-node │ │ ├── vagrant-centos7 │ │ ├── Vagrantfile │ │ └── setup.sh │ │ ├── vagrant-centos8 │ │ ├── Vagrantfile │ │ └── setup.sh │ │ ├── vagrant-ubuntu18.04 │ │ ├── Vagrantfile │ │ └── setup.sh │ │ └── vagrant-ubuntu20.04 │ │ ├── Vagrantfile │ │ └── setup.sh ├── k8s │ └── multi-node │ │ └── vagrant │ │ └── Vagrantfile └── podman │ ├── Vagrantfile │ ├── docker.sh │ └── podman.sh └── requirements.txt /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / 
packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | 27 | # PyInstaller 28 | # Usually these files are written by a python script from a template 29 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 30 | *.manifest 31 | *.spec 32 | 33 | # Installer logs 34 | pip-log.txt 35 | pip-delete-this-directory.txt 36 | 37 | # Unit test / coverage reports 38 | htmlcov/ 39 | .tox/ 40 | .coverage 41 | .coverage.* 42 | .cache 43 | nosetests.xml 44 | coverage.xml 45 | *,cover 46 | .hypothesis/ 47 | 48 | # Translations 49 | *.mo 50 | *.pot 51 | 52 | # Django stuff: 53 | *.log 54 | local_settings.py 55 | 56 | # Flask stuff: 57 | instance/ 58 | .webassets-cache 59 | 60 | # Scrapy stuff: 61 | .scrapy 62 | 63 | # Sphinx documentation 64 | docs/_build/ 65 | 66 | # PyBuilder 67 | target/ 68 | 69 | # IPython Notebook 70 | .ipynb_checkpoints 71 | 72 | # pyenv 73 | .python-version 74 | 75 | # celery beat schedule file 76 | celerybeat-schedule 77 | 78 | # dotenv 79 | .env 80 | 81 | # virtualenv 82 | venv/ 83 | ENV/ 84 | 85 | # Spyder project settings 86 | .spyderproject 87 | 88 | # Rope project settings 89 | .ropeproject 90 | 91 | .vagrant 92 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 
39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | Docker Kubernetes Lab Handbook 2 | ============================== 3 | 4 | |License| |Documentation Status| 5 | 6 | This handbook contains Docker and Kubernetes lab tutorials, which will be useful if you are learning Docker or Kubernetes. 7 | Please go to http://docker-k8s-lab.readthedocs.io/en/latest/ for details. 8 | 9 | For any issues or suggestions, feel free to open an issue or a PR on GitHub.
10 | 11 | Friends in China are also welcome to follow my personal WeChat official account "卖逗搞IT", where I occasionally share technical articles about networking, Python, and containers. 12 | 13 | |wechat| 14 | 15 | .. |License| image:: https://img.shields.io/hexpm/l/plug.svg 16 | :target: https://github.com/xiaopeng163/docker-k8s-lab/blob/master/LICENSE 17 | .. |Documentation Status| image:: https://readthedocs.org/projects/docker-k8s-lab/badge/?version=latest 18 | :target: http://docker-k8s-lab.readthedocs.io/en/latest/?badge=latest 19 | 20 | .. |wechat| image:: https://github.com/xiaopeng163/static/blob/master/QR/MY_WeChat_official_account.jpg -------------------------------------------------------------------------------- /code/docker/api/requirements.txt: -------------------------------------------------------------------------------- 1 | docker -------------------------------------------------------------------------------- /code/docker/api/test.py: -------------------------------------------------------------------------------- 1 | import docker 2 | 3 | client = docker.from_env() 4 | 5 | # get image list 6 | 7 | print(client.images.list()) 8 | 9 | # create a container 10 | 11 | container = client.containers.create( 12 | image='nginx:latest', 13 | detach=True, 14 | ports={'80/tcp': 80} 15 | ) 16 | 17 | # start container 18 | 19 | container.start() 20 | 21 | print(container.status)  # status is an attribute, not a method 22 | 23 | print(container.name)  # name is an attribute, not a method -------------------------------------------------------------------------------- /code/docker/best-practise/1-how-to-chose-image/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.8.5-slim-buster 2 | #FROM python:3.8.5 3 | 4 | LABEL maintainer="XYZ " 5 | 6 | COPY . /app 7 | 8 | WORKDIR /app 9 | 10 | RUN pip install -r requirements.txt 11 | 12 | EXPOSE 5000 13 | 14 | ENTRYPOINT [ "./run.sh" ] 15 | -------------------------------------------------------------------------------- /code/docker/best-practise/1-how-to-chose-image/app.py: -------------------------------------------------------------------------------- 1 | from flask import Flask 2 | app = Flask(__name__) 3 | 4 | @app.route('/') 5 | def hello_world(): 6 | return 'Hello, World!' 7 | -------------------------------------------------------------------------------- /code/docker/best-practise/1-how-to-chose-image/requirements.txt: -------------------------------------------------------------------------------- 1 | Flask==1.1.1 2 | -------------------------------------------------------------------------------- /code/docker/best-practise/1-how-to-chose-image/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | export FLASK_APP=app.py 4 | flask run --host=0.0.0.0 5 | -------------------------------------------------------------------------------- /code/docker/best-practise/2-build-with-cache/.dockerignore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xiaopeng163/docker-k8s-lab/a3f5b9a2150a82bd587d3ca118fa6fff2e62b4a1/code/docker/best-practise/2-build-with-cache/.dockerignore -------------------------------------------------------------------------------- /code/docker/best-practise/2-build-with-cache/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.8.5-slim-buster 2 | #FROM python:3.8.5 3 | 4 | LABEL maintainer="XYZ " 5 | 6 | COPY requirements.txt /app/requirements.txt 7 | 8 | WORKDIR /app 9 | 10 | RUN pip install --quiet -r requirements.txt 11 | 12 | COPY . 
/app 13 | 14 | EXPOSE 5000 15 | 16 | ENTRYPOINT [ "./run.sh" ] 17 | -------------------------------------------------------------------------------- /code/docker/best-practise/2-build-with-cache/app.py: -------------------------------------------------------------------------------- 1 | from flask import Flask 2 | app = Flask(__name__) 3 | 4 | @app.route('/') 5 | def hello_world(): 6 | return 'Hello, World!' 7 | -------------------------------------------------------------------------------- /code/docker/best-practise/2-build-with-cache/requirements.txt: -------------------------------------------------------------------------------- 1 | Flask==1.1.1 2 | -------------------------------------------------------------------------------- /code/docker/best-practise/2-build-with-cache/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | export FLASK_APP=app.py 4 | flask run --host=0.0.0.0 5 | -------------------------------------------------------------------------------- /code/docker/best-practise/3-multi-stage-build/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:alpine 2 | WORKDIR /app 3 | ADD . /app 4 | RUN cd /app && go build -o goapp 5 | ENTRYPOINT ./goapp -------------------------------------------------------------------------------- /code/docker/best-practise/3-multi-stage-build/Dockerfile.multi-stage: -------------------------------------------------------------------------------- 1 | # build stage 2 | FROM golang:alpine AS build-env 3 | ADD . /src 4 | RUN cd /src && go build -o goapp 5 | 6 | # final stage 7 | FROM alpine 8 | WORKDIR /app 9 | COPY --from=build-env /src/goapp /app/ 10 | ENTRYPOINT ./goapp -------------------------------------------------------------------------------- /code/docker/best-practise/3-multi-stage-build/app.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import "fmt" 4 | 5 | func main() { 6 | fmt.Println("Hello world!") 7 | } -------------------------------------------------------------------------------- /code/docker/best-practise/5-dockerignore-file/.dockerignore: -------------------------------------------------------------------------------- 1 | .git/ 2 | *.md 3 | **/*.log 4 | **/*.pyc 5 | -------------------------------------------------------------------------------- /code/docker/best-practise/5-dockerignore-file/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.8.5-slim-buster 2 | #FROM python:3.8.5 3 | 4 | LABEL maintainer="XYZ " 5 | 6 | COPY requirements.txt /app/requirements.txt 7 | 8 | WORKDIR /app 9 | 10 | RUN pip install --quiet -r requirements.txt 11 | 12 | COPY . 
/app 13 | 14 | EXPOSE 5000 15 | 16 | ENTRYPOINT [ "./run.sh" ] 17 | -------------------------------------------------------------------------------- /code/docker/best-practise/5-dockerignore-file/README.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xiaopeng163/docker-k8s-lab/a3f5b9a2150a82bd587d3ca118fa6fff2e62b4a1/code/docker/best-practise/5-dockerignore-file/README.md -------------------------------------------------------------------------------- /code/docker/best-practise/5-dockerignore-file/app.py: -------------------------------------------------------------------------------- 1 | from flask import Flask 2 | app = Flask(__name__) 3 | 4 | @app.route('/') 5 | def hello_world(): 6 | return 'Hello, World!' 7 | -------------------------------------------------------------------------------- /code/docker/best-practise/5-dockerignore-file/requirements.txt: -------------------------------------------------------------------------------- 1 | Flask==1.1.1 2 | -------------------------------------------------------------------------------- /code/docker/best-practise/5-dockerignore-file/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | export FLASK_APP=app.py 4 | flask run --host=0.0.0.0 5 | -------------------------------------------------------------------------------- /code/docker/best-practise/6-no-root/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:xenial 2 | 3 | # add our user and group first to make sure their IDs get assigned consistently, regardless of whatever dependencies get added 4 | RUN groupadd -r mongodb && useradd -r -g mongodb mongodb 5 | 6 | RUN set -eux; \ 7 | apt-get update; \ 8 | apt-get install -y --no-install-recommends \ 9 | ca-certificates \ 10 | jq \ 11 | numactl \ 12 | ; \ 13 | if ! command -v ps > /dev/null; then \ 14 | apt-get install -y --no-install-recommends procps; \ 15 | fi; \ 16 | rm -rf /var/lib/apt/lists/* 17 | 18 | # grab gosu for easy step-down from root (https://github.com/tianon/gosu/releases) 19 | ENV GOSU_VERSION 1.12 20 | # grab "js-yaml" for parsing mongod's YAML config files (https://github.com/nodeca/js-yaml/releases) 21 | ENV JSYAML_VERSION 3.13.1 22 | 23 | RUN set -ex; \ 24 | \ 25 | savedAptMark="$(apt-mark showmanual)"; \ 26 | apt-get update; \ 27 | apt-get install -y --no-install-recommends \ 28 | wget \ 29 | ; \ 30 | if ! command -v gpg > /dev/null; then \ 31 | apt-get install -y --no-install-recommends gnupg dirmngr; \ 32 | savedAptMark="$savedAptMark gnupg dirmngr"; \ 33 | elif gpg --version | grep -q '^gpg (GnuPG) 1\.'; then \ 34 | # "This package provides support for HKPS keyservers." 
(GnuPG 1.x only) 35 | apt-get install -y --no-install-recommends gnupg-curl; \ 36 | fi; \ 37 | rm -rf /var/lib/apt/lists/*; \ 38 | \ 39 | dpkgArch="$(dpkg --print-architecture | awk -F- '{ print $NF }')"; \ 40 | wget -O /usr/local/bin/gosu "https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$dpkgArch"; \ 41 | wget -O /usr/local/bin/gosu.asc "https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$dpkgArch.asc"; \ 42 | export GNUPGHOME="$(mktemp -d)"; \ 43 | gpg --batch --keyserver hkps://keys.openpgp.org --recv-keys B42F6819007F00F88E364FD4036A9C25BF357DD4; \ 44 | gpg --batch --verify /usr/local/bin/gosu.asc /usr/local/bin/gosu; \ 45 | command -v gpgconf && gpgconf --kill all || :; \ 46 | rm -r "$GNUPGHOME" /usr/local/bin/gosu.asc; \ 47 | \ 48 | wget -O /js-yaml.js "https://github.com/nodeca/js-yaml/raw/${JSYAML_VERSION}/dist/js-yaml.js"; \ 49 | # TODO some sort of download verification here 50 | \ 51 | apt-mark auto '.*' > /dev/null; \ 52 | apt-mark manual $savedAptMark > /dev/null; \ 53 | apt-get purge -y --auto-remove -o APT::AutoRemove::RecommendsImportant=false; \ 54 | \ 55 | # smoke test 56 | chmod +x /usr/local/bin/gosu; \ 57 | gosu --version; \ 58 | gosu nobody true 59 | 60 | RUN mkdir /docker-entrypoint-initdb.d 61 | 62 | ENV GPG_KEYS 9DA31620334BD75D9DCB49F368818C72E52529D4 63 | RUN set -ex; \ 64 | export GNUPGHOME="$(mktemp -d)"; \ 65 | for key in $GPG_KEYS; do \ 66 | gpg --batch --keyserver ha.pool.sks-keyservers.net --recv-keys "$key"; \ 67 | done; \ 68 | gpg --batch --export $GPG_KEYS > /etc/apt/trusted.gpg.d/mongodb.gpg; \ 69 | command -v gpgconf && gpgconf --kill all || :; \ 70 | rm -r "$GNUPGHOME"; \ 71 | apt-key list 72 | 73 | # Allow build-time overrides (eg. to build image with MongoDB Enterprise version) 74 | # Options for MONGO_PACKAGE: mongodb-org OR mongodb-enterprise 75 | # Options for MONGO_REPO: repo.mongodb.org OR repo.mongodb.com 76 | # Example: docker build --build-arg MONGO_PACKAGE=mongodb-enterprise --build-arg MONGO_REPO=repo.mongodb.com . 
77 | ARG MONGO_PACKAGE=mongodb-org 78 | ARG MONGO_REPO=repo.mongodb.org 79 | ENV MONGO_PACKAGE=${MONGO_PACKAGE} MONGO_REPO=${MONGO_REPO} 80 | 81 | ENV MONGO_MAJOR 4.0 82 | ENV MONGO_VERSION 4.0.20 83 | # bashbrew-architectures:amd64 arm64v8 84 | RUN echo "deb http://$MONGO_REPO/apt/ubuntu xenial/${MONGO_PACKAGE%-unstable}/$MONGO_MAJOR multiverse" | tee "/etc/apt/sources.list.d/${MONGO_PACKAGE%-unstable}.list" 85 | 86 | RUN set -x \ 87 | # installing "mongodb-enterprise" pulls in "tzdata" which prompts for input 88 | && export DEBIAN_FRONTEND=noninteractive \ 89 | && apt-get update \ 90 | && apt-get install -y \ 91 | ${MONGO_PACKAGE}=$MONGO_VERSION \ 92 | ${MONGO_PACKAGE}-server=$MONGO_VERSION \ 93 | ${MONGO_PACKAGE}-shell=$MONGO_VERSION \ 94 | ${MONGO_PACKAGE}-mongos=$MONGO_VERSION \ 95 | ${MONGO_PACKAGE}-tools=$MONGO_VERSION \ 96 | && rm -rf /var/lib/apt/lists/* \ 97 | && rm -rf /var/lib/mongodb \ 98 | && mv /etc/mongod.conf /etc/mongod.conf.orig 99 | 100 | RUN mkdir -p /data/db /data/configdb \ 101 | && chown -R mongodb:mongodb /data/db /data/configdb 102 | VOLUME /data/db /data/configdb 103 | 104 | COPY docker-entrypoint.sh /usr/local/bin/ 105 | ENTRYPOINT ["docker-entrypoint.sh"] 106 | 107 | EXPOSE 27017 108 | CMD ["mongod"] -------------------------------------------------------------------------------- /code/docker/best-practise/7-good-bad-dockerfile/bad/Dockerfile: -------------------------------------------------------------------------------- 1 | # DO NOT USE THIS DOCKERFILE AS AN EXAMPLE, IT IS BROKEN 2 | FROM python:3 3 | 4 | COPY yourscript.py / 5 | 6 | RUN pip install flask 7 | 8 | CMD [ "python", "./yourscript.py" ] -------------------------------------------------------------------------------- /code/docker/best-practise/7-good-bad-dockerfile/bad/yourscript.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xiaopeng163/docker-k8s-lab/a3f5b9a2150a82bd587d3ca118fa6fff2e62b4a1/code/docker/best-practise/7-good-bad-dockerfile/bad/yourscript.py -------------------------------------------------------------------------------- /code/docker/best-practise/7-good-bad-dockerfile/good/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.7.3-stretch 2 | 3 | LABEL author="xxxxxx" 4 | 5 | COPY requirements.txt /tmp/ 6 | 7 | RUN pip install -r /tmp/requirements.txt 8 | 9 | RUN useradd --create-home appuser 10 | WORKDIR /home/appuser 11 | USER appuser 12 | 13 | COPY yourscript.py . 14 | 15 | CMD [ "python", "./yourscript.py" ] -------------------------------------------------------------------------------- /code/docker/best-practise/7-good-bad-dockerfile/good/requirements.txt: -------------------------------------------------------------------------------- 1 | flask==1.0 -------------------------------------------------------------------------------- /code/docker/best-practise/7-good-bad-dockerfile/good/yourscript.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xiaopeng163/docker-k8s-lab/a3f5b9a2150a82bd587d3ca118fa6fff2e62b4a1/code/docker/best-practise/7-good-bad-dockerfile/good/yourscript.py -------------------------------------------------------------------------------- /code/docker/docker-compose/flask-redis/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:2.7 2 | MAINTAINER Peng Xiao "xiaoquwl@gmail.com" 3 | COPY . 
/app 4 | WORKDIR /app 5 | RUN pip install -r requirements.txt 6 | EXPOSE 5000 7 | CMD [ "python", "app.py" ] -------------------------------------------------------------------------------- /code/docker/docker-compose/flask-redis/README.rst: -------------------------------------------------------------------------------- 1 | # Setup with docker-compose 2 | 3 | .. code-block:: bash 4 | 5 | $ docker-compose build 6 | $ docker-compose up 7 | 8 | Then check the app by: 9 | 10 | .. code-block:: bash 11 | 12 | ubuntu@docker-host-aws:~/docker-k8s-lab/code/docker/flask-redis$ curl http://127.0.0.1 13 | Hello Container World! I have been seen 1 times and my hostname is docker-host-aws. 14 | ubuntu@docker-host-aws:~/docker-k8s-lab/code/docker/flask-redis$ curl http://127.0.0.1 15 | -------------------------------------------------------------------------------- /code/docker/docker-compose/flask-redis/app.py: -------------------------------------------------------------------------------- 1 | from flask import Flask 2 | from redis import Redis 3 | import os 4 | import socket 5 | 6 | app = Flask(__name__) 7 | redis = Redis(host=os.environ.get('REDIS_HOST', 'redis'), port=6379) 8 | 9 | 10 | @app.route('/') 11 | def hello(): 12 | redis.incr('hits') 13 | return 'Hello Container World! I have been seen %s times and my hostname is %s.\n' % (redis.get('hits'),socket.gethostname()) 14 | 15 | 16 | if __name__ == "__main__": 17 | app.run(host="0.0.0.0", port=5000, debug=True) 18 | -------------------------------------------------------------------------------- /code/docker/docker-compose/flask-redis/docker-compose-v3.yml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | services: 3 | web: 4 | image: xiaopeng163/docker-flask-demo:2.0 5 | deploy: 6 | replicas: 3 7 | restart_policy: 8 | condition: on-failure 9 | resources: 10 | limits: 11 | cpus: "0.1" 12 | memory: 50M 13 | ports: 14 | - "80:5000" 15 | depends_on: 16 | - redis 17 | networks: 18 | - compose-demo-bridge 19 | 20 | redis: 21 | image: redis 22 | ports: ["6379"] 23 | deploy: 24 | placement: 25 | constraints: [node.role == manager] 26 | networks: 27 | - compose-demo-bridge 28 | 29 | networks: 30 | compose-demo-bridge: 31 | -------------------------------------------------------------------------------- /code/docker/docker-compose/flask-redis/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "2" 2 | 3 | services: 4 | web: 5 | build: . 
6 | ports: 7 | - "80:5000" 8 | links: 9 | - redis 10 | networks: 11 | - compose-demo-bridge 12 | 13 | redis: 14 | image: redis 15 | ports: ["6379"] 16 | networks: 17 | - compose-demo-bridge 18 | 19 | 20 | networks: 21 | compose-demo-bridge: 22 | -------------------------------------------------------------------------------- /code/docker/docker-compose/flask-redis/install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | sudo apt-get install gcc python-pip 4 | wget http://download.redis.io/redis-stable.tar.gz 5 | tar xvzf redis-stable.tar.gz 6 | cd redis-stable 7 | make MALLOC=libc 8 | sudo make install 9 | nohup redis-server & 10 | 11 | cd .. 12 | sudo pip install -r requirements.txt 13 | -------------------------------------------------------------------------------- /code/docker/docker-compose/flask-redis/requirements.txt: -------------------------------------------------------------------------------- 1 | flask 2 | redis -------------------------------------------------------------------------------- /code/docker/docker-compose/lb-scaling/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:2.7 2 | MAINTAINER Peng Xiao "xiaoquwl@gmail.com" 3 | COPY . /app 4 | WORKDIR /app 5 | RUN pip install -r requirements.txt 6 | EXPOSE 8080 7 | CMD [ "python", "app.py" ] -------------------------------------------------------------------------------- /code/docker/docker-compose/lb-scaling/README.rst: -------------------------------------------------------------------------------- 1 | # Setup with docker-compose 2 | 3 | .. code-block:: bash 4 | 5 | $ docker-compose build 6 | $ docker-compose up 7 | 8 | Then check the app by: 9 | 10 | .. code-block:: bash 11 | 12 | $ curl http://127.0.0.1 13 | Hello Container World! I have been seen 1 times and my hostname is docker-host-aws. 14 | $ curl http://127.0.0.1 15 | 16 | 17 | Auto scaling and load balancing 18 | 19 | .. code-block:: bash 20 | 21 | $ docker-compose scale web=3 22 | $ curl 127.0.0.1 23 | Hello Container World! I have been seen 9 times and my hostname is 6f71b2798411. 24 | $ curl 127.0.0.1 25 | Hello Container World! I have been seen 10 times and my hostname is ca279e7dda99. 26 | $ curl 127.0.0.1 27 | Hello Container World! I have been seen 11 times and my hostname is 0895f7205c8f. 28 | -------------------------------------------------------------------------------- /code/docker/docker-compose/lb-scaling/app.py: -------------------------------------------------------------------------------- 1 | from flask import Flask 2 | from redis import Redis 3 | import os 4 | import socket 5 | 6 | app = Flask(__name__) 7 | redis = Redis(host=os.environ.get('REDIS_HOST', 'redis'), port=6379) 8 | 9 | 10 | @app.route('/') 11 | def hello(): 12 | redis.incr('hits') 13 | return 'Hello Container World! I have been seen %s times and my hostname is %s.\n' % (redis.get('hits'),socket.gethostname()) 14 | 15 | 16 | if __name__ == "__main__": 17 | app.run(host="0.0.0.0", port=8080, debug=True) 18 | -------------------------------------------------------------------------------- /code/docker/docker-compose/lb-scaling/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "2" 2 | 3 | services: 4 | web: 5 | build: . 
6 | ports: ["8080"] 7 | links: 8 | - redis 9 | networks: 10 | - compose-demo-bridge 11 | 12 | redis: 13 | image: redis 14 | ports: ["6379"] 15 | networks: 16 | - compose-demo-bridge 17 | 18 | lb: 19 | image: dockercloud/haproxy 20 | ports: 21 | - 80:80 22 | links: 23 | - web 24 | networks: 25 | - compose-demo-bridge 26 | volumes: 27 | - /var/run/docker.sock:/var/run/docker.sock 28 | 29 | networks: 30 | compose-demo-bridge: 31 | -------------------------------------------------------------------------------- /code/docker/docker-compose/lb-scaling/requirements.txt: -------------------------------------------------------------------------------- 1 | flask 2 | redis -------------------------------------------------------------------------------- /code/docker/flask-hello-world/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:2.7 2 | MAINTAINER Peng Xiao "xiaoquwl@gmail.com" 3 | COPY . /app 4 | WORKDIR /app 5 | RUN pip install -r requirements.txt 6 | EXPOSE 5000 7 | CMD [ "python", "app.py" ] -------------------------------------------------------------------------------- /code/docker/flask-hello-world/app.py: -------------------------------------------------------------------------------- 1 | from flask import Flask 2 | app = Flask(__name__) 3 | 4 | @app.route("/") 5 | def hello(): 6 | return "Hello World!" 7 | 8 | if __name__ == "__main__": 9 | app.run() 10 | -------------------------------------------------------------------------------- /code/docker/flask-hello-world/requirements.txt: -------------------------------------------------------------------------------- 1 | Flask 2 | -------------------------------------------------------------------------------- /code/docker/flask-redis/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:2.7 2 | MAINTAINER Peng Xiao "xiaoquwl@gmail.com" 3 | COPY . /app 4 | WORKDIR /app 5 | RUN pip install -r requirements.txt 6 | EXPOSE 5000 7 | CMD [ "python", "app.py" ] -------------------------------------------------------------------------------- /code/docker/flask-redis/README.rst: -------------------------------------------------------------------------------- 1 | # Setup with no Docker 2 | 3 | .. code-block:: bash 4 | 5 | $ sh install.sh 6 | $ nohup python app.py 7 | 8 | Then check the app by: 9 | 10 | .. code-block:: bash 11 | 12 | ubuntu@docker-host-aws:~/docker-k8s-lab/code/docker/flask-redis$ curl http://0.0.0.0:5000 13 | Hello Container World! I have been seen 1 times and my hostname is docker-host-aws. 14 | ubuntu@docker-host-aws:~/docker-k8s-lab/code/docker/flask-redis$ curl http://0.0.0.0:5000 15 | Hello Container World! I have been seen 2 times and my hostname is docker-host-aws. 16 | ubuntu@docker-host-aws:~/docker-k8s-lab/code/docker/flask-redis$ curl http://0.0.0.0:5000 17 | Hello Container World! I have been seen 3 times and my hostname is docker-host-aws. 18 | 19 | -------------------------------------------------------------------------------- /code/docker/flask-redis/app.py: -------------------------------------------------------------------------------- 1 | from flask import Flask 2 | from redis import Redis 3 | import os 4 | import socket 5 | 6 | app = Flask(__name__) 7 | redis = Redis(host=os.environ.get('REDIS_HOST', '127.0.0.1'), port=6379) 8 | 9 | 10 | @app.route('/') 11 | def hello(): 12 | redis.incr('hits') 13 | return 'Hello Container World! 
I have been seen %s times and my hostname is %s.\n' % (redis.get('hits'),socket.gethostname()) 14 | 15 | 16 | if __name__ == "__main__": 17 | app.run(host="0.0.0.0", port=5000, debug=True) 18 | -------------------------------------------------------------------------------- /code/docker/flask-redis/install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | sudo apt-get install gcc python-pip 4 | wget http://download.redis.io/redis-stable.tar.gz 5 | tar xvzf redis-stable.tar.gz 6 | cd redis-stable 7 | make MALLOC=libc 8 | sudo make install 9 | nohup redis-server & 10 | 11 | cd .. 12 | sudo pip install -r requirements.txt 13 | -------------------------------------------------------------------------------- /code/docker/flask-redis/requirements.txt: -------------------------------------------------------------------------------- 1 | flask 2 | redis -------------------------------------------------------------------------------- /code/kubernetes/flask-redis/nginx-deploy.yml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: nginx 5 | spec: 6 | replicas: 3 7 | template: 8 | metadata: 9 | labels: 10 | app: nginx 11 | spec: 12 | containers: 13 | - name: nginx 14 | image: nginx 15 | ports: 16 | - containerPort: 80 17 | -------------------------------------------------------------------------------- /code/kubernetes/flask-redis/nginx-rc.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: nginx 5 | spec: 6 | replicas: 3 7 | selector: 8 | app: nginx 9 | template: 10 | metadata: 11 | name: nginx 12 | labels: 13 | app: nginx 14 | spec: 15 | containers: 16 | - name: nginx 17 | image: nginx 18 | ports: 19 | - containerPort: 80 -------------------------------------------------------------------------------- /code/kubernetes/flask-redis/redis.yml: -------------------------------------------------------------------------------- 1 | apiVersion: "v1" 2 | kind: Pod 3 | metadata: 4 | name: redis 5 | labels: 6 | name: redis 7 | app: demo 8 | spec: 9 | containers: 10 | - name: redis 11 | image: redis:latest 12 | ports: 13 | - containerPort: 6379 14 | protocol: TCP -------------------------------------------------------------------------------- /code/kubernetes/flask-redis/web.yml: -------------------------------------------------------------------------------- 1 | apiVersion: "v1" 2 | kind: Pod 3 | metadata: 4 | name: web 5 | labels: 6 | name: web 7 | app: demo 8 | spec: 9 | containers: 10 | - name: web 11 | image: xiaopeng163/docker-flask-demo:1.0 12 | ports: 13 | - containerPort: 5000 14 | name: http 15 | protocol: TCP -------------------------------------------------------------------------------- /code/kubernetes/mysql_replication/mysql-master-rc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: mysql-master 5 | labels: 6 | name: mysql-master 7 | spec: 8 | replicas: 1 9 | selector: 10 | name: mysql-master 11 | template: 12 | metadata: 13 | labels: 14 | name: mysql-master 15 | spec: 16 | containers: 17 | - name: master 18 | image: paulliu/mysql-master:0.1 19 | ports: 20 | - containerPort: 3306 21 | volumeMounts: 22 | - name: mysql-data 23 | mountPath: /var/lib/mysql 24 | env: 25 | - name: MYSQL_ROOT_PASSWORD 26 | value: "test" 
27 | - name: MYSQL_REPLICATION_USER 28 | value: 'demo' 29 | - name: MYSQL_REPLICATION_PASSWORD 30 | value: 'demo' 31 | volumes: 32 | - name: mysql-data 33 | hostPath: 34 | path: /var/lib/mysql 35 | -------------------------------------------------------------------------------- /code/kubernetes/mysql_replication/mysql-master-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: mysql-master 5 | labels: 6 | name: mysql-master 7 | spec: 8 | ports: 9 | - port: 3306 10 | targetPort: 3306 11 | selector: 12 | name: mysql-master -------------------------------------------------------------------------------- /code/kubernetes/mysql_replication/mysql-slave-rc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: mysql-slave 5 | labels: 6 | name: mysql-slave 7 | spec: 8 | replicas: 1 9 | selector: 10 | name: mysql-slave 11 | template: 12 | metadata: 13 | labels: 14 | name: mysql-slave 15 | spec: 16 | containers: 17 | - name: slave 18 | image: paulliu/mysql-slave:0.1 19 | ports: 20 | - containerPort: 3306 21 | env: 22 | - name: MYSQL_ROOT_PASSWORD 23 | value: "test" 24 | - name: MYSQL_REPLICATION_USER 25 | value: 'demo' 26 | - name: MYSQL_REPLICATION_PASSWORD 27 | value: 'demo' 28 | -------------------------------------------------------------------------------- /code/kubernetes/mysql_replication/mysql-slave-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: mysql-slave 5 | labels: 6 | name: mysql-slave 7 | spec: 8 | ports: 9 | - port: 3306 10 | targetPort: 3306 11 | selector: 12 | name: mysql-slave -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = build 9 | 10 | # Internal variables. 
11 | PAPEROPT_a4 = -D latex_paper_size=a4 12 | PAPEROPT_letter = -D latex_paper_size=letter 13 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source 14 | # the i18n builder cannot share the environment and doctrees with the others 15 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source 16 | 17 | .PHONY: help 18 | help: 19 | @echo "Please use \`make ' where is one of" 20 | @echo " html to make standalone HTML files" 21 | @echo " dirhtml to make HTML files named index.html in directories" 22 | @echo " singlehtml to make a single large HTML file" 23 | @echo " pickle to make pickle files" 24 | @echo " json to make JSON files" 25 | @echo " htmlhelp to make HTML files and a HTML help project" 26 | @echo " qthelp to make HTML files and a qthelp project" 27 | @echo " applehelp to make an Apple Help Book" 28 | @echo " devhelp to make HTML files and a Devhelp project" 29 | @echo " epub to make an epub" 30 | @echo " epub3 to make an epub3" 31 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 32 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 33 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" 34 | @echo " text to make text files" 35 | @echo " man to make manual pages" 36 | @echo " texinfo to make Texinfo files" 37 | @echo " info to make Texinfo files and run them through makeinfo" 38 | @echo " gettext to make PO message catalogs" 39 | @echo " changes to make an overview of all changed/added/deprecated items" 40 | @echo " xml to make Docutils-native XML files" 41 | @echo " pseudoxml to make pseudoxml-XML files for display purposes" 42 | @echo " linkcheck to check all external links for integrity" 43 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 44 | @echo " coverage to run coverage check of the documentation (if enabled)" 45 | @echo " dummy to check syntax errors of document sources" 46 | 47 | .PHONY: clean 48 | clean: 49 | rm -rf $(BUILDDIR)/* 50 | 51 | .PHONY: html 52 | html: 53 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 54 | @echo 55 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 56 | 57 | .PHONY: dirhtml 58 | dirhtml: 59 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 60 | @echo 61 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 62 | 63 | .PHONY: singlehtml 64 | singlehtml: 65 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 66 | @echo 67 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 68 | 69 | .PHONY: pickle 70 | pickle: 71 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 72 | @echo 73 | @echo "Build finished; now you can process the pickle files." 74 | 75 | .PHONY: json 76 | json: 77 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 78 | @echo 79 | @echo "Build finished; now you can process the JSON files." 80 | 81 | .PHONY: htmlhelp 82 | htmlhelp: 83 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 84 | @echo 85 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 86 | ".hhp project file in $(BUILDDIR)/htmlhelp." 
87 | 88 | .PHONY: qthelp 89 | qthelp: 90 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 91 | @echo 92 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 93 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 94 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/DockerKubernetesLab.qhcp" 95 | @echo "To view the help file:" 96 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/DockerKubernetesLab.qhc" 97 | 98 | .PHONY: applehelp 99 | applehelp: 100 | $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp 101 | @echo 102 | @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." 103 | @echo "N.B. You won't be able to view it unless you put it in" \ 104 | "~/Library/Documentation/Help or install it in your application" \ 105 | "bundle." 106 | 107 | .PHONY: devhelp 108 | devhelp: 109 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 110 | @echo 111 | @echo "Build finished." 112 | @echo "To view the help file:" 113 | @echo "# mkdir -p $$HOME/.local/share/devhelp/DockerKubernetesLab" 114 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/DockerKubernetesLab" 115 | @echo "# devhelp" 116 | 117 | .PHONY: epub 118 | epub: 119 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 120 | @echo 121 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 122 | 123 | .PHONY: epub3 124 | epub3: 125 | $(SPHINXBUILD) -b epub3 $(ALLSPHINXOPTS) $(BUILDDIR)/epub3 126 | @echo 127 | @echo "Build finished. The epub3 file is in $(BUILDDIR)/epub3." 128 | 129 | .PHONY: latex 130 | latex: 131 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 132 | @echo 133 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 134 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 135 | "(use \`make latexpdf' here to do that automatically)." 136 | 137 | .PHONY: latexpdf 138 | latexpdf: 139 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 140 | @echo "Running LaTeX files through pdflatex..." 141 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 142 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 143 | 144 | .PHONY: latexpdfja 145 | latexpdfja: 146 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 147 | @echo "Running LaTeX files through platex and dvipdfmx..." 148 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 149 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 150 | 151 | .PHONY: text 152 | text: 153 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 154 | @echo 155 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 156 | 157 | .PHONY: man 158 | man: 159 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 160 | @echo 161 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 162 | 163 | .PHONY: texinfo 164 | texinfo: 165 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 166 | @echo 167 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 168 | @echo "Run \`make' in that directory to run these through makeinfo" \ 169 | "(use \`make info' here to do that automatically)." 170 | 171 | .PHONY: info 172 | info: 173 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 174 | @echo "Running Texinfo files through makeinfo..." 175 | make -C $(BUILDDIR)/texinfo info 176 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 
177 | 178 | .PHONY: gettext 179 | gettext: 180 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 181 | @echo 182 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 183 | 184 | .PHONY: changes 185 | changes: 186 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 187 | @echo 188 | @echo "The overview file is in $(BUILDDIR)/changes." 189 | 190 | .PHONY: linkcheck 191 | linkcheck: 192 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 193 | @echo 194 | @echo "Link check complete; look for any errors in the above output " \ 195 | "or in $(BUILDDIR)/linkcheck/output.txt." 196 | 197 | .PHONY: doctest 198 | doctest: 199 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 200 | @echo "Testing of doctests in the sources finished, look at the " \ 201 | "results in $(BUILDDIR)/doctest/output.txt." 202 | 203 | .PHONY: coverage 204 | coverage: 205 | $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage 206 | @echo "Testing of coverage in the sources finished, look at the " \ 207 | "results in $(BUILDDIR)/coverage/python.txt." 208 | 209 | .PHONY: xml 210 | xml: 211 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml 212 | @echo 213 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml." 214 | 215 | .PHONY: pseudoxml 216 | pseudoxml: 217 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml 218 | @echo 219 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 220 | 221 | .PHONY: dummy 222 | dummy: 223 | $(SPHINXBUILD) -b dummy $(ALLSPHINXOPTS) $(BUILDDIR)/dummy 224 | @echo 225 | @echo "Build finished. Dummy builder generates no files." 226 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | REM Command file for Sphinx documentation 4 | 5 | if "%SPHINXBUILD%" == "" ( 6 | set SPHINXBUILD=sphinx-build 7 | ) 8 | set BUILDDIR=build 9 | set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% source 10 | set I18NSPHINXOPTS=%SPHINXOPTS% source 11 | if NOT "%PAPER%" == "" ( 12 | set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% 13 | set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% 14 | ) 15 | 16 | if "%1" == "" goto help 17 | 18 | if "%1" == "help" ( 19 | :help 20 | echo.Please use `make ^` where ^ is one of 21 | echo. html to make standalone HTML files 22 | echo. dirhtml to make HTML files named index.html in directories 23 | echo. singlehtml to make a single large HTML file 24 | echo. pickle to make pickle files 25 | echo. json to make JSON files 26 | echo. htmlhelp to make HTML files and a HTML help project 27 | echo. qthelp to make HTML files and a qthelp project 28 | echo. devhelp to make HTML files and a Devhelp project 29 | echo. epub to make an epub 30 | echo. epub3 to make an epub3 31 | echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter 32 | echo. text to make text files 33 | echo. man to make manual pages 34 | echo. texinfo to make Texinfo files 35 | echo. gettext to make PO message catalogs 36 | echo. changes to make an overview over all changed/added/deprecated items 37 | echo. xml to make Docutils-native XML files 38 | echo. pseudoxml to make pseudoxml-XML files for display purposes 39 | echo. linkcheck to check all external links for integrity 40 | echo. doctest to run all doctests embedded in the documentation if enabled 41 | echo. 
coverage to run coverage check of the documentation if enabled 42 | echo. dummy to check syntax errors of document sources 43 | goto end 44 | ) 45 | 46 | if "%1" == "clean" ( 47 | for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i 48 | del /q /s %BUILDDIR%\* 49 | goto end 50 | ) 51 | 52 | 53 | REM Check if sphinx-build is available and fallback to Python version if any 54 | %SPHINXBUILD% 1>NUL 2>NUL 55 | if errorlevel 9009 goto sphinx_python 56 | goto sphinx_ok 57 | 58 | :sphinx_python 59 | 60 | set SPHINXBUILD=python -m sphinx.__init__ 61 | %SPHINXBUILD% 2> nul 62 | if errorlevel 9009 ( 63 | echo. 64 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 65 | echo.installed, then set the SPHINXBUILD environment variable to point 66 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 67 | echo.may add the Sphinx directory to PATH. 68 | echo. 69 | echo.If you don't have Sphinx installed, grab it from 70 | echo.http://sphinx-doc.org/ 71 | exit /b 1 72 | ) 73 | 74 | :sphinx_ok 75 | 76 | 77 | if "%1" == "html" ( 78 | %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html 79 | if errorlevel 1 exit /b 1 80 | echo. 81 | echo.Build finished. The HTML pages are in %BUILDDIR%/html. 82 | goto end 83 | ) 84 | 85 | if "%1" == "dirhtml" ( 86 | %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml 87 | if errorlevel 1 exit /b 1 88 | echo. 89 | echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. 90 | goto end 91 | ) 92 | 93 | if "%1" == "singlehtml" ( 94 | %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml 95 | if errorlevel 1 exit /b 1 96 | echo. 97 | echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. 98 | goto end 99 | ) 100 | 101 | if "%1" == "pickle" ( 102 | %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle 103 | if errorlevel 1 exit /b 1 104 | echo. 105 | echo.Build finished; now you can process the pickle files. 106 | goto end 107 | ) 108 | 109 | if "%1" == "json" ( 110 | %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json 111 | if errorlevel 1 exit /b 1 112 | echo. 113 | echo.Build finished; now you can process the JSON files. 114 | goto end 115 | ) 116 | 117 | if "%1" == "htmlhelp" ( 118 | %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp 119 | if errorlevel 1 exit /b 1 120 | echo. 121 | echo.Build finished; now you can run HTML Help Workshop with the ^ 122 | .hhp project file in %BUILDDIR%/htmlhelp. 123 | goto end 124 | ) 125 | 126 | if "%1" == "qthelp" ( 127 | %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp 128 | if errorlevel 1 exit /b 1 129 | echo. 130 | echo.Build finished; now you can run "qcollectiongenerator" with the ^ 131 | .qhcp project file in %BUILDDIR%/qthelp, like this: 132 | echo.^> qcollectiongenerator %BUILDDIR%\qthelp\DockerKubernetesLab.qhcp 133 | echo.To view the help file: 134 | echo.^> assistant -collectionFile %BUILDDIR%\qthelp\DockerKubernetesLab.ghc 135 | goto end 136 | ) 137 | 138 | if "%1" == "devhelp" ( 139 | %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp 140 | if errorlevel 1 exit /b 1 141 | echo. 142 | echo.Build finished. 143 | goto end 144 | ) 145 | 146 | if "%1" == "epub" ( 147 | %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub 148 | if errorlevel 1 exit /b 1 149 | echo. 150 | echo.Build finished. The epub file is in %BUILDDIR%/epub. 151 | goto end 152 | ) 153 | 154 | if "%1" == "epub3" ( 155 | %SPHINXBUILD% -b epub3 %ALLSPHINXOPTS% %BUILDDIR%/epub3 156 | if errorlevel 1 exit /b 1 157 | echo. 158 | echo.Build finished. 
The epub3 file is in %BUILDDIR%/epub3. 159 | goto end 160 | ) 161 | 162 | if "%1" == "latex" ( 163 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 164 | if errorlevel 1 exit /b 1 165 | echo. 166 | echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. 167 | goto end 168 | ) 169 | 170 | if "%1" == "latexpdf" ( 171 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 172 | cd %BUILDDIR%/latex 173 | make all-pdf 174 | cd %~dp0 175 | echo. 176 | echo.Build finished; the PDF files are in %BUILDDIR%/latex. 177 | goto end 178 | ) 179 | 180 | if "%1" == "latexpdfja" ( 181 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 182 | cd %BUILDDIR%/latex 183 | make all-pdf-ja 184 | cd %~dp0 185 | echo. 186 | echo.Build finished; the PDF files are in %BUILDDIR%/latex. 187 | goto end 188 | ) 189 | 190 | if "%1" == "text" ( 191 | %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text 192 | if errorlevel 1 exit /b 1 193 | echo. 194 | echo.Build finished. The text files are in %BUILDDIR%/text. 195 | goto end 196 | ) 197 | 198 | if "%1" == "man" ( 199 | %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man 200 | if errorlevel 1 exit /b 1 201 | echo. 202 | echo.Build finished. The manual pages are in %BUILDDIR%/man. 203 | goto end 204 | ) 205 | 206 | if "%1" == "texinfo" ( 207 | %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo 208 | if errorlevel 1 exit /b 1 209 | echo. 210 | echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. 211 | goto end 212 | ) 213 | 214 | if "%1" == "gettext" ( 215 | %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale 216 | if errorlevel 1 exit /b 1 217 | echo. 218 | echo.Build finished. The message catalogs are in %BUILDDIR%/locale. 219 | goto end 220 | ) 221 | 222 | if "%1" == "changes" ( 223 | %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes 224 | if errorlevel 1 exit /b 1 225 | echo. 226 | echo.The overview file is in %BUILDDIR%/changes. 227 | goto end 228 | ) 229 | 230 | if "%1" == "linkcheck" ( 231 | %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck 232 | if errorlevel 1 exit /b 1 233 | echo. 234 | echo.Link check complete; look for any errors in the above output ^ 235 | or in %BUILDDIR%/linkcheck/output.txt. 236 | goto end 237 | ) 238 | 239 | if "%1" == "doctest" ( 240 | %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest 241 | if errorlevel 1 exit /b 1 242 | echo. 243 | echo.Testing of doctests in the sources finished, look at the ^ 244 | results in %BUILDDIR%/doctest/output.txt. 245 | goto end 246 | ) 247 | 248 | if "%1" == "coverage" ( 249 | %SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage 250 | if errorlevel 1 exit /b 1 251 | echo. 252 | echo.Testing of coverage in the sources finished, look at the ^ 253 | results in %BUILDDIR%/coverage/python.txt. 254 | goto end 255 | ) 256 | 257 | if "%1" == "xml" ( 258 | %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml 259 | if errorlevel 1 exit /b 1 260 | echo. 261 | echo.Build finished. The XML files are in %BUILDDIR%/xml. 262 | goto end 263 | ) 264 | 265 | if "%1" == "pseudoxml" ( 266 | %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml 267 | if errorlevel 1 exit /b 1 268 | echo. 269 | echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml. 270 | goto end 271 | ) 272 | 273 | if "%1" == "dummy" ( 274 | %SPHINXBUILD% -b dummy %ALLSPHINXOPTS% %BUILDDIR%/dummy 275 | if errorlevel 1 exit /b 1 276 | echo. 277 | echo.Build finished. Dummy builder generates no files. 
278 | goto end 279 | ) 280 | 281 | :end 282 | -------------------------------------------------------------------------------- /docs/source/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # Docker Kubernetes Lab documentation build configuration file, created by 4 | # sphinx-quickstart on Fri Nov 25 23:35:48 2016. 5 | # 6 | # This file is execfile()d with the current directory set to its 7 | # containing dir. 8 | # 9 | # Note that not all possible configuration values are present in this 10 | # autogenerated file. 11 | # 12 | # All configuration values have a default; values that are commented out 13 | # serve to show the default. 14 | 15 | # If extensions (or modules to document with autodoc) are in another directory, 16 | # add these directories to sys.path here. If the directory is relative to the 17 | # documentation root, use os.path.abspath to make it absolute, like shown here. 18 | # 19 | # import os 20 | # import sys 21 | # sys.path.insert(0, os.path.abspath('.')) 22 | 23 | # -- General configuration ------------------------------------------------ 24 | 25 | # If your documentation needs a minimal Sphinx version, state it here. 26 | # 27 | # needs_sphinx = '1.0' 28 | 29 | # Add any Sphinx extension module names here, as strings. They can be 30 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 31 | # ones. 32 | extensions = [] 33 | 34 | # Add any paths that contain templates here, relative to this directory. 35 | templates_path = ['_templates'] 36 | 37 | # The suffix(es) of source filenames. 38 | # You can specify multiple suffix as a list of string: 39 | # 40 | # source_suffix = ['.rst', '.md'] 41 | source_suffix = '.rst' 42 | 43 | # The encoding of source files. 44 | # 45 | # source_encoding = 'utf-8-sig' 46 | 47 | # The master toctree document. 48 | master_doc = 'index' 49 | 50 | # General information about the project. 51 | project = u'Docker Kubernetes Lab' 52 | copyright = u'2016, Peng Xiao' 53 | author = u'Peng Xiao' 54 | 55 | # The version info for the project you're documenting, acts as replacement for 56 | # |version| and |release|, also used in various other places throughout the 57 | # built documents. 58 | # 59 | # The short X.Y version. 60 | version = u'0.1' 61 | # The full version, including alpha/beta/rc tags. 62 | release = u'0.1' 63 | 64 | # The language for content autogenerated by Sphinx. Refer to documentation 65 | # for a list of supported languages. 66 | # 67 | # This is also used if you do content translation via gettext catalogs. 68 | # Usually you set "language" from the command line for these cases. 69 | language = None 70 | 71 | # There are two options for replacing |today|: either, you set today to some 72 | # non-false value, then it is used: 73 | # 74 | # today = '' 75 | # 76 | # Else, today_fmt is used as the format for a strftime call. 77 | # 78 | # today_fmt = '%B %d, %Y' 79 | 80 | # List of patterns, relative to source directory, that match files and 81 | # directories to ignore when looking for source files. 82 | # This patterns also effect to html_static_path and html_extra_path 83 | exclude_patterns = [] 84 | 85 | # The reST default role (used for this markup: `text`) to use for all 86 | # documents. 87 | # 88 | # default_role = None 89 | 90 | # If true, '()' will be appended to :func: etc. cross-reference text. 
91 | # 92 | # add_function_parentheses = True 93 | 94 | # If true, the current module name will be prepended to all description 95 | # unit titles (such as .. function::). 96 | # 97 | # add_module_names = True 98 | 99 | # If true, sectionauthor and moduleauthor directives will be shown in the 100 | # output. They are ignored by default. 101 | # 102 | # show_authors = False 103 | 104 | # The name of the Pygments (syntax highlighting) style to use. 105 | pygments_style = 'sphinx' 106 | 107 | # A list of ignored prefixes for module index sorting. 108 | # modindex_common_prefix = [] 109 | 110 | # If true, keep warnings as "system message" paragraphs in the built documents. 111 | # keep_warnings = False 112 | 113 | # If true, `todo` and `todoList` produce output, else they produce nothing. 114 | todo_include_todos = False 115 | 116 | 117 | # -- Options for HTML output ---------------------------------------------- 118 | 119 | # The theme to use for HTML and HTML Help pages. See the documentation for 120 | # a list of builtin themes. 121 | # 122 | html_theme = 'sphinx_rtd_theme' 123 | 124 | # Theme options are theme-specific and customize the look and feel of a theme 125 | # further. For a list of options available for each theme, see the 126 | # documentation. 127 | # 128 | # html_theme_options = {} 129 | 130 | # Add any paths that contain custom themes here, relative to this directory. 131 | # html_theme_path = [] 132 | 133 | # The name for this set of Sphinx documents. 134 | # " v documentation" by default. 135 | # 136 | # html_title = u'Docker Kubernetes Lab v0.1' 137 | 138 | # A shorter title for the navigation bar. Default is the same as html_title. 139 | # 140 | # html_short_title = None 141 | 142 | # The name of an image file (relative to this directory) to place at the top 143 | # of the sidebar. 144 | # 145 | # html_logo = None 146 | 147 | # The name of an image file (relative to this directory) to use as a favicon of 148 | # the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 149 | # pixels large. 150 | # 151 | # html_favicon = None 152 | 153 | # Add any paths that contain custom static files (such as style sheets) here, 154 | # relative to this directory. They are copied after the builtin static files, 155 | # so a file named "default.css" will overwrite the builtin "default.css". 156 | html_static_path = ['_static'] 157 | 158 | # Add any extra paths that contain custom files (such as robots.txt or 159 | # .htaccess) here, relative to this directory. These files are copied 160 | # directly to the root of the documentation. 161 | # 162 | # html_extra_path = [] 163 | 164 | # If not None, a 'Last updated on:' timestamp is inserted at every page 165 | # bottom, using the given strftime format. 166 | # The empty string is equivalent to '%b %d, %Y'. 167 | # 168 | # html_last_updated_fmt = None 169 | 170 | # If true, SmartyPants will be used to convert quotes and dashes to 171 | # typographically correct entities. 172 | # 173 | # html_use_smartypants = True 174 | 175 | # Custom sidebar templates, maps document names to template names. 176 | # 177 | # html_sidebars = {} 178 | 179 | # Additional templates that should be rendered to pages, maps page names to 180 | # template names. 181 | # 182 | # html_additional_pages = {} 183 | 184 | # If false, no module index is generated. 185 | # 186 | # html_domain_indices = True 187 | 188 | # If false, no index is generated. 
189 | # 190 | # html_use_index = True 191 | 192 | # If true, the index is split into individual pages for each letter. 193 | # 194 | # html_split_index = False 195 | 196 | # If true, links to the reST sources are added to the pages. 197 | # 198 | # html_show_sourcelink = True 199 | 200 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 201 | # 202 | # html_show_sphinx = True 203 | 204 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 205 | # 206 | # html_show_copyright = True 207 | 208 | # If true, an OpenSearch description file will be output, and all pages will 209 | # contain a tag referring to it. The value of this option must be the 210 | # base URL from which the finished HTML is served. 211 | # 212 | # html_use_opensearch = '' 213 | 214 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 215 | # html_file_suffix = None 216 | 217 | # Language to be used for generating the HTML full-text search index. 218 | # Sphinx supports the following languages: 219 | # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' 220 | # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh' 221 | # 222 | # html_search_language = 'en' 223 | 224 | # A dictionary with options for the search language support, empty by default. 225 | # 'ja' uses this config value. 226 | # 'zh' user can custom change `jieba` dictionary path. 227 | # 228 | # html_search_options = {'type': 'default'} 229 | 230 | # The name of a javascript file (relative to the configuration directory) that 231 | # implements a search results scorer. If empty, the default will be used. 232 | # 233 | # html_search_scorer = 'scorer.js' 234 | 235 | # Output file base name for HTML help builder. 236 | htmlhelp_basename = 'DockerKubernetesLabdoc' 237 | 238 | # -- Options for LaTeX output --------------------------------------------- 239 | 240 | latex_elements = { 241 | # The paper size ('letterpaper' or 'a4paper'). 242 | # 243 | # 'papersize': 'letterpaper', 244 | 245 | # The font size ('10pt', '11pt' or '12pt'). 246 | # 247 | # 'pointsize': '10pt', 248 | 249 | # Additional stuff for the LaTeX preamble. 250 | # 251 | # 'preamble': '', 252 | 253 | # Latex figure (float) alignment 254 | # 255 | # 'figure_align': 'htbp', 256 | } 257 | 258 | # Grouping the document tree into LaTeX files. List of tuples 259 | # (source start file, target name, title, 260 | # author, documentclass [howto, manual, or own class]). 261 | latex_documents = [ 262 | (master_doc, 'DockerKubernetesLab.tex', u'Docker Kubernetes Lab', 263 | u'Peng Xiao', 'manual'), 264 | ] 265 | 266 | # The name of an image file (relative to this directory) to place at the top of 267 | # the title page. 268 | # 269 | # latex_logo = None 270 | 271 | # For "manual" documents, if this is true, then toplevel headings are parts, 272 | # not chapters. 273 | # 274 | # latex_use_parts = False 275 | 276 | # If true, show page references after internal links. 277 | # 278 | # latex_show_pagerefs = False 279 | 280 | # If true, show URL addresses after external links. 281 | # 282 | # latex_show_urls = False 283 | 284 | # Documents to append as an appendix to all manuals. 285 | # 286 | # latex_appendices = [] 287 | 288 | # It false, will not define \strong, \code, itleref, \crossref ... but only 289 | # \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added 290 | # packages. 291 | # 292 | # latex_keep_old_macro_names = True 293 | 294 | # If false, no module index is generated. 
295 | # 296 | # latex_domain_indices = True 297 | 298 | 299 | # -- Options for manual page output --------------------------------------- 300 | 301 | # One entry per manual page. List of tuples 302 | # (source start file, name, description, authors, manual section). 303 | man_pages = [ 304 | (master_doc, 'dockerkuberneteslab', u'Docker Kubernetes Lab', 305 | [author], 1) 306 | ] 307 | 308 | # If true, show URL addresses after external links. 309 | # 310 | # man_show_urls = False 311 | 312 | 313 | # -- Options for Texinfo output ------------------------------------------- 314 | 315 | # Grouping the document tree into Texinfo files. List of tuples 316 | # (source start file, target name, title, author, 317 | # dir menu entry, description, category) 318 | texinfo_documents = [ 319 | (master_doc, 'DockerKubernetesLab', u'Docker Kubernetes Lab', 320 | author, 'DockerKubernetesLab', 'One line description of project.', 321 | 'Miscellaneous'), 322 | ] 323 | 324 | # Documents to append as an appendix to all manuals. 325 | # 326 | # texinfo_appendices = [] 327 | 328 | # If false, no module index is generated. 329 | # 330 | # texinfo_domain_indices = True 331 | 332 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 333 | # 334 | # texinfo_show_urls = 'footnote' 335 | 336 | # If true, do not generate a @detailmenu in the "Top" node's menu. 337 | # 338 | # texinfo_no_detailmenu = False 339 | -------------------------------------------------------------------------------- /docs/source/coreos.rst: -------------------------------------------------------------------------------- 1 | CoreOS 2 | ======= 3 | 4 | .. toctree:: 5 | :maxdepth: 2 6 | -------------------------------------------------------------------------------- /docs/source/docker.rst: -------------------------------------------------------------------------------- 1 | Docker 2 | ======= 3 | 4 | .. 
toctree:: 5 | :maxdepth: 1 6 | 7 | docker/docker-engine 8 | docker/docker-machine 9 | docker/docker-machine-aws 10 | docker/docker-cli 11 | docker/docker-base-image 12 | docker/docker-network 13 | docker/netns 14 | docker/bridged-network 15 | docker/port-mapping 16 | docker/customize-bridge 17 | docker/create-new-bridge 18 | docker/host-network 19 | docker/docker-etcd 20 | docker/docker-ovs 21 | docker/docker-calico 22 | docker/docker-flannel 23 | docker/docker-contiv 24 | docker/docker-compose 25 | docker/docker-compose-lb-scale 26 | docker/docker-swarm 27 | docker/docker-swarm-service 28 | docker/docker-swarm-lb-scale 29 | docker/docker-swarm-topo 30 | -------------------------------------------------------------------------------- /docs/source/docker/_image/cnm-model.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xiaopeng163/docker-k8s-lab/a3f5b9a2150a82bd587d3ca118fa6fff2e62b4a1/docs/source/docker/_image/cnm-model.jpg -------------------------------------------------------------------------------- /docs/source/docker/_image/docker-compose.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xiaopeng163/docker-k8s-lab/a3f5b9a2150a82bd587d3ca118fa6fff2e62b4a1/docs/source/docker/_image/docker-compose.png -------------------------------------------------------------------------------- /docs/source/docker/_image/docker-flannel.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xiaopeng163/docker-k8s-lab/a3f5b9a2150a82bd587d3ca118fa6fff2e62b4a1/docs/source/docker/_image/docker-flannel.png -------------------------------------------------------------------------------- /docs/source/docker/_image/docker-overlay.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xiaopeng163/docker-k8s-lab/a3f5b9a2150a82bd587d3ca118fa6fff2e62b4a1/docs/source/docker/_image/docker-overlay.png -------------------------------------------------------------------------------- /docs/source/docker/_image/docker-swarm-visual.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xiaopeng163/docker-k8s-lab/a3f5b9a2150a82bd587d3ca118fa6fff2e62b4a1/docs/source/docker/_image/docker-swarm-visual.png -------------------------------------------------------------------------------- /docs/source/docker/_image/docker-turtles-communication.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xiaopeng163/docker-k8s-lab/a3f5b9a2150a82bd587d3ca118fa6fff2e62b4a1/docs/source/docker/_image/docker-turtles-communication.jpg -------------------------------------------------------------------------------- /docs/source/docker/_image/ovs-gre-docker.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xiaopeng163/docker-k8s-lab/a3f5b9a2150a82bd587d3ca118fa6fff2e62b4a1/docs/source/docker/_image/ovs-gre-docker.png -------------------------------------------------------------------------------- /docs/source/docker/_image/two-container-network.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xiaopeng163/docker-k8s-lab/a3f5b9a2150a82bd587d3ca118fa6fff2e62b4a1/docs/source/docker/_image/two-container-network.png 
-------------------------------------------------------------------------------- /docs/source/docker/create-new-bridge.rst: -------------------------------------------------------------------------------- 1 | Create a new bridge network and connect with container 2 | ======================================================= 3 | 4 | Lab Environments 5 | ----------------- 6 | 7 | We use the docker hosts created by docker-machine on Amazon AWS. 8 | 9 | .. code-block:: bash 10 | 11 | $ docker-machine ls 12 | NAME ACTIVE DRIVER STATE URL SWARM DOCKER ERRORS 13 | docker-host-aws - amazonec2 Running tcp://52.53.176.55:2376 v1.13.0 14 | (docker-k8s-lab)➜ docker-k8s-lab git:(master) ✗ docker ssh docker-host-aws 15 | docker: 'ssh' is not a docker command. 16 | See 'docker --help' 17 | $ docker-machine ssh docker-host-aws 18 | ubuntu@docker-host-aws:~$ docker version 19 | Client: 20 | Version: 1.13.0 21 | API version: 1.25 22 | Go version: go1.7.3 23 | Git commit: 49bf474 24 | Built: Tue Jan 17 09:50:17 2017 25 | OS/Arch: linux/amd64 26 | 27 | Server: 28 | Version: 1.13.0 29 | API version: 1.25 (minimum version 1.12) 30 | Go version: go1.7.3 31 | Git commit: 49bf474 32 | Built: Tue Jan 17 09:50:17 2017 33 | OS/Arch: linux/amd64 34 | Experimental: false 35 | ubuntu@docker-host-aws:~$ 36 | 37 | Create a new Bridge Network 38 | --------------------------- 39 | 40 | Use ``docker network create -d bridge NETWORK_NAME`` command to create a new bridge network [#f1]_. 41 | 42 | .. code-block:: bash 43 | 44 | ubuntu@docker-host-aws:~$ docker network ls 45 | NETWORK ID NAME DRIVER SCOPE 46 | 326ddef352c5 bridge bridge local 47 | 28cc7c021812 demo bridge local 48 | 1ca18e6b4867 host host local 49 | e9530f1fb046 none null local 50 | ubuntu@docker-host-aws:~$ docker network rm demo 51 | demo 52 | ubuntu@docker-host-aws:~$ docker network ls 53 | NETWORK ID NAME DRIVER SCOPE 54 | 326ddef352c5 bridge bridge local 55 | 1ca18e6b4867 host host local 56 | e9530f1fb046 none null local 57 | ubuntu@docker-host-aws:~$ docker network create -d bridge my-bridge 58 | e0fc5f7ff50e97787a7b13064f12806232dcc88bafa9c2eb07cec5e81cefd886 59 | ubuntu@docker-host-aws:~$ docker network ls 60 | NETWORK ID NAME DRIVER SCOPE 61 | 326ddef352c5 bridge bridge local 62 | 1ca18e6b4867 host host local 63 | e0fc5f7ff50e my-bridge bridge local 64 | e9530f1fb046 none null local 65 | ubuntu@docker-host-aws:~$ 66 | ubuntu@docker-host-aws:~$ ip a 67 | 1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default 68 | link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 69 | inet 127.0.0.1/8 scope host lo 70 | valid_lft forever preferred_lft forever 71 | inet6 ::1/128 scope host 72 | valid_lft forever preferred_lft forever 73 | 2: eth0: mtu 9001 qdisc pfifo_fast state UP group default qlen 1000 74 | link/ether 02:30:c1:3e:63:3a brd ff:ff:ff:ff:ff:ff 75 | inet 172.31.29.93/20 brd 172.31.31.255 scope global eth0 76 | valid_lft forever preferred_lft forever 77 | inet6 fe80::30:c1ff:fe3e:633a/64 scope link 78 | valid_lft forever preferred_lft forever 79 | 4: docker0: mtu 1500 qdisc noqueue state DOWN group default 80 | link/ether 02:42:a7:88:bd:32 brd ff:ff:ff:ff:ff:ff 81 | inet 172.17.0.1/16 scope global docker0 82 | valid_lft forever preferred_lft forever 83 | inet6 fe80::42:a7ff:fe88:bd32/64 scope link 84 | valid_lft forever preferred_lft forever 85 | 56: br-e0fc5f7ff50e: mtu 1500 qdisc noqueue state DOWN group default 86 | link/ether 02:42:c0:80:09:3c brd ff:ff:ff:ff:ff:ff 87 | inet 172.18.0.1/16 scope global br-e0fc5f7ff50e 88 | valid_lft forever 
preferred_lft forever 89 | ubuntu@docker-host-aws:~$ brctl show 90 | bridge name bridge id STP enabled interfaces 91 | br-e0fc5f7ff50e 8000.0242c080093c no 92 | docker0 8000.0242a788bd32 no 93 | ubuntu@docker-host-aws:~$ 94 | 95 | 96 | Create a Container connected with new Bridge 97 | --------------------------------------------- 98 | 99 | Create a container connected with the ``my-bridge`` network. 100 | 101 | .. code-block:: bash 102 | 103 | $ docker run -d --name test1 --network my-bridge busybox sh -c "while true;do sleep 3600;done" 104 | $ docker exec -it test1 sh 105 | / # ip a 106 | 1: lo: mtu 65536 qdisc noqueue 107 | link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 108 | inet 127.0.0.1/8 scope host lo 109 | valid_lft forever preferred_lft forever 110 | inet6 ::1/128 scope host 111 | valid_lft forever preferred_lft forever 112 | 57: eth0: mtu 1500 qdisc noqueue 113 | link/ether 02:42:ac:12:00:02 brd ff:ff:ff:ff:ff:ff 114 | inet 172.18.0.2/16 scope global eth0 115 | valid_lft forever preferred_lft forever 116 | inet6 fe80::42:acff:fe12:2/64 scope link 117 | valid_lft forever preferred_lft forever 118 | 119 | ubuntu@docker-host-aws:~$ brctl show 120 | bridge name bridge id STP enabled interfaces 121 | br-e0fc5f7ff50e 8000.0242c080093c no veth2f36f74 122 | docker0 8000.0242a788bd32 no 123 | ubuntu@docker-host-aws:~$ 124 | 125 | The new container will connect with the ``my-bridge``. 126 | 127 | Change a Container's network 128 | ----------------------------- 129 | 130 | Create two containers which connect with the default ``docker0`` bridge. 131 | 132 | .. code-block:: bash 133 | 134 | ubuntu@docker-host-aws:~$ docker run -d --name test1 busybox sh -c "while true;do sleep 3600;done" 135 | 73624dd5373b594526d73a1d6fb68a32b92c1ed75e84575f32e4e0f2e1d8d356 136 | ubuntu@docker-host-aws:~$ docker run -d --name test2 busybox sh -c "while true;do sleep 3600;done" 137 | 33498192d489832a8534fb516029be7fbaf0b58e665d3e4922147857ffbbc10b 138 | 139 | Create a new bridge network 140 | 141 | .. code-block:: bash 142 | 143 | ubuntu@docker-host-aws:~$ docker network create -d bridge demo-bridge 144 | be9309ebb3b3fc18c3d43b0fef7c82fe348ce7bf841e281934deccf6bd6e51eb 145 | 146 | Use ``docker network connect demo-bridge test1`` command to connect container ``test1`` to bridge ``demo-bridge``. 147 | 148 | .. 
code-block:: bash 149 | 150 | ubuntu@docker-host-aws:~$ docker network connect demo-bridge test1 151 | ubuntu@docker-host-aws:~$ brctl show 152 | bridge name bridge id STP enabled interfaces 153 | br-be9309ebb3b3 8000.02423906b898 no vethec7dc1d 154 | docker0 8000.0242a788bd32 no veth3238a5d 155 | veth7b516dd 156 | ubuntu@docker-host-aws:~$ docker network inspect demo-bridge 157 | [ 158 | { 159 | "Name": "demo-bridge", 160 | "Id": "be9309ebb3b3fc18c3d43b0fef7c82fe348ce7bf841e281934deccf6bd6e51eb", 161 | "Created": "2017-02-23T06:16:28.251575297Z", 162 | "Scope": "local", 163 | "Driver": "bridge", 164 | "EnableIPv6": false, 165 | "IPAM": { 166 | "Driver": "default", 167 | "Options": {}, 168 | "Config": [ 169 | { 170 | "Subnet": "172.18.0.0/16", 171 | "Gateway": "172.18.0.1" 172 | } 173 | ] 174 | }, 175 | "Internal": false, 176 | "Attachable": false, 177 | "Containers": { 178 | "73624dd5373b594526d73a1d6fb68a32b92c1ed75e84575f32e4e0f2e1d8d356": { 179 | "Name": "test1", 180 | "EndpointID": "b766bfcc7fc851620b63931f114f5b81b5e072c7ffd64d8f1c99d9828810f17a", 181 | "MacAddress": "02:42:ac:12:00:02", 182 | "IPv4Address": "172.18.0.2/16", 183 | "IPv6Address": "" 184 | } 185 | }, 186 | "Options": {}, 187 | "Labels": {} 188 | } 189 | ] 190 | 191 | Now the container ``test1`` has connected with the default ``docker0`` bridge and ``demo-bridge``. we can do them same action 192 | to connect container ``test2`` to ``demo-bridge`` network. After that: 193 | 194 | .. code-block:: bash 195 | 196 | ubuntu@docker-host-aws:~$ brctl show 197 | bridge name bridge id STP enabled interfaces 198 | br-be9309ebb3b3 8000.02423906b898 no veth67bd1b0 199 | vethec7dc1d 200 | docker0 8000.0242a788bd32 no veth3238a5d 201 | veth7b516dd 202 | ubuntu@docker-host-aws:~$ docker network inspect demo-bridge 203 | [ 204 | { 205 | "Name": "demo-bridge", 206 | "Id": "be9309ebb3b3fc18c3d43b0fef7c82fe348ce7bf841e281934deccf6bd6e51eb", 207 | "Created": "2017-02-23T06:16:28.251575297Z", 208 | "Scope": "local", 209 | "Driver": "bridge", 210 | "EnableIPv6": false, 211 | "IPAM": { 212 | "Driver": "default", 213 | "Options": {}, 214 | "Config": [ 215 | { 216 | "Subnet": "172.18.0.0/16", 217 | "Gateway": "172.18.0.1" 218 | } 219 | ] 220 | }, 221 | "Internal": false, 222 | "Attachable": false, 223 | "Containers": { 224 | "33498192d489832a8534fb516029be7fbaf0b58e665d3e4922147857ffbbc10b": { 225 | "Name": "test2", 226 | "EndpointID": "26d6bdc1c1c0459ba49718e07d6983a9dda1a1a96db3f1beedcbc5ea54abd163", 227 | "MacAddress": "02:42:ac:12:00:03", 228 | "IPv4Address": "172.18.0.3/16", 229 | "IPv6Address": "" 230 | }, 231 | "73624dd5373b594526d73a1d6fb68a32b92c1ed75e84575f32e4e0f2e1d8d356": { 232 | "Name": "test1", 233 | "EndpointID": "b766bfcc7fc851620b63931f114f5b81b5e072c7ffd64d8f1c99d9828810f17a", 234 | "MacAddress": "02:42:ac:12:00:02", 235 | "IPv4Address": "172.18.0.2/16", 236 | "IPv6Address": "" 237 | } 238 | }, 239 | "Options": {}, 240 | "Labels": {} 241 | } 242 | ] 243 | 244 | Now, if we go into ``test1``, we can ping ``test2`` directly by container name: 245 | 246 | .. 
code-block:: bash 247 | 248 | ubuntu@docker-host-aws:~$ docker exec -it test1 sh 249 | / # ip a 250 | 1: lo: mtu 65536 qdisc noqueue 251 | link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 252 | inet 127.0.0.1/8 scope host lo 253 | valid_lft forever preferred_lft forever 254 | inet6 ::1/128 scope host 255 | valid_lft forever preferred_lft forever 256 | 78: eth0: mtu 1500 qdisc noqueue 257 | link/ether 02:42:ac:11:00:02 brd ff:ff:ff:ff:ff:ff 258 | inet 172.17.0.2/16 scope global eth0 259 | valid_lft forever preferred_lft forever 260 | inet6 fe80::42:acff:fe11:2/64 scope link 261 | valid_lft forever preferred_lft forever 262 | 83: eth1: mtu 1500 qdisc noqueue 263 | link/ether 02:42:ac:12:00:02 brd ff:ff:ff:ff:ff:ff 264 | inet 172.18.0.2/16 scope global eth1 265 | valid_lft forever preferred_lft forever 266 | inet6 fe80::42:acff:fe12:2/64 scope link 267 | valid_lft forever preferred_lft forever 268 | / # ping test2 269 | PING test2 (172.18.0.3): 56 data bytes 270 | 64 bytes from 172.18.0.3: seq=0 ttl=64 time=0.095 ms 271 | 64 bytes from 172.18.0.3: seq=1 ttl=64 time=0.077 ms 272 | ^C 273 | --- test2 ping statistics --- 274 | 2 packets transmitted, 2 packets received, 0% packet loss 275 | round-trip min/avg/max = 0.077/0.086/0.095 ms 276 | 277 | Also, we can use ``docker network disconnect demo-bridge test1`` to disconnect container ``test1`` from 278 | network ``demo-bridge``. 279 | 280 | Reference 281 | ---------- 282 | 283 | .. [#f1] https://docs.docker.com/engine/reference/commandline/network_create/ 284 | -------------------------------------------------------------------------------- /docs/source/docker/customize-bridge.rst: -------------------------------------------------------------------------------- 1 | Customize the docker0 bridge 2 | ============================ 3 | 4 | The default docker0 bridge has some default configuration [#f1]_. 5 | 6 | .. code-block:: bash 7 | 8 | ubuntu@docker-node1:~$ docker network list 9 | NETWORK ID NAME DRIVER SCOPE 10 | 83a58f039549 bridge bridge local 11 | 0f93d7177516 host host local 12 | 68721ff2f526 none null local 13 | ubuntu@docker-node1:~$ 14 | ubuntu@docker-node1:~$ 15 | ubuntu@docker-node1:~$ docker network inspect bridge 16 | [ 17 | { 18 | "Name": "bridge", 19 | "Id": "83a58f039549470e3374c6631ef721b927e92917af1d21b464dd59551025ac22", 20 | "Scope": "local", 21 | "Driver": "bridge", 22 | "EnableIPv6": false, 23 | "IPAM": { 24 | "Driver": "default", 25 | "Options": null, 26 | "Config": [ 27 | { 28 | "Subnet": "172.17.0.0/16", 29 | "Gateway": "172.17.0.1" 30 | } 31 | ] 32 | }, 33 | "Internal": false, 34 | "Containers": { 35 | "13866c4e5bf2c73385883090ccd0b64ca6ff177d61174f4499210b8a17a7def1": { 36 | "Name": "test1", 37 | "EndpointID": "99fea9853df1fb5fbed3f927b3d2b00544188aa7913a8c0f4cb9f9a40639d789", 38 | "MacAddress": "02:42:ac:11:00:02", 39 | "IPv4Address": "172.17.0.2/16", 40 | "IPv6Address": "" 41 | } 42 | }, 43 | "Options": { 44 | "com.docker.network.bridge.default_bridge": "true", 45 | "com.docker.network.bridge.enable_icc": "true", 46 | "com.docker.network.bridge.enable_ip_masquerade": "true", 47 | "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", 48 | "com.docker.network.bridge.name": "docker0", 49 | "com.docker.network.driver.mtu": "1500" 50 | }, 51 | "Labels": {} 52 | } 53 | ] 54 | ubuntu@docker-node1:~$ 55 | 56 | What we want to do is to change the default IPAM dirver's configuration, IP address, netmask and IP allocation range. 57 | 58 | 59 | References 60 | ----------- 61 | 62 | .. 
[#f1] https://docs.docker.com/engine/userguide/networking/default_network/custom-docker0/ 63 | -------------------------------------------------------------------------------- /docs/source/docker/docker-base-image.rst: -------------------------------------------------------------------------------- 1 | Build a Base Image from Scratch 2 | ================================ 3 | 4 | We will build a ``hello world`` base image from scratch. 5 | 6 | 7 | System Environment 8 | ------------------- 9 | 10 | Docker is running on CentOS 7, and the version is: 11 | 12 | .. code-block:: sh 13 | 14 | $ docker version 15 | Client: 16 | Version: 17.12.0-ce 17 | API version: 1.35 18 | Go version: go1.9.2 19 | Git commit: c97c6d6 20 | Built: Wed Dec 27 20:10:14 2017 21 | OS/Arch: linux/amd64 22 | 23 | Server: 24 | Engine: 25 | Version: 17.12.0-ce 26 | API version: 1.35 (minimum version 1.12) 27 | Go version: go1.9.2 28 | Git commit: c97c6d6 29 | Built: Wed Dec 27 20:12:46 2017 30 | OS/Arch: linux/amd64 31 | Experimental: false 32 | 33 | Install the requirements: 34 | 35 | .. code-block:: sh 36 | 37 | $ sudo yum install -y gcc glibc-static 38 | 39 | 40 | Create a Hello world 41 | --------------------- 42 | 43 | 44 | Create a ``hello.c`` file and save it: 45 | 46 | .. code-block:: bash 47 | 48 | $ pwd 49 | /home/vagrant/hello-world 50 | [vagrant@localhost hello-world]$ more hello.c 51 | #include <stdio.h> 52 | 53 | int main() 54 | { 55 | printf("hello docker\n"); 56 | } 57 | [vagrant@localhost hello-world]$ 58 | 59 | Compile the ``hello.c`` source file to a binary file, and run it. 60 | 61 | .. code-block:: bash 62 | 63 | $ gcc -o hello -static hello.c 64 | $ ls 65 | Dockerfile hello hello.c 66 | $ ./hello 67 | hello docker 68 | 69 | 70 | Build Docker image 71 | ------------------- 72 | 73 | Create a Dockerfile like this: 74 | 75 | .. code-block:: bash 76 | 77 | $ more Dockerfile 78 | FROM scratch 79 | ADD hello / 80 | CMD ["/hello"] 81 | 82 | Build the image: 83 | 84 | .. code-block:: bash 85 | 86 | $ docker build -t xiaopeng163/hello-world . 87 | $ docker image ls 88 | REPOSITORY TAG IMAGE ID CREATED SIZE 89 | xiaopeng163/hello-world latest 78d57d4588e3 4 seconds ago 844kB 90 | 91 | Run the hello world container 92 | ------------------------------ 93 | 94 | .. code-block:: bash 95 | 96 | $ docker run xiaopeng163/hello-world 97 | hello docker 98 | 99 | Done! -------------------------------------------------------------------------------- /docs/source/docker/docker-calico.rst: -------------------------------------------------------------------------------- 1 | Multi-Host Networking Overlay with Calico 2 | ========================================= 3 | -------------------------------------------------------------------------------- /docs/source/docker/docker-cli.rst: -------------------------------------------------------------------------------- 1 | Docker Command Line Step by Step 2 | ================================ 3 | 4 | Docker Images 5 | ------------- 6 | 7 | Docker images can be pulled from Docker Hub, or built from a ``Dockerfile``. 8 | 9 | docker pull 10 | ~~~~~~~~~~~~ 11 | 12 | ``docker pull`` will pull a docker image from an image registry, which is Docker Hub by default. 13 | 14 | .. 
code-block:: bash 15 | 16 | $ docker pull ubuntu:14.04 17 | 14.04: Pulling from library/ubuntu 18 | 19 | 04cf3f0e25b6: Pull complete 20 | d5b45e963ba0: Pull complete 21 | a5c78fda4e14: Pull complete 22 | 193d4969ca79: Pull complete 23 | d709551f9630: Pull complete 24 | Digest: sha256:edb984703bd3e8981ff541a5b9297ca1b81fde6e6e8094d86e390a38ebc30b4d 25 | Status: Downloaded newer image for ubuntu:14.04 26 | 27 | If the image has already on you host. 28 | 29 | .. code-block:: bash 30 | 31 | $ docker pull ubuntu:14.04 32 | 14.04: Pulling from library/ubuntu 33 | 34 | Digest: sha256:edb984703bd3e8981ff541a5b9297ca1b81fde6e6e8094d86e390a38ebc30b4d 35 | Status: Image is up to date for ubuntu:14.04 36 | 37 | docker build 38 | ~~~~~~~~~~~~ 39 | 40 | Create a ``Dockerfile`` in current folder. 41 | 42 | .. code-block:: bash 43 | 44 | $ more Dockerfile 45 | FROM ubuntu:14.04 46 | MAINTAINER xiaoquwl@gmail.com 47 | RUN apt-get update && apt-get install -y redis-server 48 | EXPOSE 6379 49 | ENTRYPOINT ["/usr/bin/redis-server"] 50 | 51 | Use ``docker build`` to create a image. 52 | 53 | .. code-block:: bash 54 | 55 | $ docker build -t xiaopeng163/redis:0.1 . 56 | $ docker images 57 | REPOSITORY TAG IMAGE ID CREATED SIZE 58 | xiaopeng163/redis 0.1 ccbca61a8ed4 7 seconds ago 212.4 MB 59 | ubuntu 14.04 3f755ca42730 2 days ago 187.9 MB 60 | 61 | docker history 62 | ~~~~~~~~~~~~~~ 63 | 64 | .. code-block:: bash 65 | 66 | $ docker history xiaopeng163/redis:0.1 67 | IMAGE CREATED CREATED BY SIZE COMMENT 68 | ccbca61a8ed4 2 minutes ago /bin/sh -c #(nop) ENTRYPOINT ["/usr/bin/redis 0 B 69 | 13d13c016420 2 minutes ago /bin/sh -c #(nop) EXPOSE 6379/tcp 0 B 70 | c2675d891098 2 minutes ago /bin/sh -c apt-get update && apt-get install 24.42 MB 71 | c3035660ff0c 2 minutes ago /bin/sh -c #(nop) MAINTAINER xiaoquwl@gmail.c 0 B 72 | 3f755ca42730 2 days ago /bin/sh -c #(nop) CMD ["/bin/bash"] 0 B 73 | 2 days ago /bin/sh -c mkdir -p /run/systemd && echo 'doc 7 B 74 | 2 days ago /bin/sh -c sed -i 's/^#\s*\(deb.*universe\)$/ 1.895 kB 75 | 2 days ago /bin/sh -c rm -rf /var/lib/apt/lists/* 0 B 76 | 2 days ago /bin/sh -c set -xe && echo '#!/bin/sh' > /u 194.6 kB 77 | 2 days ago /bin/sh -c #(nop) ADD file:b2236d49147fe14d8d 187.7 MB 78 | 79 | 80 | docker images 81 | ~~~~~~~~~~~~~ 82 | 83 | ``docker images`` will list all avaiable images on your local host. 84 | 85 | .. code-block:: bash 86 | 87 | $ docker images 88 | REPOSITORY TAG IMAGE ID CREATED SIZE 89 | ubuntu 14.04 aae2b63c4946 12 hours ago 187.9 MB 90 | 91 | docker rmi 92 | ~~~~~~~~~~ 93 | 94 | Remove docker images. 95 | 96 | .. code-block:: bash 97 | 98 | $ docker rmi aae2b63c4946 99 | Untagged: ubuntu:14.04 100 | Deleted: sha256:aae2b63c49461fcae4962e4a8043f66acf8e3af7e62f5ebceb70b181d8ca01e0 101 | Deleted: sha256:50a2a0443efd0936b13eebb86f52b85551ad7883e093ba0b5bad14fec6ccf2ee 102 | Deleted: sha256:9f0ca687b5937f9ac2c9675065b2daf1a6592e8a1e96bce9de46e94f70fbf418 103 | Deleted: sha256:6e85e9fb34e94d299bb156252c89dfb4dcec65deca5e2471f7e8ba206eba8f8d 104 | Deleted: sha256:cc4264e967e293d5cc16e5def86a0b3160b7a3d09e7a458f781326cd2cecedb1 105 | Deleted: sha256:3181634137c4df95685d73bfbc029c47f6b37eb8a80e74f82e01cd746d0b4b66 106 | 107 | 108 | Docker Containers 109 | ----------------- 110 | 111 | 112 | Start a container in interactive mode 113 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 114 | 115 | .. code-block:: bash 116 | 117 | $ docker run -i --name test3 ubuntu:14.04 118 | pwd 119 | / 120 | ls -l 121 | total 20 122 | drwxr-xr-x. 
2 root root 4096 Nov 30 08:51 bin 123 | drwxr-xr-x. 2 root root 6 Apr 10 2014 boot 124 | drwxr-xr-x. 5 root root 360 Nov 30 09:00 dev 125 | drwxr-xr-x. 1 root root 62 Nov 30 09:00 etc 126 | drwxr-xr-x. 2 root root 6 Apr 10 2014 home 127 | drwxr-xr-x. 12 root root 4096 Nov 30 08:51 lib 128 | drwxr-xr-x. 2 root root 33 Nov 30 08:51 lib64 129 | drwxr-xr-x. 2 root root 6 Nov 23 01:30 media 130 | drwxr-xr-x. 2 root root 6 Apr 10 2014 mnt 131 | drwxr-xr-x. 2 root root 6 Nov 23 01:30 opt 132 | dr-xr-xr-x. 131 root root 0 Nov 30 09:00 proc 133 | drwx------. 2 root root 35 Nov 30 08:51 root 134 | drwxr-xr-x. 8 root root 4096 Nov 29 20:04 run 135 | drwxr-xr-x. 2 root root 4096 Nov 30 08:51 sbin 136 | drwxr-xr-x. 2 root root 6 Nov 23 01:30 srv 137 | dr-xr-xr-x. 13 root root 0 Sep 4 08:43 sys 138 | drwxrwxrwt. 2 root root 6 Nov 23 01:32 tmp 139 | drwxr-xr-x. 10 root root 97 Nov 30 08:51 usr 140 | drwxr-xr-x. 11 root root 4096 Nov 30 08:51 var 141 | 142 | ifconfig 143 | eth0 Link encap:Ethernet HWaddr 02:42:ac:11:00:04 144 | inet addr:172.17.0.4 Bcast:0.0.0.0 Mask:255.255.0.0 145 | inet6 addr: fe80::42:acff:fe11:4/64 Scope:Link 146 | UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1 147 | RX packets:8 errors:0 dropped:0 overruns:0 frame:0 148 | TX packets:8 errors:0 dropped:0 overruns:0 carrier:0 149 | collisions:0 txqueuelen:0 150 | RX bytes:648 (648.0 B) TX bytes:648 (648.0 B) 151 | 152 | lo Link encap:Local Loopback 153 | inet addr:127.0.0.1 Mask:255.0.0.0 154 | inet6 addr: ::1/128 Scope:Host 155 | UP LOOPBACK RUNNING MTU:65536 Metric:1 156 | RX packets:0 errors:0 dropped:0 overruns:0 frame:0 157 | TX packets:0 errors:0 dropped:0 overruns:0 carrier:0 158 | collisions:0 txqueuelen:0 159 | RX bytes:0 (0.0 B) TX bytes:0 (0.0 B) 160 | 161 | exit 162 | $ 163 | 164 | Start a container in background 165 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 166 | 167 | Start a container in background using ``xiaopeng163/redis:0.1`` image, and the name of the container is ``demo``. 168 | Through ``docker ps`` we can see all running Containers 169 | 170 | .. code-block:: bash 171 | 172 | $ docker run -d --name demo xiaopeng163/redis:0.1 173 | 4791db4ff0ef5a1ad9ff7c405bd7705d95779b2e9209967ffbef66cbaee80f3a 174 | $ docker ps 175 | CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 176 | 4791db4ff0ef xiaopeng163/redis:0.1 "/usr/bin/redis-serve" 5 seconds ago Up 4 seconds 6379/tcp demo 177 | 178 | stop/remove containers 179 | ~~~~~~~~~~~~~~~~~~~~~~ 180 | 181 | Sometime, we want to manage multiple containers each time, like ``start``, ``stop``, ``rm``. 182 | 183 | Firstly, we can use ``--filter`` to filter out the containers we want to manage. 184 | 185 | .. code-block:: bash 186 | 187 | $ docker ps -a --filter "status=exited" 188 | CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 189 | c05d6d379459 centos:7 "/bin/bash -c 'while " 3 days ago Exited (137) 11 hours ago test3 190 | 8975cb01d142 centos:7 "/bin/bash -c 'while " 5 days ago Exited (137) 3 days ago test2 191 | 192 | Secondly, we can use ``-q`` option to list only containers ids 193 | 194 | .. code-block:: bash 195 | 196 | $ docker ps -aq --filter "status=exited" 197 | c05d6d379459 198 | 8975cb01d142 199 | 200 | At last, we can batch processing these containers, like remove them all or start them all: 201 | 202 | .. 
code-block:: bash 203 | 204 | $ docker rm $(docker ps -aq --filter "status=exited") 205 | c05d6d379459 206 | 8975cb01d142 207 | -------------------------------------------------------------------------------- /docs/source/docker/docker-compose-lb-scale.rst: -------------------------------------------------------------------------------- 1 | Docker Compose Load Balancing and Scaling 2 | ========================================== 3 | 4 | Please finish :doc:`docker-compose` first. 5 | 6 | In this lab, we will create a web service, try to scale this service, and add a load balancer. 7 | 8 | In the ``docker-compose.yml`` file, we use just two images. 9 | 10 | .. code-block:: bash 11 | 12 | $ more docker-compose.yml 13 | web: 14 | image: 'jwilder/whoami' 15 | lb: 16 | image: 'dockercloud/haproxy:latest' 17 | links: 18 | - web 19 | ports: 20 | - '80:80' 21 | 22 | Start and check the service. 23 | 24 | .. code-block:: bash 25 | 26 | $ docker-compose up 27 | $ docker-compose up -d 28 | Creating ubuntu_web_1 29 | Creating ubuntu_lb_1 30 | $ docker-compose ps 31 | Name Command State Ports 32 | --------------------------------------------------------------------------------------------- 33 | ubuntu_lb_1 /sbin/tini -- dockercloud- ... Up 1936/tcp, 443/tcp, 0.0.0.0:80->80/tcp 34 | ubuntu_web_1 /bin/sh -c php-fpm -d vari ... Up 80/tcp 35 | 36 | Open the browser and check the hostname. 37 | 38 | Scale the web service to 3 and check: 39 | 40 | .. code-block:: bash 41 | 42 | $ docker-compose scale web=3 43 | Creating and starting ubuntu_web_2 ... done 44 | Creating and starting ubuntu_web_3 ... done 45 | ubuntu@aws-swarm-manager:~$ docker-compose ps 46 | Name Command State Ports 47 | --------------------------------------------------------------------------------------------- 48 | ubuntu_lb_1 /sbin/tini -- dockercloud- ... Up 1936/tcp, 443/tcp, 0.0.0.0:80->80/tcp 49 | ubuntu_web_1 /bin/sh -c php-fpm -d vari ... Up 80/tcp 50 | ubuntu_web_2 /bin/sh -c php-fpm -d vari ... Up 80/tcp 51 | ubuntu_web_3 /bin/sh -c php-fpm -d vari ... Up 80/tcp 52 | -------------------------------------------------------------------------------- /docs/source/docker/docker-compose.rst: -------------------------------------------------------------------------------- 1 | Docker Compose Networking Deep Dive 2 | =================================== 3 | 4 | .. note:: 5 | 6 | We suggest that you complete the lab :doc:`bridged-network` before going on to this lab. 7 | 8 | This lab uses ``example-voting-app`` as the demo application run by docker-compose. You can find the source code of the project at 9 | https://github.com/DaoCloud/example-voting-app 10 | 11 | Using Compose is basically a three-step process. [#f1]_ 12 | 13 | 1. Define your app’s environment with a Dockerfile so it can be reproduced anywhere. 14 | 2. Define the services that make up your app in docker-compose.yml so they can be run together in an isolated environment. 15 | 3. Lastly, run docker-compose up and Compose will start and run your entire app. 16 | 17 | For ``example-voting-app``, we already have a ``Dockerfile`` and a ``docker-compose.yml``, so all we need to do is run ``docker-compose up``. 18 | 19 | Install Docker Compose 20 | ---------------------- 21 | 22 | There are many ways to install Docker Compose [#f2]_. 23 | 24 | In our one-node docker engine lab environment :doc:`../lab-environment` 25 | we install Docker Compose on one docker host as follows. 26 | 27 | .. 
code-block:: bash 28 | 29 | ubuntu@docker-node1:~$ sudo curl -L "https://github.com/docker/compose/releases/download/1.9.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose 30 | ubuntu@docker-node1:~$ sudo chmod +x /usr/local/bin/docker-compose 31 | ubuntu@docker-node1:~$ docker-compose -v 32 | docker-compose version 1.9.0, build 2585387 33 | 34 | Start APP 35 | ---------- 36 | 37 | Clone ``example-voting-app`` repository to docker host, it defined five containers: ``voting-app``, ``result-app``, ``worker``, ``redis``, ``db``. 38 | and two networks: ``front-tier``, ``back-tier`` through ``docker-compose.yml``. 39 | 40 | .. code-block:: bash 41 | 42 | version: "2" 43 | 44 | services: 45 | voting-app: 46 | build: ./voting-app/. 47 | volumes: 48 | - ./voting-app:/app 49 | ports: 50 | - "5000:80" 51 | links: 52 | - redis 53 | networks: 54 | - front-tier 55 | - back-tier 56 | 57 | result-app: 58 | build: ./result-app/. 59 | volumes: 60 | - ./result-app:/app 61 | ports: 62 | - "5001:80" 63 | links: 64 | - db 65 | networks: 66 | - front-tier 67 | - back-tier 68 | 69 | worker: 70 | build: ./worker 71 | links: 72 | - db 73 | - redis 74 | networks: 75 | - back-tier 76 | 77 | redis: 78 | image: redis 79 | ports: ["6379"] 80 | networks: 81 | - back-tier 82 | 83 | db: 84 | image: postgres:9.4 85 | volumes: 86 | - "db-data:/var/lib/postgresql/data" 87 | networks: 88 | - back-tier 89 | 90 | volumes: 91 | db-data: 92 | 93 | networks: 94 | front-tier: 95 | back-tier: 96 | 97 | Then run ``docker-compose build`` to build required docker images. This will take some time. 98 | 99 | .. code-block:: bash 100 | 101 | ubuntu@docker-node1:~$ git clone https://github.com/DaoCloud/example-voting-app 102 | ubuntu@docker-node1:~$ cd example-voting-app/ 103 | ubuntu@docker-node1:~/example-voting-app$ sudo docker-compose build 104 | 105 | 106 | ubuntu@docker-node1:~/example-voting-app$ sudo docker-compose up 107 | Creating network "examplevotingapp_front-tier" with the default driver 108 | Creating network "examplevotingapp_back-tier" with the default driver 109 | Creating volume "examplevotingapp_db-data" with default driver 110 | .... 111 | Creating examplevotingapp_db_1 112 | Creating examplevotingapp_redis_1 113 | Creating examplevotingapp_voting-app_1 114 | Creating examplevotingapp_result-app_1 115 | Creating examplevotingapp_worker_1 116 | Attaching to examplevotingapp_redis_1, examplevotingapp_db_1, examplevotingapp_result-app_1, examplevotingapp_voting-app_1, examplevotingapp_worker_1 117 | ... 118 | 119 | There will be five containers, two bridge networks and seven veth interfaces created. 120 | 121 | .. 
code-block:: bash 122 | 123 | ubuntu@docker-node1:~/example-voting-app$ sudo docker ps 124 | CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 125 | c9c4e7fe7b6c examplevotingapp_worker "/usr/lib/jvm/java-7-" About an hour ago Up 5 seconds examplevotingapp_worker_1 126 | 4213167049aa examplevotingapp_result-app "node server.js" About an hour ago Up 4 seconds 0.0.0.0:5001->80/tcp examplevotingapp_result-app_1 127 | 8711d687bda9 examplevotingapp_voting-app "python app.py" About an hour ago Up 5 seconds 0.0.0.0:5000->80/tcp examplevotingapp_voting-app_1 128 | b7eda251865d redis "docker-entrypoint.sh" About an hour ago Up 5 seconds 0.0.0.0:32770->6379/tcp examplevotingapp_redis_1 129 | 7d6dbb98ce40 postgres:9.4 "/docker-entrypoint.s" About an hour ago Up 5 seconds 5432/tcp examplevotingapp_db_1 130 | ubuntu@docker-node1:~/example-voting-app$ sudo docker network ls 131 | NETWORK ID NAME DRIVER SCOPE 132 | 3b5cfe4aafa1 bridge bridge local 133 | 69a019d00603 examplevotingapp_back-tier bridge local 134 | 6ddb07377c35 examplevotingapp_front-tier bridge local 135 | b1670e00e2a3 host host local 136 | 6006af29f010 none null local 137 | ubuntu@docker-node1:~/example-voting-app$ brctl show 138 | bridge name bridge id STP enabled interfaces 139 | br-69a019d00603 8000.0242c780244f no veth2eccb94 140 | veth374be12 141 | veth57f50a8 142 | veth8418ed3 143 | veth91d724d 144 | br-6ddb07377c35 8000.02421dac7490 no veth156c0a9 145 | vethaba6401 146 | 147 | Through ``docker network inspect``, we can know which container connnect with the bridge. 148 | 149 | There are two containers connect with docker network ``examplevotingapp_front-tier``. 150 | 151 | .. code-block:: bash 152 | 153 | ubuntu@docker-node1:~/example-voting-app$ sudo docker network inspect examplevotingapp_front-tier 154 | [ 155 | { 156 | "Name": "examplevotingapp_front-tier", 157 | "Id": "6ddb07377c354bcf68542592a8c6eb34d334ce8515e64832b3c7bf2af56274ca", 158 | "Scope": "local", 159 | "Driver": "bridge", 160 | "EnableIPv6": false, 161 | "IPAM": { 162 | "Driver": "default", 163 | "Options": null, 164 | "Config": [ 165 | { 166 | "Subnet": "172.18.0.0/16", 167 | "Gateway": "172.18.0.1/16" 168 | } 169 | ] 170 | }, 171 | "Internal": false, 172 | "Containers": { 173 | "4213167049aa7b2cc1b3096333706f2ef0428e78b2847a7c5ddc755f5332505c": { 174 | "Name": "examplevotingapp_result-app_1", 175 | "EndpointID": "00c7e1101227ece1535385e8d6fe9210dfcdc3c58d71cedb4e9fad6c949120e3", 176 | "MacAddress": "02:42:ac:12:00:03", 177 | "IPv4Address": "172.18.0.3/16", 178 | "IPv6Address": "" 179 | }, 180 | "8711d687bda94069ed7d5a7677ca4c7953d384f1ebf83c3bd75ac51b1606ed2f": { 181 | "Name": "examplevotingapp_voting-app_1", 182 | "EndpointID": "ffc9905cbfd5332b9ef333bcc7578415977a0044c2ec2055d6760c419513ae5f", 183 | "MacAddress": "02:42:ac:12:00:02", 184 | "IPv4Address": "172.18.0.2/16", 185 | "IPv6Address": "" 186 | } 187 | }, 188 | "Options": {}, 189 | "Labels": {} 190 | } 191 | ] 192 | 193 | There are five containers connect with docker network ``examplevotingapp_back-tier``. 194 | 195 | .. 
code-block:: bash 196 | 197 | ubuntu@docker-node1:~/example-voting-app$ sudo docker network inspect examplevotingapp_back-tier 198 | [ 199 | { 200 | "Name": "examplevotingapp_back-tier", 201 | "Id": "69a019d00603ca3a06a30ac99fc0a2700dd8cc14ba8b8368de4fe0c26ad4c69d", 202 | "Scope": "local", 203 | "Driver": "bridge", 204 | "EnableIPv6": false, 205 | "IPAM": { 206 | "Driver": "default", 207 | "Options": null, 208 | "Config": [ 209 | { 210 | "Subnet": "172.19.0.0/16", 211 | "Gateway": "172.19.0.1/16" 212 | } 213 | ] 214 | }, 215 | "Internal": false, 216 | "Containers": { 217 | "4213167049aa7b2cc1b3096333706f2ef0428e78b2847a7c5ddc755f5332505c": { 218 | "Name": "examplevotingapp_result-app_1", 219 | "EndpointID": "cb531eb6deb08346d1dbcfa65ea67d43d4c2f244f002b195fc4dadd2adb0b47d", 220 | "MacAddress": "02:42:ac:13:00:06", 221 | "IPv4Address": "172.19.0.6/16", 222 | "IPv6Address": "" 223 | }, 224 | "7d6dbb98ce408c1837f42fdf743e365cc9b0ee2b7dffd108d97e81b172d43114": { 225 | "Name": "examplevotingapp_db_1", 226 | "EndpointID": "67007a454f320d336c13e30e028cd8e85537400b70a880eabdd1f0ed743b7a6a", 227 | "MacAddress": "02:42:ac:13:00:03", 228 | "IPv4Address": "172.19.0.3/16", 229 | "IPv6Address": "" 230 | }, 231 | "8711d687bda94069ed7d5a7677ca4c7953d384f1ebf83c3bd75ac51b1606ed2f": { 232 | "Name": "examplevotingapp_voting-app_1", 233 | "EndpointID": "d414b06b9368d1719a05d527500a06fc714a4efae187df32c1476385ee03ae67", 234 | "MacAddress": "02:42:ac:13:00:05", 235 | "IPv4Address": "172.19.0.5/16", 236 | "IPv6Address": "" 237 | }, 238 | "b7eda251865d824de90ebe0dfefa3e4aab924d5030ccfb21a55e79f910ff857a": { 239 | "Name": "examplevotingapp_redis_1", 240 | "EndpointID": "9acc267d3e6b41da6fe3db040cff964c91037df215a0f2be2155b94be3bb87d0", 241 | "MacAddress": "02:42:ac:13:00:02", 242 | "IPv4Address": "172.19.0.2/16", 243 | "IPv6Address": "" 244 | }, 245 | "c9c4e7fe7b6c1508f9d9d3a05e8a4e66aa1265f2a5c3d33f363343cd37184e6f": { 246 | "Name": "examplevotingapp_worker_1", 247 | "EndpointID": "557e978eaef18a64f24d400727d396431d74cd7e8735f060396e3226f31ab97b", 248 | "MacAddress": "02:42:ac:13:00:04", 249 | "IPv4Address": "172.19.0.4/16", 250 | "IPv6Address": "" 251 | } 252 | }, 253 | "Options": {}, 254 | "Labels": {} 255 | } 256 | ] 257 | 258 | Container information summary: 259 | 260 | 261 | ============================== ============================ 262 | Container Name IP Address 263 | ============================== ============================ 264 | examplevotingapp_result-app_1 172.19.0.6/16, 172.18.0.3/16 265 | examplevotingapp_voting-app_1 172.19.0.3/16, 172.18.0.2/16 266 | examplevotingapp_redis_1 172.19.0.2/16 267 | examplevotingapp_worker_1 172.19.0.4/16 268 | examplevotingapp_db_1 172.19.0.3/16 269 | ============================== ============================ 270 | 271 | Docker network information summary: 272 | 273 | ============================== ============= ============= ========================================================================================================================================= 274 | Docker Network Name Gateway Subnet Containers 275 | ============================== ============= ============= ========================================================================================================================================= 276 | examplevotingapp_front-tier 172.18.0.1/16 172.18.0.0/16 examplevotingapp_result-app_1, examplevotingapp_voting-app_1 277 | examplevotingapp_back-tier 172.19.0.1/16 172.19.0.0/16 examplevotingapp_result-app_1, examplevotingapp_voting-app_1, 
examplevotingapp_db_1, examplevotingapp_redis_1, examplevotingapp_worker_1 278 | ============================== ============= ============= ========================================================================================================================================= 279 | 280 | Network Topology 281 | ----------------- 282 | 283 | .. image:: _image/docker-compose.png 284 | 285 | For bridge network connection details, please reference lab :doc:`bridged-network` 286 | 287 | Reference 288 | --------- 289 | 290 | .. [#f1] https://docs.docker.com/compose/overview/ 291 | .. [#f2] https://docs.docker.com/compose/install/ 292 | -------------------------------------------------------------------------------- /docs/source/docker/docker-contiv.rst: -------------------------------------------------------------------------------- 1 | Multi-host networking with Contiv 2 | ================================== 3 | 4 | http://contiv.github.io/documents/tutorials/container-101.html 5 | -------------------------------------------------------------------------------- /docs/source/docker/docker-engine.rst: -------------------------------------------------------------------------------- 1 | Docker Engine Basic 2 | =================== 3 | 4 | When people say “Docker” they typically mean Docker Engine, the client-server application 5 | made up of the Docker daemon, a REST API that specifies interfaces for interacting with the daemon, 6 | and a command line interface (CLI) client that talks to the daemon (through the REST API wrapper). 7 | Docker Engine accepts docker commands from the CLI, such as docker run , docker ps to list running containers, 8 | docker images to list images, and so on [#f3]_. 9 | 10 | By default, the docker engine and command line interface will be installed together in the same host. 11 | 12 | .. note:: 13 | 14 | Because docker's quick development, and docker's compatibility issue [#f4]_, we recommand you chose the verion > 1.10.0. And all the labs in this handbook, I use 15 | version 1.11.x and 1.12.x 16 | 17 | Install Docker Engine on Linux 18 | ------------------------------ 19 | 20 | Host information: 21 | 22 | .. code-block:: bash 23 | 24 | $ cat /etc/redhat-release 25 | CentOS Linux release 7.2.1511 (Core) 26 | $ uname -a 27 | Linux ip-172-31-43-155 3.10.0-327.28.2.el7.x86_64 #1 SMP Wed Aug 3 11:11:39 UTC 2016 x86_64 x86_64 x86_64 GNU/Linux 28 | 29 | Install with scripts [#f1]_: 30 | 31 | 1. Log into your machine as a user with sudo or root privileges. 32 | Make sure your existing packages are up-to-date. 33 | 34 | .. code-block:: bash 35 | 36 | $ sudo yum update 37 | 38 | 39 | 2. Run the Docker installation script. 40 | 41 | .. code-block:: bash 42 | 43 | $ curl -fsSL https://get.docker.com/ | sh 44 | 45 | This script adds the docker.repo repository and installs Docker. 46 | 47 | 3. Enable the service. 48 | 49 | .. code-block:: bash 50 | 51 | $ sudo systemctl enable docker.service 52 | 53 | 4. Start the Docker daemon. 54 | 55 | .. code-block:: bash 56 | 57 | $ sudo systemctl start docker 58 | 59 | 5. Verify docker is installed correctly by running a test image in a container. 60 | 61 | .. code-block:: bash 62 | 63 | $ sudo docker run --rm hello-world 64 | 65 | 66 | Install Docker Engine on Mac 67 | ---------------------------- 68 | 69 | For the requirements and how to install ``Docker Toolbox`` on Mac, please go the reference link [#f5]_. 
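Once Docker Toolbox is installed, a quick sanity check from the Docker Quickstart Terminal could look like the sketch below (the machine name ``default`` and the VirtualBox driver are only examples; adjust them to your setup):

.. code-block:: bash

    # check the client tools shipped with Docker Toolbox
    $ docker --version
    $ docker-machine --version
    $ docker-compose --version

    # create a VirtualBox-backed docker host, point the client at it, and run a test container
    $ docker-machine create -d virtualbox default
    $ eval "$(docker-machine env default)"
    $ docker run --rm hello-world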
70 | 71 | Install Docker Engine on Windows 72 | -------------------------------- 73 | 74 | For the requirements and how to install ``Docker Toolbox`` on Windows, please go to the reference link [#f6]_. 75 | 76 | Docker Version 77 | -------------- 78 | 79 | .. code-block:: bash 80 | 81 | $ sudo docker version 82 | Client: 83 | Version: 1.11.2 84 | API version: 1.23 85 | Go version: go1.5.4 86 | Git commit: b9f10c9 87 | Built: Wed Jun 1 21:23:11 2016 88 | OS/Arch: linux/amd64 89 | 90 | Server: 91 | Version: 1.11.2 92 | API version: 1.23 93 | Go version: go1.5.4 94 | Git commit: b9f10c9 95 | Built: Wed Jun 1 21:23:11 2016 96 | OS/Arch: linux/amd64 97 | 98 | Because there may have backwards incompatibilities if the versions of the client and server are different. We recommand that you should use the same version 99 | for client and server. 100 | 101 | Docker without sudo 102 | -------------------- 103 | 104 | Because the docker daemon always runs as the root user, so it needs sudo or root to run some docker commands, like: 105 | docker command need sudo 106 | 107 | .. code-block:: bash 108 | 109 | $ docker images 110 | Cannot connect to the Docker daemon. Is the docker daemon running on this host? 111 | $ sudo docker images 112 | REPOSITORY TAG IMAGE ID CREATED SIZE 113 | hello-world latest c54a2cc56cbb 4 months ago 1.848 kB 114 | 115 | But you can add your current user to docker group [#f2]_. 116 | 117 | .. code-block:: bash 118 | 119 | $ sudo groupadd docker 120 | groupadd: group 'docker' already exists 121 | $ sudo gpasswd -a ${USER} docker 122 | Adding user centos to group docker 123 | $ sudo service docker restart 124 | Redirecting to /bin/systemctl restart docker.service 125 | 126 | Then logout current user, and login again. You can use docker command from your current user without sudo now. 127 | 128 | .. code-block:: bash 129 | 130 | $ docker images 131 | REPOSITORY TAG IMAGE ID CREATED SIZE 132 | hello-world latest c54a2cc56cbb 4 months ago 1.848 kB 133 | 134 | 135 | 136 | Reference 137 | ---------- 138 | 139 | .. [#f3] https://docs.docker.com/machine/overview/ 140 | .. [#f1] https://docs.docker.com/engine/installation/linux/centos/ 141 | .. [#f2] http://askubuntu.com/questions/477551/how-can-i-use-docker-without-sudo 142 | .. [#f4] https://success.docker.com/Policies/Compatibility_Matrix 143 | .. [#f5] https://docs.docker.com/engine/installation/mac/ 144 | .. [#f6] https://docs.docker.com/engine/installation/windows/ 145 | -------------------------------------------------------------------------------- /docs/source/docker/docker-flannel.rst: -------------------------------------------------------------------------------- 1 | Multi-Host Networking Overlay with Flannel 2 | ========================================== 3 | 4 | In the Lab :doc:`docker-etcd`, we use ``etcd`` as management plane and docker build-in overlay network as data plane to show 5 | how containers in different host connect with each other. 6 | 7 | This time we will use ``flannel`` to do almost the same thing. 8 | 9 | ``Flannel`` is created by CoreOS and it is a network fabric for containers, designed for Kubernetes. 10 | 11 | Theory of Operation [#f1]_ 12 | --------------------------- 13 | 14 | flannel runs an agent, ``flanneld``, on each host and is responsible for allocating a subnet lease out of a preconfigured address space. 15 | flannel uses ``etcd`` to store the network configuration, allocated subnets, and auxiliary data (such as host's IP). 
16 | The forwarding of packets is achieved using one of several strategies that are known as backends. 17 | The simplest backend is udp and uses a TUN device to encapsulate every IP fragment in a UDP packet, forming an overlay network. 18 | The following diagram demonstrates the path a packet takes as it traverses the overlay network: 19 | 20 | .. image:: _image/docker-flannel.png 21 | 22 | Lab Environment 23 | --------------- 24 | 25 | Follow :doc:`../lab-environment` and setup two nodes of docker host. 26 | 27 | 28 | ============ ============== ============== 29 | Hostname IP Docker version 30 | ============ ============== ============== 31 | docker-node1 192.168.205.10 1.12.1 32 | docker-node2 192.168.205.11 1.12.1 33 | ============ ============== ============== 34 | 35 | Etcd Cluster Setup 36 | ------------------- 37 | 38 | Just follow :doc:`docker-etcd` to setup two nodes etcd cluster. 39 | 40 | When setup is ready, you should see the etcd cluster status as: 41 | 42 | .. code-block:: bash 43 | 44 | ubuntu@docker-node2:~/etcd-v3.0.12-linux-amd64$ ./etcdctl cluster-health 45 | member 21eca106efe4caee is healthy: got healthy result from http://192.168.205.10:2379 46 | member 8614974c83d1cc6d is healthy: got healthy result from http://192.168.205.11:2379 47 | cluster is healthy 48 | 49 | 50 | Install & Configure & Run flannel 51 | --------------------------------- 52 | 53 | Download flannel both on node1 and node2 54 | 55 | .. code-block:: bash 56 | 57 | $ wget https://github.com/coreos/flannel/releases/download/v0.6.2/flanneld-amd64 -O flanneld && chmod 755 flanneld 58 | 59 | flannel will read the configuration from etcd ``/coreos.com/network/config`` by default. We will use ``etcdctl`` to set our 60 | configuration to etcd cluster, the configuration is JSON format like that: 61 | 62 | .. code-block:: json 63 | 64 | ubuntu@docker-node1:~$ cat > flannel-network-config.json 65 | { 66 | "Network": "10.0.0.0/8", 67 | "SubnetLen": 20, 68 | "SubnetMin": "10.10.0.0", 69 | "SubnetMax": "10.99.0.0", 70 | "Backend": { 71 | "Type": "vxlan", 72 | "VNI": 100, 73 | "Port": 8472 74 | } 75 | } 76 | EOF 77 | 78 | For the configuration keys meaning, please go to https://github.com/coreos/flannel for more information. Set the configuration 79 | on host1: 80 | 81 | .. code-block:: bash 82 | 83 | ubuntu@docker-node1:~$ cd etcd-v3.0.12-linux-amd64/ 84 | ubuntu@docker-node1:~/etcd-v3.0.12-linux-amd64$ ./etcdctl set /coreos.com/network/config < ../flannel-network-config.json 85 | { 86 | "Network": "10.0.0.0/8", 87 | "SubnetLen": 20, 88 | "SubnetMin": "10.10.0.0", 89 | "SubnetMax": "10.99.0.0", 90 | "Backend": { 91 | "Type": "vxlan", 92 | "VNI": 100, 93 | "Port": 8472 94 | } 95 | } 96 | 97 | Check the configuration on host2: 98 | 99 | .. code-block:: bash 100 | 101 | ubuntu@docker-node2:~/etcd-v3.0.12-linux-amd64$ ./etcdctl get /coreos.com/network/config | jq . 102 | { 103 | "Network": "10.0.0.0/8", 104 | "SubnetLen": 20, 105 | "SubnetMin": "10.10.0.0", 106 | "SubnetMax": "10.99.0.0", 107 | "Backend": { 108 | "Type": "vxlan", 109 | "VNI": 100, 110 | "Port": 8472 111 | } 112 | } 113 | 114 | Start flannel on host1: 115 | 116 | .. code-block:: bash 117 | 118 | ubuntu@docker-node1:~$ cd 119 | ubuntu@docker-node1:~$ nohup sudo ./flanneld -iface=192.168.205.10 & 120 | 121 | After that a new interface ``flannel.100`` will be list on the host: 122 | 123 | .. 
code-block:: bash 124 | 125 | flannel.100 Link encap:Ethernet HWaddr 82:53:2e:6a:a9:43 126 | inet addr:10.15.64.0 Bcast:0.0.0.0 Mask:255.0.0.0 127 | inet6 addr: fe80::8053:2eff:fe6a:a943/64 Scope:Link 128 | UP BROADCAST RUNNING MULTICAST MTU:1450 Metric:1 129 | RX packets:0 errors:0 dropped:0 overruns:0 frame:0 130 | TX packets:0 errors:0 dropped:8 overruns:0 carrier:0 131 | collisions:0 txqueuelen:0 132 | RX bytes:0 (0.0 B) TX bytes:0 (0.0 B) 133 | 134 | Before we start flannel on host2, we can check etcd configuration on host2: 135 | 136 | .. code-block:: bash 137 | 138 | ubuntu@docker-node2:~/etcd-v3.0.12-linux-amd64$ ./etcdctl ls /coreos.com/network/subnets 139 | /coreos.com/network/subnets/10.15.64.0-20 140 | ubuntu@docker-node2:~/etcd-v3.0.12-linux-amd64$ ./etcdctl get /coreos.com/network/subnets/10.15.64.0-20 | jq . 141 | { 142 | "PublicIP": "192.168.205.10", 143 | "BackendType": "vxlan", 144 | "BackendData": { 145 | "VtepMAC": "82:53:2e:6a:a9:43" 146 | } 147 | } 148 | 149 | This is the flannel backend information on host1. 150 | 151 | Start flannel on host2 152 | 153 | .. code-block:: bash 154 | 155 | ubuntu@docker-node2:~$ nohup sudo ./flanneld -iface=192.168.205.11 & 156 | 157 | Check the etcd configuration 158 | 159 | .. code-block:: bash 160 | 161 | ubuntu@docker-node2:~/etcd-v3.0.12-linux-amd64$ ./etcdctl ls /coreos.com/network/subnets/ 162 | /coreos.com/network/subnets/10.15.64.0-20 163 | /coreos.com/network/subnets/10.13.48.0-20 164 | ubuntu@docker-node2:~/etcd-v3.0.12-linux-amd64$ ./etcdctl get /coreos.com/network/subnets/10.13.48.0-20 165 | {"PublicIP":"192.168.205.11","BackendType":"vxlan","BackendData":{"VtepMAC":"9e:e7:65:f3:9d:31"}} 166 | 167 | This also has a new interface created by flannel ``flannel.100`` 168 | 169 | Restart docker daemon with flannel network 170 | ------------------------------------------ 171 | 172 | Restart docker daemon with Flannel network configuration, execute commands as follows on node1 and node2: 173 | 174 | .. code-block:: bash 175 | 176 | ubuntu@docker-node1:~$ sudo service docker stop 177 | ubuntu@docker-node1:~$ sudo docker ps 178 | Cannot connect to the Docker daemon. Is the docker daemon running on this host? 179 | ubuntu@docker-node1:~$ source /run/flannel/subnet.env 180 | ubuntu@docker-node1:~$ sudo ifconfig docker0 ${FLANNEL_SUBNET} 181 | ubuntu@docker-node1:~$ sudo docker daemon --bip=${FLANNEL_SUBNET} --mtu=${FLANNEL_MTU} & 182 | 183 | After restarting, the docker daemon will bind docker0 which has a new address. We can check the new configuration with ``sudo docker network inspect bridge``. 184 | 185 | Adjust iptables 186 | --------------- 187 | 188 | Starting from Docker 1.13 default iptables policy for FORWARDING is DROP, so to make sure that containers will receive traffic from another hosts we need to adjust it: 189 | 190 | On host1: 191 | 192 | .. code-block:: bash 193 | 194 | ubuntu@docker-node1:~$ sudo iptables -P FORWARD ACCEPT 195 | 196 | On host2: 197 | 198 | .. code-block:: bash 199 | 200 | ubuntu@docker-node2:~$ sudo iptables -P FORWARD ACCEPT 201 | 202 | Start Containers 203 | ---------------- 204 | 205 | On host1: 206 | 207 | .. 
code-block:: bash 208 | 209 | ubuntu@docker-node1:~$ sudo docker run -d --name test1 busybox sh -c "while true; do sleep 3600; done" 210 | ubuntu@docker-node1:~$ sudo docker exec test1 ifconfig 211 | eth0 Link encap:Ethernet HWaddr 02:42:0A:0F:40:02 212 | inet addr:10.15.64.2 Bcast:0.0.0.0 Mask:255.255.240.0 213 | inet6 addr: fe80::42:aff:fe0f:4002/64 Scope:Link 214 | UP BROADCAST RUNNING MULTICAST MTU:1450 Metric:1 215 | RX packets:16 errors:0 dropped:0 overruns:0 frame:0 216 | TX packets:8 errors:0 dropped:0 overruns:0 carrier:0 217 | collisions:0 txqueuelen:0 218 | RX bytes:1296 (1.2 KiB) TX bytes:648 (648.0 B) 219 | 220 | lo Link encap:Local Loopback 221 | inet addr:127.0.0.1 Mask:255.0.0.0 222 | inet6 addr: ::1/128 Scope:Host 223 | UP LOOPBACK RUNNING MTU:65536 Metric:1 224 | RX packets:0 errors:0 dropped:0 overruns:0 frame:0 225 | TX packets:0 errors:0 dropped:0 overruns:0 carrier:0 226 | collisions:0 txqueuelen:1 227 | RX bytes:0 (0.0 B) TX bytes:0 (0.0 B) 228 | 229 | Oh host2: 230 | 231 | .. code-block:: bash 232 | 233 | ubuntu@docker-node2:~$ sudo docker run -d --name test2 busybox sh -c "while true; do sleep 3600; done" 234 | ubuntu@docker-node2:~$ sudo docker exec test2 ifconfig 235 | eth0 Link encap:Ethernet HWaddr 02:42:0A:0D:30:02 236 | inet addr:10.13.48.2 Bcast:0.0.0.0 Mask:255.255.240.0 237 | inet6 addr: fe80::42:aff:fe0d:3002/64 Scope:Link 238 | UP BROADCAST RUNNING MULTICAST MTU:1450 Metric:1 239 | RX packets:8 errors:0 dropped:0 overruns:0 frame:0 240 | TX packets:8 errors:0 dropped:0 overruns:0 carrier:0 241 | collisions:0 txqueuelen:0 242 | RX bytes:648 (648.0 B) TX bytes:648 (648.0 B) 243 | 244 | lo Link encap:Local Loopback 245 | inet addr:127.0.0.1 Mask:255.0.0.0 246 | inet6 addr: ::1/128 Scope:Host 247 | UP LOOPBACK RUNNING MTU:65536 Metric:1 248 | RX packets:0 errors:0 dropped:0 overruns:0 frame:0 249 | TX packets:0 errors:0 dropped:0 overruns:0 carrier:0 250 | collisions:0 txqueuelen:1 251 | RX bytes:0 (0.0 B) TX bytes:0 (0.0 B) 252 | 253 | Container test1 on host1 ping container test2 on host2 254 | 255 | .. code-block:: bash 256 | 257 | ubuntu@docker-node1:~$ sudo docker exec test1 ping google.com 258 | PING google.com (74.125.68.102): 56 data bytes 259 | 64 bytes from 74.125.68.102: seq=0 ttl=61 time=123.295 ms 260 | 64 bytes from 74.125.68.102: seq=1 ttl=61 time=127.646 ms 261 | ubuntu@docker-node1:~$ sudo docker exec test1 ping 10.13.48.2 262 | PING 10.13.48.2 (10.13.48.2): 56 data bytes 263 | 64 bytes from 10.13.48.2: seq=0 ttl=62 time=1.347 ms 264 | 64 bytes from 10.13.48.2: seq=1 ttl=62 time=0.430 ms 265 | 266 | Through ``sudo tcpdump -i enp0s8 -n not port 2380`` we can confirm the vxlan tunnel. 267 | 268 | .. code-block:: bash 269 | 270 | 05:54:43.824182 IP 192.168.205.10.36214 > 192.168.205.11.8472: OTV, flags [I] (0x08), overlay 0, instance 100 271 | IP 10.15.64.0 > 10.13.48.2: ICMP echo request, id 9728, seq 462, length 64 272 | 05:54:43.880055 IP 192.168.205.10.36214 > 192.168.205.11.8472: OTV, flags [I] (0x08), overlay 0, instance 100 273 | IP 10.15.64.0 > 10.13.48.2: ICMP echo request, id 11264, seq 245, length 64 274 | 05:54:44.179703 IP 192.168.205.10.36214 > 192.168.205.11.8472: OTV, flags [I] (0x08), overlay 0, instance 100 275 | IP 10.15.64.0 > 10.13.48.2: ICMP echo request, id 12288, seq 206, length 64 276 | 277 | Performance test [#f2]_ 278 | 279 | Reference 280 | --------- 281 | 282 | .. [#f1] https://github.com/coreos/flannel 283 | .. 
[#f2] http://chunqi.li/2015/10/10/Flannel-for-Docker-Overlay-Network/ 284 | -------------------------------------------------------------------------------- /docs/source/docker/docker-machine-aws.rst: -------------------------------------------------------------------------------- 1 | Docker Machine with Amazon AWS 2 | ============================== 3 | 4 | Sign up for AWS and configure credentials [#f1]_ 5 | ------------------------------------------------ 6 | 7 | Get AWS Access Key ID and Secret Access Key from ``IAM``. Please reference AWS documentation. Then chose a Region and Available Zone, 8 | in this lab, we chose ``region=us-west-1`` which means North California, and Avaiable zone is ``a``, please create a subnet in this zone [#f2]_. 9 | 10 | Create a docker machine 11 | ----------------------- 12 | 13 | .. code-block:: bash 14 | 15 | ➜ ~ docker-machine create --driver amazonec2 --amazonec2-region us-west-1 \ 16 | --amazonec2-zone a --amazonec2-vpc-id vpc-32c73756 \ 17 | --amazonec2-subnet-id subnet-16c84872 \ 18 | --amazonec2-ami ami-1b17257b \ 19 | --amazonec2-access-key $AWS_ACCESS_KEY_ID \ 20 | --amazonec2-secret-key $AWS_SECRET_ACCESS_KEY \ 21 | aws-swarm-manager 22 | Running pre-create checks... 23 | Creating machine... 24 | (aws-swarm-manager) Launching instance... 25 | Waiting for machine to be running, this may take a few minutes... 26 | Detecting operating system of created instance... 27 | Waiting for SSH to be available... 28 | Detecting the provisioner... 29 | Provisioning with ubuntu(upstart)... 30 | Installing Docker... 31 | Copying certs to the local machine directory... 32 | Copying certs to the remote machine... 33 | Setting Docker configuration on the remote daemon... 34 | Checking connection to Docker... 35 | Docker is up and running! 36 | To see how to connect your Docker Client to the Docker Engine running on this virtual machine, run: docker-machine env aws-swarm-manager 37 | ➜ ~ docker-machine ls 38 | NAME ACTIVE DRIVER STATE URL SWARM DOCKER ERRORS 39 | aws-swarm-manager - amazonec2 Running tcp://54.183.145.111:2376 v17.10.0-ce 40 | ➜ ~ 41 | 42 | Please pay attention to ``amazonec2-ami``, please chose a ``Ubuntu 16:04``. 43 | 44 | After created, We can use ``docker-machine ssh`` to access the host. 45 | 46 | .. code-block:: bash 47 | 48 | ➜ ~ docker-machine ssh aws-swarm-manager 49 | 50 | Welcome to Ubuntu 16.04.3 LTS (GNU/Linux 4.4.0-1038-aws x86_64) 51 | 52 | * Documentation: https://help.ubuntu.com 53 | * Management: https://landscape.canonical.com 54 | * Support: https://ubuntu.com/advantage 55 | 56 | Get cloud support with Ubuntu Advantage Cloud Guest: 57 | http://www.ubuntu.com/business/services/cloud 58 | 59 | 4 packages can be updated. 60 | 1 update is a security update. 61 | 62 | ubuntu@aws-swarm-manager:~$ sudo docker version 63 | Client: 64 | Version: 17.10.0-ce 65 | API version: 1.33 66 | Go version: go1.8.3 67 | Git commit: f4ffd25 68 | Built: Tue Oct 17 19:04:16 2017 69 | OS/Arch: linux/amd64 70 | 71 | Server: 72 | Version: 17.10.0-ce 73 | API version: 1.33 (minimum version 1.12) 74 | Go version: go1.8.3 75 | Git commit: f4ffd25 76 | Built: Tue Oct 17 19:02:56 2017 77 | OS/Arch: linux/amd64 78 | Experimental: false 79 | ubuntu@aws-swarm-manager:~$ 80 | 81 | You can also use ``docker-machine ip`` to get the ip address of the docker host. 82 | 83 | docker local client connect with remote aws docker host 84 | -------------------------------------------------------- 85 | 86 | Set the docker environment in local host. 87 | 88 | .. 
code-block:: bash 89 | 90 | ➜ ~ docker-machine env aws-swarm-manager 91 | export DOCKER_TLS_VERIFY="1" 92 | export DOCKER_HOST="tcp://xx.xx.xx.xx:2376" 93 | export DOCKER_CERT_PATH="/Users/penxiao/.docker/machine/machines/aws-swarm-manager" 94 | export DOCKER_MACHINE_NAME="aws-swarm-manager" 95 | # Run this command to configure your shell: 96 | # eval $(docker-machine env aws-swarm-manager) 97 | ➜ ~ eval $(docker-machine env aws-swarm-manager) 98 | ➜ ~ docker version 99 | Client: 100 | Version: 1.12.3 101 | API version: 1.24 102 | Go version: go1.6.3 103 | Git commit: 6b644ec 104 | Built: Thu Oct 27 00:09:21 2016 105 | OS/Arch: darwin/amd64 106 | Experimental: true 107 | 108 | Server: 109 | Version: 17.10.0-ce 110 | API version: 1.33 111 | Go version: go1.8.3 112 | Git commit: f4ffd25 113 | Built: Tue Oct 17 19:02:56 2017 114 | OS/Arch: linux/amd64 115 | ➜ ~ 116 | 117 | 118 | Reference 119 | --------- 120 | 121 | 122 | .. [#f1] https://docs.docker.com/machine/examples/aws/#/step-1-sign-up-for-aws-and-configure-credentials 123 | .. [#f2] http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/get-set-up-for-amazon-ec2.html 124 | -------------------------------------------------------------------------------- /docs/source/docker/docker-machine.rst: -------------------------------------------------------------------------------- 1 | Docker Machine on LocalHost 2 | =========================== 3 | 4 | On macOS and Windows, docker machine is installed along with other Docker products when you install the Docker Toolbox. For example if you 5 | are using Mac: 6 | 7 | .. code-block:: bash 8 | 9 | $ docker-machine -v 10 | docker-machine version 0.9.0, build 15fd4c7 11 | 12 | 13 | If you are using other OS and want to install docker machine, please go to https://docs.docker.com/machine/install-machine/ for more details. 14 | 15 | For what is docker machine and what docker machine can do, please go to https://docs.docker.com/machine/overview/ 16 | 17 | Create a machine 18 | ----------------- 19 | 20 | Docker Machine is a tool for provisioning and managing your Dockerized hosts (hosts with Docker Engine on them). 21 | Typically, you install Docker Machine on your local system. Docker Machine has its own command line client docker-machine and 22 | the Docker Engine client, docker. You can use Machine to install Docker Engine on one or more virtual systems. 23 | These virtual systems can be local (as when you use Machine to install and run Docker Engine in VirtualBox on Mac or Windows) 24 | or remote (as when you use Machine to provision Dockerized hosts on cloud providers). The Dockerized hosts themselves can be 25 | thought of, and are sometimes referred to as, managed “machines” [#f1]_. 26 | 27 | 28 | For this lab, we will use docker machine on Mac system, and create a docker host with virtualbox driver. 29 | 30 | Before we start, we can use ``ls`` command to check if there is any machine already in our host. 31 | 32 | .. code-block:: bash 33 | 34 | $ docker-machine ls 35 | NAME ACTIVE DRIVER STATE URL SWARM DOCKER ERRORS 36 | 37 | Then create a machine called ``default``. 38 | 39 | .. code-block:: bash 40 | 41 | $ docker-machine create -d virtualbox default 42 | Running pre-create checks... 43 | Creating machine... 44 | (default) Copying /Users/penxiao/.docker/machine/cache/boot2docker.iso to /Users/penxiao/.docker/machine/machines/default/boot2docker.iso... 45 | (default) Creating VirtualBox VM... 46 | (default) Creating SSH key... 47 | (default) Starting the VM... 
48 | (default) Check network to re-create if needed... 49 | (default) Waiting for an IP... 50 | Waiting for machine to be running, this may take a few minutes... 51 | Detecting operating system of created instance... 52 | Waiting for SSH to be available... 53 | Detecting the provisioner... 54 | Provisioning with boot2docker... 55 | Copying certs to the local machine directory... 56 | Copying certs to the remote machine... 57 | Setting Docker configuration on the remote daemon... 58 | Checking connection to Docker... 59 | Docker is up and running! 60 | To see how to connect your Docker Client to the Docker Engine running on this virtual machine, run: docker-machine env default 61 | $ docker-machine ls 62 | NAME ACTIVE DRIVER STATE URL SWARM DOCKER ERRORS 63 | default - virtualbox Running tcp://192.168.99.100:2376 v1.12.3 64 | 65 | How to use the docker host 66 | -------------------------- 67 | 68 | There are two ways to access the docker host 69 | 70 | - ssh into the docker host directly, then paly with docker inside 71 | - use docker client on localhost (outside the docker host) to access the docker engine inside the docker host. 72 | 73 | 1. SSH into the docker host 74 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 75 | 76 | .. code-block:: bash 77 | 78 | $ docker-machine ssh default 79 | ## . 80 | ## ## ## == 81 | ## ## ## ## ## === 82 | /"""""""""""""""""\___/ === 83 | ~~~ {~~ ~~~~ ~~~ ~~~~ ~~~ ~ / ===- ~~~ 84 | \______ o __/ 85 | \ \ __/ 86 | \____\_______/ 87 | _ _ ____ _ _ 88 | | |__ ___ ___ | |_|___ \ __| | ___ ___| | _____ _ __ 89 | | '_ \ / _ \ / _ \| __| __) / _` |/ _ \ / __| |/ / _ \ '__| 90 | | |_) | (_) | (_) | |_ / __/ (_| | (_) | (__| < __/ | 91 | |_.__/ \___/ \___/ \__|_____\__,_|\___/ \___|_|\_\___|_| 92 | Boot2Docker version 1.12.3, build HEAD : 7fc7575 - Thu Oct 27 17:23:17 UTC 2016 93 | Docker version 1.12.3, build 6b644ec 94 | docker@default:~$ docker ps 95 | CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 96 | docker@default:~$ 97 | docker@default:~$ docker run --rm hello-world 98 | Unable to find image 'hello-world:latest' locally 99 | latest: Pulling from library/hello-world 100 | c04b14da8d14: Pull complete 101 | Digest: sha256:0256e8a36e2070f7bf2d0b0763dbabdd67798512411de4cdcf9431a1feb60fd9 102 | Status: Downloaded newer image for hello-world:latest 103 | 104 | 2. docker client connect with remote docker engine 105 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 106 | 107 | Get the environment commands for your new VM. 108 | 109 | .. code-block:: bash 110 | 111 | $ docker-machine env default 112 | export DOCKER_TLS_VERIFY="1" 113 | export DOCKER_HOST="tcp://192.168.99.100:2376" 114 | export DOCKER_CERT_PATH="/Users/penxiao/.docker/machine/machines/default" 115 | export DOCKER_MACHINE_NAME="default" 116 | # Run this command to configure your 117 | 118 | Connect your docker client CLI to the new machine. 119 | 120 | Before and after we run ``eval "$(docker-machine env default)"`` on localhost: 121 | 122 | .. 
code-block:: bash 123 | 124 | $ docker images 125 | REPOSITORY TAG IMAGE ID CREATED SIZE 126 | ubuntu 14.04 aae2b63c4946 5 days ago 188 MB 127 | mongo 2.6 1999482cb0a5 6 weeks ago 391 MB 128 | python 2.7 6b494b5f019c 3 months ago 676.1 MB 129 | tutum/nginx latest a2e9b71ed366 8 months ago 206.1 MB 130 | $ eval "$(docker-machine env default)" 131 | $ docker images 132 | REPOSITORY TAG IMAGE ID CREATED SIZE 133 | hello-world latest c54a2cc56cbb 5 months ago 1.848 kB 134 | 135 | This sets environment variables for the current shell that the Docker client will read which specify 136 | the TLS settings. You need to do this each time you open a new shell or restart your machine. 137 | You can now run Docker commands on this host. 138 | 139 | 140 | Reference 141 | ---------- 142 | 143 | .. [#f1] https://docs.docker.com/machine/overview/ 144 | -------------------------------------------------------------------------------- /docs/source/docker/docker-network.rst: -------------------------------------------------------------------------------- 1 | Docker Network Overview 2 | ======================= 3 | 4 | .. image:: _image/docker-turtles-communication.jpg 5 | 6 | Image reference from [#f1]_ 7 | 8 | When you install Docker, it creates three networks automatically. You can list these networks using the docker network ls command: 9 | 10 | .. code-block:: bash 11 | 12 | $ docker network ls 13 | NETWORK ID NAME DRIVER 14 | 32b93b141bae bridge bridge 15 | c363d9a92877 host host 16 | 88077db743a8 none null 17 | 18 | 19 | .. rubric:: Reference 20 | 21 | 22 | .. [#f1] https://blog.docker.com/2015/04/docker-networking-takes-a-step-in-the-right-direction-2/ 23 | -------------------------------------------------------------------------------- /docs/source/docker/docker-ovs.rst: -------------------------------------------------------------------------------- 1 | Multi-Host Overlay Networking with Open vSwitch 2 | =============================================== 3 | 4 | .. note:: 5 | 6 | Using OVS is not a good choice, because there are many problems need to resolve, like IP management, external routing. 7 | So we do not recommand this solution. 8 | 9 | This lab will show multi-host network, let's see how containers in different hosts can communicate with each other. 10 | 11 | There are at least two ways connect containers with open vSwitch. 12 | 13 | - connect default docker0 with ovs bridge 14 | - connect container with ovs bridge directly through veth pair. 15 | 16 | We will chose the first way, becuase it's easier. For the second way, if don't use the default docker0 bridge, we will need to do 17 | more work toconnect containers with ovs, such as create network namespace and veth pair manully, attach veth to container, resolve 18 | ip address management, NAT, etc. 19 | 20 | Topology 21 | -------- 22 | 23 | .. image:: _image/ovs-gre-docker.png 24 | 25 | containers connect with docker0 bridge 26 | -------------------------------------- 27 | 28 | Start a container on host 2 29 | 30 | .. code-block:: bash 31 | 32 | ubuntu@docker-node2:~$ docker run -d --name container1 centos:7 /bin/bash -c "while true; do sleep 3600; done" 33 | 98ddd33b16ed5206615aa6bd8e930b359a877794dffe921ee20f0c4b000a440a 34 | ubuntu@docker-node2:~$ 35 | ubuntu@docker-node2:~$ docker inspect --format '{{.NetworkSettings.IPAddress}}' container1 36 | 172.17.0.2 37 | 38 | Start two containers on host 1 39 | 40 | .. 
code-block:: bash 41 | 42 | ubuntu@docker-node1:~$ docker run -d --name container1 centos:7 /bin/bash -c "while true; do sleep 3600; done" 43 | 31109d970148d710c3465af86ec3fb14229c1660640ae56c5b18435286168824 44 | ubuntu@docker-node1:~$ docker run -d --name container2 centos:7 /bin/bash -c "while true; do sleep 3600; done" 45 | fdf1cebdd9a5264e18337ea3569a081c59e5e27e2219184557e44921faa63822 46 | ubuntu@docker-node1:~$ 47 | ubuntu@docker-node1:~$ docker inspect --format '{{.NetworkSettings.IPAddress}}' container1 48 | 172.17.0.2 49 | ubuntu@docker-node1:~$ docker inspect --format '{{.NetworkSettings.IPAddress}}' container2 50 | 172.17.0.3 51 | ubuntu@docker-node1:~$ 52 | 53 | Stop container 1 on host 1, because it has them same IP address as container 1 on host 2 54 | 55 | .. code-block:: bash 56 | 57 | ubuntu@docker-node1:~$ docker stop container1 58 | container1 59 | 60 | container 2 on host 1 can not access container 1 on host 2 61 | 62 | .. code-block:: bash 63 | 64 | ubuntu@docker-node1:~$ docker exec -it container2 bash 65 | [root@fdf1cebdd9a5 /]# ping 172.17.0.2 66 | PING 172.17.0.2 (172.17.0.2) 56(84) bytes of data. 67 | ^C 68 | --- 172.17.0.2 ping statistics --- 69 | 18 packets transmitted, 0 received, 100% packet loss, time 17033ms 70 | 71 | [root@fdf1cebdd9a5 /]# 72 | 73 | 74 | Configure OVS 75 | -------------- 76 | 77 | Install OVS: 78 | 79 | .. code-block:: bash 80 | 81 | $ sudo apt-get install -y openvswitch-switch openvswitch-common 82 | 83 | Host 1 84 | ~~~~~~~ 85 | 86 | Create a ovs bridge and a veth pair 87 | 88 | .. code-block:: bash 89 | 90 | ubuntu@docker-node1:~$ sudo ovs-vsctl add-br br-int 91 | ubuntu@docker-node1:~$ sudo ovs-vsctl show 92 | 9e5ebe46-02bf-4899-badd-7aa10245afcb 93 | Bridge br-int 94 | Port br-int 95 | Interface br-int 96 | type: internal 97 | ovs_version: "2.5.0" 98 | ubuntu@docker-node1:~$ 99 | ubuntu@docker-node1:~$ sudo ip link add veth0 type veth peer name veth1 100 | 101 | 102 | Connect veth pair with ``dockre0`` and ovs bridge ``br-int``, set them up. 103 | 104 | .. 
code-block:: bash 105 | 106 | ubuntu@docker-node1:~$ sudo ovs-vsctl add-port br-int veth1 107 | ubuntu@docker-node1:~$ sudo brctl addif docker0 veth0 108 | ubuntu@docker-node1:~$ sudo ip link set veth1 up 109 | ubuntu@docker-node1:~$ sudo ip link set veth0 up 110 | ubuntu@docker-node1:~$ ip link 111 | 1: lo: mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1 112 | link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 113 | 2: enp0s3: mtu 1500 qdisc pfifo_fast state UP mode DEFAULT group default qlen 1000 114 | link/ether 02:57:5b:96:48:35 brd ff:ff:ff:ff:ff:ff 115 | 3: enp0s8: mtu 1500 qdisc pfifo_fast state UP mode DEFAULT group default qlen 1000 116 | link/ether 08:00:27:c3:54:4f brd ff:ff:ff:ff:ff:ff 117 | 4: docker0: mtu 1500 qdisc noqueue state UP mode DEFAULT group default 118 | link/ether 02:42:23:8f:ab:da brd ff:ff:ff:ff:ff:ff 119 | 9: ovs-system: mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1 120 | link/ether 46:eb:b5:81:eb:31 brd ff:ff:ff:ff:ff:ff 121 | 10: br-int: mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1 122 | link/ether 42:a0:96:7b:e9:42 brd ff:ff:ff:ff:ff:ff 123 | 11: veth1@veth0: mtu 1500 qdisc noqueue master ovs-system state UP mode DEFAULT group default qlen 1000 124 | link/ether 2a:8a:93:9d:b2:b4 brd ff:ff:ff:ff:ff:ff 125 | 12: veth0@veth1: mtu 1500 qdisc noqueue master docker0 state UP mode DEFAULT group default qlen 1000 126 | link/ether ae:16:a0:03:12:4e brd ff:ff:ff:ff:ff:ff 127 | 128 | Host 2 129 | ~~~~~~~ 130 | 131 | Almost do the same thing on host 2. 132 | 133 | .. code-block:: bash 134 | 135 | ubuntu@docker-node2:~$ ovs-vsctl add-br br-int 136 | ubuntu@docker-node2:~$ sudo ip link add veth0 type veth peer name veth1 137 | ubuntu@docker-node2:~$ sudo ovs-vsctl add-port br-int veth1 138 | ubuntu@docker-node2:~$ sudo brctl addif docker0 veth0 139 | ubuntu@docker-node2:~$ sudo ip link set veth1 up 140 | ubuntu@docker-node2:~$ sudo ip link set veth0 up 141 | 142 | GRE tunnel between host 1 and host 2 143 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 144 | 145 | on host 1 146 | 147 | .. code-block:: bash 148 | 149 | ubuntu@docker-node1:~$ sudo ovs-vsctl add-port br-int gre0 -- \ 150 | set interface gre0 type=gre options:remote_ip=192.168.205.11 151 | 152 | on host 2 153 | 154 | .. code-block:: bash 155 | 156 | ubuntu@docker-node2:~$ sudo ovs-vsctl add-port br-int gre0 -- \ 157 | set interface gre0 type=gre options:remote_ip=192.168.205.10 158 | 159 | The connection between ovs bridge and docker0 bridge 160 | 161 | .. code-block:: bash 162 | 163 | ubuntu@docker-node1:~$ sudo ovs-vsctl show 164 | 9e5ebe46-02bf-4899-badd-7aa10245afcb 165 | Bridge br-int 166 | Port "veth1" 167 | Interface "veth1" 168 | Port br-int 169 | Interface br-int 170 | type: internal 171 | Port "gre0" 172 | Interface "gre0" 173 | type: gre 174 | options: {remote_ip="192.168.205.11"} 175 | ovs_version: "2.5.0" 176 | ubuntu@docker-node1:~$ brctl show 177 | bridge name bridge id STP enabled interfaces 178 | docker0 8000.0242238fabda no veth0 179 | vethd5c0abe 180 | ubuntu@docker-node1:~$ 181 | 182 | 183 | Check GRE tunnel connection 184 | ---------------------------- 185 | 186 | in container1 on host 2 ping container 2 on host 1 187 | 188 | .. code-block:: bash 189 | 190 | ubuntu@docker-node2:~$ docker exec -it container1 bash 191 | [root@98ddd33b16ed /]# ping 172.17.0.3 192 | PING 172.17.0.3 (172.17.0.3) 56(84) bytes of data. 
193 | 64 bytes from 172.17.0.3: icmp_seq=1 ttl=64 time=1.19 ms 194 | 64 bytes from 172.17.0.3: icmp_seq=2 ttl=64 time=0.624 ms 195 | 64 bytes from 172.17.0.3: icmp_seq=3 ttl=64 time=0.571 ms 196 | ^C 197 | --- 172.17.0.3 ping statistics --- 198 | 3 packets transmitted, 3 received, 0% packet loss, time 2002ms 199 | rtt min/avg/max/mdev = 0.571/0.797/1.198/0.285 ms 200 | [root@98ddd33b16ed /]# 201 | 202 | At the same time, start ``tcpdump`` on host 1 and capture packges on the GRE source interface. 203 | 204 | .. code-block:: bash 205 | 206 | ubuntu@docker-node1:~$ sudo tcpdump -n -i enp0s8 proto gre 207 | tcpdump: verbose output suppressed, use -v or -vv for full protocol decode 208 | listening on enp0s8, link-type EN10MB (Ethernet), capture size 262144 bytes 209 | 14:12:17.966149 IP 192.168.205.11 > 192.168.205.10: GREv0, length 102: IP 172.17.0.2 > 172.17.0.3: ICMP echo request, id 23, seq 1, length 64 210 | 14:12:17.966843 IP 192.168.205.10 > 192.168.205.11: GREv0, length 102: IP 172.17.0.3 > 172.17.0.2: ICMP echo reply, id 23, seq 1, length 64 211 | 14:12:18.967513 IP 192.168.205.11 > 192.168.205.10: GREv0, length 102: IP 172.17.0.2 > 172.17.0.3: ICMP echo request, id 23, seq 2, length 64 212 | 14:12:18.967658 IP 192.168.205.10 > 192.168.205.11: GREv0, length 102: IP 172.17.0.3 > 172.17.0.2: ICMP echo reply, id 23, seq 2, length 64 213 | 14:12:19.968683 IP 192.168.205.11 > 192.168.205.10: GREv0, length 102: IP 172.17.0.2 > 172.17.0.3: ICMP echo request, id 23, seq 3, length 64 214 | 14:12:19.968814 IP 192.168.205.10 > 192.168.205.11: GREv0, length 102: IP 172.17.0.3 > 172.17.0.2: ICMP echo reply, id 23, seq 3, length 64 215 | 14:12:22.982906 ARP, Request who-has 192.168.205.11 tell 192.168.205.10, length 28 216 | 14:12:22.983262 ARP, Reply 192.168.205.11 is-at 08:00:27:b8:22:30 (oui Unknown), length 46 217 | 218 | Improvement 219 | ----------- 220 | 221 | There are some improvements can be done for this lab: 222 | 223 | - Create a new docket network instead of using the default docker0 bridge 224 | - docker bridge on host 1 and host 1 have different network ip range for containers 225 | -------------------------------------------------------------------------------- /docs/source/docker/docker-swarm-lb-scale.rst: -------------------------------------------------------------------------------- 1 | Docker Swarm with Load Balancing and Scaling 2 | ============================================ 3 | 4 | 5 | Create a Swarm Cluster 6 | ---------------------- 7 | 8 | Reference :doc:`docker-swarm` to create a swarm cluster which has four node (one manger node and three worker node). 9 | 10 | .. 
code-block:: bash 11 | 12 | ➜ ~ docker-machine ls 13 | NAME ACTIVE DRIVER STATE URL SWARM DOCKER ERRORS 14 | local-swarm-manager - virtualbox Running tcp://192.168.99.100:2376 v1.12.5 15 | local-swarm-worker1 - virtualbox Running tcp://192.168.99.101:2376 v1.12.5 16 | local-swarm-worker2 - virtualbox Running tcp://192.168.99.102:2376 v1.12.5 17 | local-swarm-worker3 - virtualbox Running tcp://192.168.99.103:2376 v1.12.5 18 | ➜ ~ docker-machine ssh local-swarm-manager 19 | docker@local-swarm-manager:~$ docker node ls 20 | ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS 21 | 3oseehppjrgkslxug746bfzvg local-swarm-worker2 Ready Active 22 | 4wi3zg11lghywrz3c3lph5929 local-swarm-worker3 Ready Active 23 | 64m0c4gyewt7si74idd2lbi16 local-swarm-worker1 Ready Active 24 | 9r994lgqivf2dr0v02np63co3 * local-swarm-manager Ready Active Leader 25 | docker@local-swarm-manager:~$ 26 | 27 | 28 | Create a Service 29 | ---------------- 30 | 31 | Create a service with cmd ``docker service create``. 32 | 33 | .. code-block:: bash 34 | 35 | docker@local-swarm-manager:~$ docker service create --replicas 1 --name helloworld --publish 80:8000 jwilder/whoami 36 | docker@local-swarm-manager:~$ docker service ls 37 | ID NAME REPLICAS IMAGE COMMAND 38 | 4issxzw4mknz helloworld 1/1 jwilder/whoami 39 | docker@local-swarm-manager:~$ docker service ps helloworld 40 | ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR 41 | 4m3bbm16oqqw0tafznii7cell helloworld.2 jwilder/whoami local-swarm-worker2 Running Running 8 minutes ago 42 | docker@local-swarm-manager:~$ 43 | 44 | 45 | We use docker image ``jwilder/whoami`` [#f1]_ which is a simple HTTP docker service that return it's container ID. 46 | It will export port 8000 by default, we use ``--publish 80:8000`` to publish its http port to 80. 47 | 48 | It will return the container host name when we use curl to access the service like: 49 | 50 | .. code-block:: bash 51 | 52 | docker@local-swarm-manager:~$ curl 127.0.0.1 53 | I\'m 6075d1ad668c 54 | docker@local-swarm-manager:~$ 55 | 56 | Scale a Service 57 | --------------- 58 | 59 | Use command ``docker service scale`` to scale a service. 60 | 61 | .. code-block:: bash 62 | 63 | docker@local-swarm-manager:~$ docker service ps helloworld 64 | ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR 65 | 9azr7sushz03hmequqw24o9kf helloworld.1 jwilder/whoami local-swarm-worker3 Running Preparing about a minute ago 66 | 4m3bbm16oqqw0tafznii7cell helloworld.2 jwilder/whoami local-swarm-worker2 Running Running 10 minutes ago 67 | eoiym8q7gqpwg1o6k0oys9bod helloworld.3 jwilder/whoami local-swarm-worker1 Running Running 59 seconds ago 68 | 2klxh8c8m3m8jctmqclnj8awg helloworld.4 jwilder/whoami local-swarm-manager Running Running 59 seconds ago 69 | dopnnfmpfqgvhwvel42vl2yw5 helloworld.5 jwilder/whoami local-swarm-worker3 Running Preparing about a minute ago 70 | docker@local-swarm-manager:~$ docker service ls 71 | ID NAME REPLICAS IMAGE COMMAND 72 | 4issxzw4mknz helloworld 3/5 jwilder/whoami 73 | 74 | 75 | There are four helloworld replicas, and two of them are preparing because it need download the docker image. 76 | 77 | We can use ``curl`` to test it again. 78 | 79 | .. code-block:: bash 80 | 81 | docker@local-swarm-manager:~$ for i in `seq 4`; do curl 127.0.0.1; done 82 | I\'m 2338a010daa4 83 | I\'m 1bc92fe7766d 84 | I\'m 6075d1ad668c 85 | I\'m 2338a010daa4 86 | docker@local-swarm-manager:~$ 87 | 88 | it's load balancing! 
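
As a side note, the capture in the scaling step above jumps straight to ``docker service ps``; the extra replicas themselves are created with ``docker service scale``, which was omitted from the capture. A sketch of that step (``helloworld`` is the service created earlier in this lab):

.. code-block:: bash

    # scale the service from 1 to 5 replicas
    docker@local-swarm-manager:~$ docker service scale helloworld=5
    helloworld scaled to 5

    # watch the tasks spread across the four nodes
    docker@local-swarm-manager:~$ docker service ps helloworld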
89 | 90 | Visualization Swarm Cluster 91 | --------------------------- 92 | 93 | There is a visualizer for Docker Swarm Mode that uses the Docker Remote API, Node.JS, and D3 [#f2]_. Start it on the manager node, 94 | then, through a web browser, we get a picture like: 95 | 96 | .. image:: _image/docker-swarm-visual.png 97 | 98 | 99 | Reference 100 | --------- 101 | 102 | .. [#f1] https://github.com/jwilder/whoami 103 | .. [#f2] https://github.com/ManoMarks/docker-swarm-visualizer 104 | -------------------------------------------------------------------------------- /docs/source/docker/docker-swarm-service.rst: -------------------------------------------------------------------------------- 1 | Docker Swarm: Create and Scale a Service 2 | ========================================= 3 | 4 | In this lab we will create a new docker swarm cluster: one manager node and three worker nodes, then 5 | create a service and try to scale it. 6 | 7 | 8 | Create a Swarm Cluster 9 | ---------------------- 10 | 11 | Based on the lab :doc:`docker-swarm`, create four docker machines and initialize a swarm cluster. 12 | 13 | .. code-block:: bash 14 | 15 | ➜ ~ docker-machine ls 16 | NAME ACTIVE DRIVER STATE URL SWARM DOCKER ERRORS 17 | swarm-manager - virtualbox Running tcp://192.168.99.103:2376 v1.12.5 18 | swarm-worker1 - virtualbox Running tcp://192.168.99.104:2376 v1.12.5 19 | swarm-worker2 - virtualbox Running tcp://192.168.99.105:2376 v1.12.5 20 | swarm-worker3 - virtualbox Running tcp://192.168.99.106:2376 v1.12.5 21 | ➜ ~ 22 | 23 | docker@swarm-manager:~$ docker node ls 24 | ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS 25 | 0skz2g68hb76efq4xknhwsjt9 swarm-worker2 Ready Active 26 | 2q015a61bl879o6adtlb7kxkl swarm-worker3 Ready Active 27 | 2sph1ezrnr5q9vy0683ah3b90 * swarm-manager Ready Active Leader 28 | 59rzjt0kqbcgw4cz7zsfflk8z swarm-worker1 Ready Active 29 | docker@swarm-manager:~$ 30 | 31 | 32 | Create a Service 33 | ---------------- 34 | 35 | Use the ``docker service create`` command on the manager node to create a service: 36 | 37 | .. code-block:: bash 38 | 39 | docker@swarm-manager:~$ docker service create --name myapp --publish 80:80/tcp nginx 40 | 7bb8pgwjky3pg1nfpu44aoyti 41 | docker@swarm-manager:~$ docker service inspect myapp --pretty 42 | ID: 7bb8pgwjky3pg1nfpu44aoyti 43 | Name: myapp 44 | Mode: Replicated 45 | Replicas: 1 46 | Placement: 47 | UpdateConfig: 48 | Parallelism: 1 49 | On failure: pause 50 | ContainerSpec: 51 | Image: nginx 52 | Resources: 53 | Ports: 54 | Protocol = tcp 55 | TargetPort = 80 56 | PublishedPort = 80 57 | docker@swarm-manager:~$ 58 | 59 | Open a web browser and you will see the nginx welcome page at http://192.168.99.103/ 60 | 61 | Scale a Service 62 | --------------- 63 | 64 | We can use ``docker service scale`` to scale a service. 65 | 66 | .. code-block:: bash 67 | 68 | docker@swarm-manager:~$ docker service scale myapp=2 69 | myapp scaled to 2 70 | docker@swarm-manager:~$ docker service inspect myapp --pretty 71 | ID: 7bb8pgwjky3pg1nfpu44aoyti 72 | Name: myapp 73 | Mode: Replicated 74 | Replicas: 2 75 | Placement: 76 | UpdateConfig: 77 | Parallelism: 1 78 | On failure: pause 79 | ContainerSpec: 80 | Image: nginx 81 | Resources: 82 | Ports: 83 | Protocol = tcp 84 | TargetPort = 80 85 | PublishedPort = 80 86 | 87 | In this example, we scale the service to 2 replicas. 
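To see which nodes the two replicas were scheduled on, and to clean up once the lab is finished, something like the following should work (output omitted here):

.. code-block:: bash

    # list the tasks of the service and the nodes they run on
    docker@swarm-manager:~$ docker service ps myapp

    # remove the service when you are done
    docker@swarm-manager:~$ docker service rm myapp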
88 | -------------------------------------------------------------------------------- /docs/source/docker/docker-swarm-topo.rst: -------------------------------------------------------------------------------- 1 | Docker Swarm Topology Deep Dive 2 | =============================== 3 | -------------------------------------------------------------------------------- /docs/source/docker/docker-swarm.rst: -------------------------------------------------------------------------------- 1 | Swarm Mode: Create a Docker Swarm Cluster 2 | ========================================= 3 | 4 | Docker swarm mode requires docker engine 1.12 or higher. This lab will need two docker engine host created by docker machine. 5 | 6 | Prepare Environment 7 | ------------------- 8 | 9 | Create two docker host machines. 10 | 11 | .. code-block:: bash 12 | 13 | ➜ ~ docker-machine ls 14 | NAME ACTIVE DRIVER STATE URL SWARM DOCKER ERRORS 15 | swarm-manager - virtualbox Running tcp://192.168.99.100:2376 v1.12.4 16 | swarm-worker1 - virtualbox Running tcp://192.168.99.101:2376 v1.12.4 17 | ➜ ~ docker-machine ip swarm-manager 18 | 192.168.99.100 19 | ➜ ~ docker-machine ip swarm-worker1 20 | 192.168.99.101 21 | ➜ ~ 22 | 23 | Create a Swarm Manage node 24 | -------------------------- 25 | 26 | SSH to swarm-manager host and init a manager node. 27 | 28 | .. code-block:: bash 29 | 30 | ➜ ~ docker-machine ssh swarm-manager 31 | docker@swarm-manager:~$ docker swarm init --advertise-addr 192.168.99.100 32 | Swarm initialized: current node (7f2gi8xoz6prs2gi53nqa4wu8) is now a manager. 33 | 34 | To add a worker to this swarm, run the following command: 35 | 36 | docker swarm join \ 37 | --token SWMTKN-1-58lrmtavqlt9v1ejujsfh5o9hf3p804xtn5qhnsriqw4an2vhd-8x1q7q4jpvs1govwmjhnhffo7 \ 38 | 192.168.99.100:2377 39 | 40 | To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions. 41 | 42 | docker@swarm-manager:~$ 43 | 44 | From command ``docker info`` we can get the current information about this swarm cluster. 45 | 46 | Add one Docker Node to the Swarm cluster 47 | ----------------------------------------- 48 | 49 | Just run the command generated by ``swarm init`` last step in the other docker machine host. 50 | Please make sure the ``swarm-worker1`` host can access 192.168.99.100:2377 51 | 52 | .. code-block:: bash 53 | 54 | ➜ ~ docker-machine ssh swarm-worker1 55 | docker@swarm-worker1:~$ docker swarm join \ 56 | --token SWMTKN-1-58lrmtavqlt9v1ejujsfh5o9hf3p804xtn5qhnsriqw4an2vhd-8x1q7q4jpvs1govwmjhnhffo7 \ 57 | 192.168.99.100:2377 58 | This node joined a swarm as a worker. 59 | docker@swarm-worker1:~$ 60 | 61 | We can check the cluster status on manager node: 62 | 63 | .. code-block:: bash 64 | 65 | ➜ ~ docker-machine ssh swarm-manager 66 | Boot2Docker version 1.12.4, build HEAD : d0b8fd8 - Tue Dec 13 18:21:26 UTC 2016 67 | Docker version 1.12.4, build 1564f02 68 | docker@swarm-manager:~$ docker node ls 69 | ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS 70 | 7f2gi8xoz6prs2gi53nqa4wu8 * swarm-manager Ready Active Leader 71 | 9mm8t4l5stcudn5tx1fweht1d swarm-worker1 Ready Active 72 | docker@swarm-manager:~$ 73 | 74 | And there are two networks automatically created on these two hosts: 75 | 76 | .. 
code-block:: bash 77 | 78 | docker@swarm-manager:~$ sudo docker network ls 79 | NETWORK ID NAME DRIVER SCOPE 80 | f773d9bee59f bridge bridge local 81 | bcc7996ba96b docker_gwbridge bridge local 82 | a2d7040abdd0 host host local 83 | 01y2wr8jucgf ingress overlay swarm 84 | 8fde4990cff2 none null local 85 | docker@swarm-manager:~$ 86 | docker@swarm-worker1:~$ sudo docker network ls 87 | NETWORK ID NAME DRIVER SCOPE 88 | 470f8e1db857 bridge bridge local 89 | 18bcb76c26b0 docker_gwbridge bridge local 90 | 1e347b54188e host host local 91 | 01y2wr8jucgf ingress overlay swarm 92 | 9ba27b95c9ad none null local 93 | docker@swarm-worker1:~$ 94 | 95 | The first is ``docker_gwbridge`` and the second is ``ingress``, one is bridge network, and the other is overlay network. 96 | -------------------------------------------------------------------------------- /docs/source/docker/host-network.rst: -------------------------------------------------------------------------------- 1 | Host Network Deep Dive 2 | ====================== 3 | 4 | In host network mode, the container and the host will be in the same network namespace. 5 | 6 | Docker version for this lab: 7 | 8 | .. code-block:: bash 9 | 10 | $ docker version 11 | Client: 12 | Version: 1.11.2 13 | API version: 1.23 14 | Go version: go1.5.4 15 | Git commit: b9f10c9 16 | Built: Wed Jun 1 21:23:11 2016 17 | OS/Arch: linux/amd64 18 | 19 | Server: 20 | Version: 1.11.2 21 | API version: 1.23 22 | Go version: go1.5.4 23 | Git commit: b9f10c9 24 | Built: Wed Jun 1 21:23:11 2016 25 | OS/Arch: linux/amd64 26 | docker 27 | 28 | Start a container in host network mode with ``--net=host``. 29 | 30 | .. code-block:: bash 31 | 32 | $ docker run -d --name test3 --net=host centos:7 /bin/bash -c "while true; do sleep 3600; done" 33 | c05d6d379459a651dbd6a98606328236063c541842db5e456767c219e2c52716 34 | $ ip link 35 | 1: lo: mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT 36 | link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 37 | 2: eth0: mtu 9001 qdisc pfifo_fast state UP mode DEFAULT qlen 1000 38 | link/ether 06:95:4a:1f:08:7f brd ff:ff:ff:ff:ff:ff 39 | 3: docker0: mtu 1500 qdisc noqueue state DOWN mode DEFAULT 40 | link/ether 02:42:d6:23:e6:18 brd ff:ff:ff:ff:ff:ff 41 | $ docker network inspect host 42 | [ 43 | { 44 | "Name": "host", 45 | "Id": "c363d9a92877e78cb33e7e5dd7884babfd6d05ae2100162fca21f756fe340b79", 46 | "Scope": "local", 47 | "Driver": "host", 48 | "EnableIPv6": false, 49 | "IPAM": { 50 | "Driver": "default", 51 | "Options": null, 52 | "Config": [] 53 | }, 54 | "Internal": false, 55 | "Containers": { 56 | "c05d6d379459a651dbd6a98606328236063c541842db5e456767c219e2c52716": { 57 | "Name": "test3", 58 | "EndpointID": "929c58100f6e4356eadccbe2f44bf1ce40567763594266831259d012cd76e4d6", 59 | "MacAddress": "", 60 | "IPv4Address": "", 61 | "IPv6Address": "" 62 | } 63 | }, 64 | "Options": {}, 65 | "Labels": {} 66 | } 67 | ] 68 | 69 | Unlike bridge network mode, there is no veth pair. Go to the inside of the container. 70 | 71 | .. 
code-block:: bash 72 | 73 | $ docker exec -it test3 bash 74 | # yum install net-tools -y 75 | # ifconfig 76 | docker0: flags=4099 mtu 1500 77 | inet 172.17.0.1 netmask 255.255.0.0 broadcast 0.0.0.0 78 | inet6 fe80::42:d6ff:fe23:e618 prefixlen 64 scopeid 0x20 79 | ether 02:42:d6:23:e6:18 txqueuelen 0 (Ethernet) 80 | RX packets 6624 bytes 359995 (351.5 KiB) 81 | RX errors 0 dropped 0 overruns 0 frame 0 82 | TX packets 11019 bytes 16432384 (15.6 MiB) 83 | TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0 84 | 85 | eth0: flags=4163 mtu 9001 86 | inet 172.31.43.155 netmask 255.255.240.0 broadcast 172.31.47.255 87 | inet6 fe80::495:4aff:fe1f:87f prefixlen 64 scopeid 0x20 88 | ether 06:95:4a:1f:08:7f txqueuelen 1000 (Ethernet) 89 | RX packets 1982838 bytes 765628507 (730.1 MiB) 90 | RX errors 0 dropped 0 overruns 0 frame 0 91 | TX packets 2689881 bytes 330857410 (315.5 MiB) 92 | TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0 93 | 94 | lo: flags=73 mtu 65536 95 | inet 127.0.0.1 netmask 255.0.0.0 96 | inet6 ::1 prefixlen 128 scopeid 0x10 97 | loop txqueuelen 0 (Local Loopback) 98 | RX packets 6349 bytes 8535636 (8.1 MiB) 99 | RX errors 0 dropped 0 overruns 0 frame 0 100 | TX packets 6349 bytes 8535636 (8.1 MiB) 101 | TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0 102 | # ping www.google.com 103 | PING www.google.com (172.217.3.196) 56(84) bytes of data. 104 | 64 bytes from sea15s12-in-f196.1e100.net (172.217.3.196): icmp_seq=1 ttl=43 time=7.34 ms 105 | 64 bytes from sea15s12-in-f4.1e100.net (172.217.3.196): icmp_seq=2 ttl=43 time=7.35 ms 106 | ^C 107 | --- www.google.com ping statistics --- 108 | 2 packets transmitted, 2 received, 0% packet loss, time 1001ms 109 | rtt min/avg/max/mdev = 7.342/7.346/7.350/0.004 ms 110 | 111 | The container has the same ip/mac address as the host. we see that when using host mode networking, 112 | the container effectively inherits the IP address from its host. This mode is faster than the bridge 113 | mode (because there is no routing overhead), but it exposes the container directly to the public network, 114 | with all its security implications [#f1]_. 115 | 116 | 117 | 118 | 119 | Reference 120 | ---------- 121 | 122 | .. [#f1] https://www.oreilly.com/learning/what-is-docker-networking 123 | -------------------------------------------------------------------------------- /docs/source/docker/netns.rst: -------------------------------------------------------------------------------- 1 | Linux Network Namespace Introduction 2 | ===================================== 3 | 4 | In this tutorial, we will learn what is Linux network namespace and how to use it. 5 | 6 | Docker uses many Linux namespace technologies for isolation, there are user namespace, process namespace, etc. For network isolation 7 | docker uses Linux network namespace technology, each docker container has its own network namespace, which means it has its own IP address, 8 | routing table, etc. 9 | 10 | First, let's see how to create and check a network namespace. The lab environment we used today is a docker host which is created by docker-machine tool 11 | on Amazon AWS. 12 | 13 | Create and List Network Namespace 14 | ---------------------------------- 15 | 16 | Use ``ip netns add `` to create a network namespace, and ``ip netns list`` to list all network namepaces on the host. 17 | 18 | .. 
code-block:: bash 19 | 20 | ubuntu@docker-host-aws:~$ sudo ip netns add test1 21 | ubuntu@docker-host-aws:~$ ip netns list 22 | test1 23 | ubuntu@docker-host-aws:~$ 24 | 25 | 26 | Delete Network Namespace 27 | ------------------------- 28 | 29 | Use ``ip netns delete `` to delete a network namespace. 30 | 31 | .. code-block:: bash 32 | 33 | ubuntu@docker-host-aws:~$ sudo ip netns delete test1 34 | ubuntu@docker-host-aws:~$ ip netns list 35 | ubuntu@docker-host-aws:~$ 36 | 37 | Execute CMD within Network Namespace 38 | ------------------------------------- 39 | 40 | How to check interfaces in a particular network namespace, we can use command ``ip netns exec `` like: 41 | 42 | .. code-block:: bash 43 | 44 | ubuntu@docker-host-aws:~$ sudo ip netns exec test1 ip a 45 | 1: lo: mtu 65536 qdisc noop state DOWN group default 46 | link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 47 | ubuntu@docker-host-aws:~$ 48 | 49 | ``ip a`` will list all ip interfaces within this ``test1`` network namespaces. From the output we can see that the ``lo`` inteface is ``DOWN``, 50 | we can run a command to let it up. 51 | 52 | .. code-block:: bash 53 | 54 | ubuntu@docker-host-aws:~$ sudo ip netns exec test1 ip link 55 | 1: lo: mtu 65536 qdisc noop state DOWN mode DEFAULT group default 56 | link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 57 | ubuntu@docker-host-aws:~$ sudo ip netns exec test1 ip link set dev lo up 58 | ubuntu@docker-host-aws:~$ sudo ip netns exec test1 ip link 59 | 1: lo: mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default 60 | link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 61 | 62 | The status of lo became ``UNKNOWN``, please ignore that and go on. 63 | 64 | Add Interface to a Network Namespace 65 | ------------------------------------ 66 | 67 | We will create a virtual interface pair, it has two virtual interfaces which are connected by a virtual cable 68 | 69 | .. code-block:: bash 70 | 71 | ubuntu@docker-host-aws:~$ sudo ip link add veth-a type veth peer name veth-b 72 | ubuntu@docker-host-aws:~$ ip link 73 | 1: lo: mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default 74 | link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 75 | 2: eth0: mtu 9001 qdisc pfifo_fast state UP mode DEFAULT group default qlen 1000 76 | link/ether 02:30:c1:3e:63:3a brd ff:ff:ff:ff:ff:ff 77 | 4: docker0: mtu 1500 qdisc noqueue state DOWN mode DEFAULT group default 78 | link/ether 02:42:a7:88:bd:32 brd ff:ff:ff:ff:ff:ff 79 | 27: veth-b: mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000 80 | link/ether 52:58:31:ef:0b:98 brd ff:ff:ff:ff:ff:ff 81 | 28: veth-a: mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000 82 | link/ether 3e:89:92:ac:ef:10 brd ff:ff:ff:ff:ff:ff 83 | ubuntu@docker-host-aws:~$ 84 | 85 | All these two interfaces are located on localhost default network namespace. what we will do is move one of them to ``test1`` network namespace, 86 | we can do this through: 87 | 88 | .. 
code-block:: bash 89 | 90 | ubuntu@docker-host-aws:~$ sudo ip link set veth-b netns test1 91 | ubuntu@docker-host-aws:~$ ip link 92 | 1: lo: mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default 93 | link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 94 | 2: eth0: mtu 9001 qdisc pfifo_fast state UP mode DEFAULT group default qlen 1000 95 | link/ether 02:30:c1:3e:63:3a brd ff:ff:ff:ff:ff:ff 96 | 4: docker0: mtu 1500 qdisc noqueue state DOWN mode DEFAULT group default 97 | link/ether 02:42:a7:88:bd:32 brd ff:ff:ff:ff:ff:ff 98 | 28: veth-a: mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000 99 | link/ether 3e:89:92:ac:ef:10 brd ff:ff:ff:ff:ff:ff 100 | ubuntu@docker-host-aws:~$ sudo ip netns exec test1 ip link 101 | 1: lo: mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default 102 | link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 103 | 27: veth-b: mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000 104 | link/ether 52:58:31:ef:0b:98 brd ff:ff:ff:ff:ff:ff 105 | ubuntu@docker-host-aws:~$ 106 | 107 | Now, the interface ``veth-b`` is in network namespace ``test1``. 108 | 109 | Assign IP address to veth interface 110 | ------------------------------------ 111 | 112 | In the localhost to set ``veth-a`` 113 | 114 | .. code-block:: bash 115 | 116 | ubuntu@docker-host-aws:~$ sudo ip addr add 192.168.1.1/24 dev veth-a 117 | ubuntu@docker-host-aws:~$ sudo ip link set veth-a up 118 | ubuntu@docker-host-aws:~$ ip link 119 | 1: lo: mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default 120 | link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 121 | 2: eth0: mtu 9001 qdisc pfifo_fast state UP mode DEFAULT group default qlen 1000 122 | link/ether 02:30:c1:3e:63:3a brd ff:ff:ff:ff:ff:ff 123 | 4: docker0: mtu 1500 qdisc noqueue state DOWN mode DEFAULT group default 124 | link/ether 02:42:a7:88:bd:32 brd ff:ff:ff:ff:ff:ff 125 | 28: veth-a: mtu 1500 qdisc pfifo_fast state DOWN mode DEFAULT group default qlen 1000 126 | link/ether 3e:89:92:ac:ef:10 brd ff:ff:ff:ff:ff:ff 127 | 128 | ``veth-a`` has an IP address, but its status is DOWN. Now let's set ``veth-b`` in ``test1``. 129 | 130 | .. code-block:: bash 131 | 132 | ubuntu@docker-host-aws:~$ sudo ip netns exec test1 ip addr add 192.168.1.2/24 dev veth-b 133 | ubuntu@docker-host-aws:~$ sudo ip netns exec test1 ip link set dev veth-b up 134 | ubuntu@docker-host-aws:~$ ip link 135 | 1: lo: mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default 136 | link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 137 | 2: eth0: mtu 9001 qdisc pfifo_fast state UP mode DEFAULT group default qlen 1000 138 | link/ether 02:30:c1:3e:63:3a brd ff:ff:ff:ff:ff:ff 139 | 4: docker0: mtu 1500 qdisc noqueue state DOWN mode DEFAULT group default 140 | link/ether 02:42:a7:88:bd:32 brd ff:ff:ff:ff:ff:ff 141 | 28: veth-a: mtu 1500 qdisc pfifo_fast state UP mode DEFAULT group default qlen 1000 142 | link/ether 3e:89:92:ac:ef:10 brd ff:ff:ff:ff:ff:ff 143 | ubuntu@docker-host-aws:~$ sudo ip netns exec test1 ip link 144 | 1: lo: mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default 145 | link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 146 | 27: veth-b: mtu 1500 qdisc pfifo_fast state UP mode DEFAULT group default qlen 1000 147 | link/ether 52:58:31:ef:0b:98 brd ff:ff:ff:ff:ff:ff 148 | 149 | After configured ``veth-b`` and up it, both ``veth-a`` and ``veth-b`` are UP. Now we can use ``ping`` to check their connectivity. 150 | 151 | .. 
code-block:: bash 152 | 153 | ubuntu@docker-host-aws:~$ ping 192.168.1.2 154 | PING 192.168.1.2 (192.168.1.2) 56(84) bytes of data. 155 | 64 bytes from 192.168.1.2: icmp_seq=1 ttl=64 time=0.047 ms 156 | 64 bytes from 192.168.1.2: icmp_seq=2 ttl=64 time=0.046 ms 157 | 64 bytes from 192.168.1.2: icmp_seq=3 ttl=64 time=0.052 ms 158 | ^C 159 | --- 192.168.1.2 ping statistics --- 160 | 3 packets transmitted, 3 received, 0% packet loss, time 1998ms 161 | rtt min/avg/max/mdev = 0.046/0.048/0.052/0.006 ms 162 | ubuntu@docker-host-aws:~$ 163 | 164 | 165 | Please go to http://www.opencloudblog.com/?p=66 to learn more. 166 | -------------------------------------------------------------------------------- /docs/source/docker/port-mapping.rst: -------------------------------------------------------------------------------- 1 | Container Port Mapping in Bridge networking 2 | =========================================== 3 | 4 | Through :doc:`bridged-network` we know that by default Docker containers can make connections to the outside world, 5 | but the outside world cannot connect to containers. Each outgoing connection will appear to originate from one of 6 | the host machine’s own IP addresses thanks to an iptables masquerading rule on the host machine that the Docker 7 | server creates when it starts: [#f1]_ 8 | 9 | .. code-block:: bash 10 | 11 | ubuntu@docker-node1:~$ sudo iptables -t nat -L -n 12 | ... 13 | Chain POSTROUTING (policy ACCEPT) 14 | target prot opt source destination 15 | MASQUERADE all -- 172.17.0.0/16 0.0.0.0/0 16 | ... 17 | ubuntu@docker-node1:~$ ifconfig docker0 18 | docker0 Link encap:Ethernet HWaddr 02:42:58:22:4c:30 19 | inet addr:172.17.0.1 Bcast:0.0.0.0 Mask:255.255.0.0 20 | UP BROADCAST MULTICAST MTU:1500 Metric:1 21 | RX packets:0 errors:0 dropped:0 overruns:0 frame:0 22 | TX packets:0 errors:0 dropped:0 overruns:0 carrier:0 23 | collisions:0 txqueuelen:0 24 | RX bytes:0 (0.0 B) TX bytes:0 (0.0 B) 25 | 26 | ubuntu@docker-node1:~$ 27 | 28 | The Docker server creates a ``masquerade`` rule that let containers connect to IP addresses in the outside world. 29 | 30 | Bind Container port to the host 31 | -------------------------------- 32 | 33 | Start a nginx container which export port 80 and 443. we can access the port from inside of the docker host. 34 | 35 | .. code-block:: bash 36 | 37 | ubuntu@docker-node1:~$ sudo docker run -d --name demo nginx 38 | ubuntu@docker-node1:~$ sudo docker ps 39 | CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 40 | b5e53067e12f nginx "nginx -g 'daemon off" 8 minutes ago Up 8 minutes 80/tcp, 443/tcp demo 41 | ubuntu@docker-node1:~$ sudo docker inspect --format {{.NetworkSettings.IPAddress}} demo 42 | 172.17.0.2 43 | ubuntu@docker-node1:~$ curl 172.17.0.2 44 | 45 | 46 | 47 | Welcome to nginx! 48 | 55 | 56 | 57 |

Welcome to nginx!

If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.

For online documentation and support please refer to nginx.org.
Commercial support is available at nginx.com.

Thank you for using nginx.
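# Hypothetical sanity check (not part of the original capture): `docker port`
# prints nothing for this container because no port has been published to the
# host yet; the 80/tcp, 443/tcp shown by `docker ps` are only EXPOSEd by the image.
ubuntu@docker-node1:~$ sudo docker port demo
ubuntu@docker-node1:~$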

67 | 68 | 69 | 70 | If we want to access the nginx web from outside of the docker host, we must bind the port to docker host like this: 71 | 72 | .. code-block:: bash 73 | 74 | ubuntu@docker-node1:~$ sudo docker run -d -p 80 --name demo nginx 75 | 0fb783dcd5b3010c0ef47e4c929dfe0c9eac8ddec2e5e0470df5529bfd4cb64e 76 | ubuntu@docker-node1:~$ sudo docker ps 77 | CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 78 | 0fb783dcd5b3 nginx "nginx -g 'daemon off" 5 seconds ago Up 5 seconds 443/tcp, 0.0.0.0:32768->80/tcp demo 79 | ubuntu@docker-node1:~$ curl 192.168.205.10:32768 80 | 81 | 82 | 83 | Welcome to nginx! 84 | 91 | 92 | 93 |

Welcome to nginx!

If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.

For online documentation and support please refer to nginx.org.
Commercial support is available at nginx.com.

Thank you for using nginx.
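# Hypothetical follow-up (not in the original capture): with `-p 80` in effect,
# `docker port` reports the dynamically assigned host binding, matching the
# 0.0.0.0:32768->80/tcp entry shown by `docker ps` above.
ubuntu@docker-node1:~$ sudo docker port demo
80/tcp -> 0.0.0.0:32768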

103 | 104 | 105 | ubuntu@docker-node1:~$ ifconfig enp0s8 106 | enp0s8 Link encap:Ethernet HWaddr 08:00:27:7a:ac:d2 107 | inet addr:192.168.205.10 Bcast:192.168.205.255 Mask:255.255.255.0 108 | inet6 addr: fe80::a00:27ff:fe7a:acd2/64 Scope:Link 109 | UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1 110 | RX packets:0 errors:0 dropped:0 overruns:0 frame:0 111 | TX packets:8 errors:0 dropped:0 overruns:0 carrier:0 112 | collisions:0 txqueuelen:1000 113 | RX bytes:0 (0.0 B) TX bytes:648 (648.0 B) 114 | 115 | ubuntu@docker-node1:~$ 116 | 117 | If we want to point out which port on host want to bind: 118 | 119 | .. code-block:: bash 120 | 121 | ubuntu@docker-node1:~$ sudo docker run -d -p 80:80 --name demo1 nginx 122 | 4f548139a4be6574e3f9718f99a05e5174bdfb62d229ea656d35a979b5b0507d 123 | ubuntu@docker-node1:~$ sudo docker ps 124 | CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 125 | 4f548139a4be nginx "nginx -g 'daemon off" 5 seconds ago Up 4 seconds 0.0.0.0:80->80/tcp, 443/tcp demo1 126 | 0fb783dcd5b3 nginx "nginx -g 'daemon off" 2 minutes ago Up 2 minutes 443/tcp, 0.0.0.0:32768->80/tcp demo 127 | ubuntu@docker-node1:~$ 128 | 129 | What happened 130 | -------------- 131 | 132 | It's iptables 133 | 134 | .. code-block:: bash 135 | 136 | 137 | ubuntu@docker-node1:~$ sudo iptables -t nat -L -n 138 | Chain PREROUTING (policy ACCEPT) 139 | target prot opt source destination 140 | DOCKER all -- 0.0.0.0/0 0.0.0.0/0 ADDRTYPE match dst-type LOCAL 141 | 142 | Chain INPUT (policy ACCEPT) 143 | target prot opt source destination 144 | 145 | Chain OUTPUT (policy ACCEPT) 146 | target prot opt source destination 147 | DOCKER all -- 0.0.0.0/0 !127.0.0.0/8 ADDRTYPE match dst-type LOCAL 148 | 149 | Chain POSTROUTING (policy ACCEPT) 150 | target prot opt source destination 151 | MASQUERADE all -- 172.17.0.0/16 0.0.0.0/0 152 | MASQUERADE tcp -- 172.17.0.2 172.17.0.2 tcp dpt:80 153 | MASQUERADE tcp -- 172.17.0.3 172.17.0.3 tcp dpt:80 154 | 155 | Chain DOCKER (2 references) 156 | target prot opt source destination 157 | RETURN all -- 0.0.0.0/0 0.0.0.0/0 158 | DNAT tcp -- 0.0.0.0/0 0.0.0.0/0 tcp dpt:32768 to:172.17.0.2:80 159 | DNAT tcp -- 0.0.0.0/0 0.0.0.0/0 tcp dpt:80 to:172.17.0.3:80 160 | ubuntu@docker-node1:~$ 161 | 162 | ubuntu@docker-node1:~$ sudo iptables -t nat -nvxL 163 | Chain PREROUTING (policy ACCEPT 0 packets, 0 bytes) 164 | pkts bytes target prot opt in out source destination 165 | 1 44 DOCKER all -- * * 0.0.0.0/0 0.0.0.0/0 ADDRTYPE match dst-type LOCAL 166 | 167 | Chain INPUT (policy ACCEPT 0 packets, 0 bytes) 168 | pkts bytes target prot opt in out source destination 169 | 170 | Chain OUTPUT (policy ACCEPT 0 packets, 0 bytes) 171 | pkts bytes target prot opt in out source destination 172 | 4 240 DOCKER all -- * * 0.0.0.0/0 !127.0.0.0/8 ADDRTYPE match dst-type LOCAL 173 | 174 | Chain POSTROUTING (policy ACCEPT 2 packets, 120 bytes) 175 | pkts bytes target prot opt in out source destination 176 | 0 0 MASQUERADE all -- * !docker0 172.17.0.0/16 0.0.0.0/0 177 | 0 0 MASQUERADE tcp -- * * 172.17.0.2 172.17.0.2 tcp dpt:80 178 | 0 0 MASQUERADE tcp -- * * 172.17.0.3 172.17.0.3 tcp dpt:80 179 | 180 | Chain DOCKER (2 references) 181 | pkts bytes target prot opt in out source destination 182 | 0 0 RETURN all -- docker0 * 0.0.0.0/0 0.0.0.0/0 183 | 1 60 DNAT tcp -- !docker0 * 0.0.0.0/0 0.0.0.0/0 tcp dpt:32768 to:172.17.0.2:80 184 | 2 120 DNAT tcp -- !docker0 * 0.0.0.0/0 0.0.0.0/0 tcp dpt:80 to:172.17.0.3:80 185 | ubuntu@docker-node1:~$ 186 | 187 | 188 | References 189 | ---------- 190 | 191 | .. 
[#f1] https://docs.docker.com/engine/userguide/networking/default_network/binding/ 192 | -------------------------------------------------------------------------------- /docs/source/index.rst: -------------------------------------------------------------------------------- 1 | .. Docker Kubernetes Lab documentation master file, created by 2 | sphinx-quickstart on Fri Nov 25 23:35:48 2016. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Docker Kubernetes Lab Handbook 7 | ============================== 8 | 9 | This handbook contains some docker and kubernetes lab tutorials. It will be useful if you are learning docker or kubernetes now. 10 | The labs in this tutorial are all well documented, include the required environments, steps, detailed input and output. 11 | 12 | .. warning:: 13 | 14 | This is just a lab guide, not a documentation for docker or kubernetes, please go to their online documentation sites for more details about 15 | what docker or kubernetes is and how does it work. 16 | 17 | Table of Contents 18 | ----------------- 19 | 20 | .. toctree:: 21 | :maxdepth: 2 22 | 23 | lab-environment 24 | docker 25 | kubernetes 26 | coreos 27 | 28 | Feedback 29 | ======== 30 | 31 | Please go to github https://github.com/xiaopeng163/docker-k8s-lab and create issue or PR, thanks. 32 | 33 | 34 | Indices and tables 35 | ================== 36 | 37 | * :ref:`genindex` 38 | * :ref:`modindex` 39 | * :ref:`search` 40 | -------------------------------------------------------------------------------- /docs/source/kubernetes.rst: -------------------------------------------------------------------------------- 1 | Kubernetes 2 | =========== 3 | 4 | .. toctree:: 5 | :maxdepth: 1 6 | 7 | kubernetes/kubernetes-aws 8 | kubernetes/kubernetes-aws-tectonic 9 | kubernetes/minikube 10 | kubernetes/kubeadm 11 | kubernetes/stepbystep 12 | -------------------------------------------------------------------------------- /docs/source/kubernetes/kubeadm.rst: -------------------------------------------------------------------------------- 1 | Get Started with Kubeadm 2 | ======================== 3 | 4 | We will create a three nodes kubernetes cluster with ``kubeadm``. 5 | 6 | Prepare three vagrant hosts 7 | --------------------------- 8 | 9 | .. code-block:: bash 10 | 11 | $ git clone https://github.com/xiaopeng163/docker-k8s-lab 12 | $ cd docker-k8s-lab/lab/k8s/multi-node/vagrant 13 | $ vagrant up 14 | $ vagrant status 15 | Current machine states: 16 | 17 | k8s-master running (virtualbox) 18 | k8s-worker1 running (virtualbox) 19 | k8s-worker2 running (virtualbox) 20 | 21 | ``docker`` ``kubelet`` ``kubeadm`` ``kubectl`` ``kubernetes-cni`` are already installed on each host. 22 | 23 | 24 | Initialize master node 25 | -------------------------- 26 | 27 | Use ``kubeadm init`` command to initialize the master node just like ``docker swarm``. 28 | 29 | .. code-block:: bash 30 | 31 | ubuntu@k8s-master:~$ sudo kubeadm init --api-advertise-addresses=192.168.205.10 32 | [kubeadm] WARNING: kubeadm is in alpha, please do not use it for production clusters. 33 | [preflight] Running pre-flight checks 34 | [init] Using Kubernetes version: v1.5.1 35 | [tokens] Generated token: "af6b44.f383a4116ef0d028" 36 | [certificates] Generated Certificate Authority key and certificate. 
37 | [certificates] Generated API Server key and certificate 38 | [certificates] Generated Service Account signing keys 39 | [certificates] Created keys and certificates in "/etc/kubernetes/pki" 40 | [kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/kubelet.conf" 41 | [kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/admin.conf" 42 | [apiclient] Created API client, waiting for the control plane to become ready 43 | [apiclient] All control plane components are healthy after 61.784561 seconds 44 | [apiclient] Waiting for at least one node to register and become ready 45 | [apiclient] First node is ready after 3.004480 seconds 46 | [apiclient] Creating a test deployment 47 | [apiclient] Test deployment succeeded 48 | [token-discovery] Created the kube-discovery deployment, waiting for it to become ready 49 | [token-discovery] kube-discovery is ready after 21.503085 seconds 50 | [addons] Created essential addon: kube-proxy 51 | [addons] Created essential addon: kube-dns 52 | 53 | Your Kubernetes master has initialized successfully! 54 | 55 | You should now deploy a pod network to the cluster. 56 | Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: 57 | http://kubernetes.io/docs/admin/addons/ 58 | 59 | You can now join any number of machines by running the following on each node: 60 | 61 | kubeadm join --token=af6b44.f383a4116ef0d028 192.168.205.10 62 | 63 | Join worker nodes 64 | ------------------ 65 | 66 | Run ``kubeadm join`` on each worker node to join the kubernetes cluster. 67 | 68 | .. code-block:: bash 69 | 70 | ubuntu@k8s-worker1:~$ kubeadm join --token=af6b44.f383a4116ef0d028 192.168.205.10 71 | ubuntu@k8s-worker2:~$ kubeadm join --token=af6b44.f383a4116ef0d028 192.168.205.10 72 | 73 | Use ``kubectl get nodes`` to check the cluster information. 74 | 75 | .. code-block:: bash 76 | 77 | ubuntu@k8s-master:~$ kubectl get nodes 78 | NAME STATUS AGE 79 | k8s-master Ready,master 10m 80 | k8s-worker1 Ready 1m 81 | k8s-worker2 Ready 3s 82 | -------------------------------------------------------------------------------- /docs/source/kubernetes/kubernetes-aws-tectonic.rst: -------------------------------------------------------------------------------- 1 | Create a Kubernetes Cluster on AWS with Tectonic 2 | ================================================ 3 | 4 | Please check the Youtube 5 | 6 | https://www.youtube.com/watch?v=wwho8DsN5iU&list=PLfQqWeOCIH4AF-4IUpHZaEdlQOkkVt-0D&index=12 -------------------------------------------------------------------------------- /docs/source/kubernetes/kubernetes-aws.rst: -------------------------------------------------------------------------------- 1 | Create a Kubernetes Cluster on AWS 2 | ================================== 3 | 4 | In this tutorial, we will create a Kubernetes Cluster on AWS different A-Zone, and will reference this https://kubernetes.io/docs/admin/multiple-zones/ 5 | 6 | Please make sure you have installed ``awscli`` (https://aws.amazon.com/cli/) 7 | 8 | Create the cluster 9 | ------------------- 10 | 11 | .. code-block:: bash 12 | 13 | curl -sS https://get.k8s.io | MULTIZONE=true KUBERNETES_PROVIDER=aws KUBE_AWS_ZONE=us-west-2a NUM_NODES=1 bash 14 | 15 | This command will create a k8s cluster which include one master node and one worker node. 16 | 17 | Add more nodes to the cluster 18 | ------------------------------ 19 | 20 | .. 
code-block:: bash 21 | 22 | KUBE_USE_EXISTING_MASTER=true MULTIZONE=true KUBERNETES_PROVIDER=aws KUBE_AWS_ZONE=us-west-2b NUM_NODES=2 KUBE_SUBNET_CIDR=172.20.1.0/24 MASTER_INTERNAL_IP=172.20.0.9 kubernetes/cluster/kube-up.sh 23 | 24 | This will create two worker nodes in another zone ``us-west-2b``. 25 | 26 | Check our cluster 27 | ----------------- 28 | 29 | .. code-block:: bash 30 | 31 | ➜ ~ kubectl get nodes --show-labels 32 | NAME STATUS AGE LABELS 33 | ip-172-20-0-157.us-west-2.compute.internal Ready 1h beta.kubernetes.io/arch=amd64,beta.kubernetes.io/instance-type=t2.micro,beta.kubernetes.io/os=linux,failure-domain.beta.kubernetes.io/region=us-west-2,failure-domain.beta.kubernetes.io/zone=us-west-2a,kubernetes.io/hostname=ip-172-20-0-157.us-west-2.compute.internal 34 | ip-172-20-1-145.us-west-2.compute.internal Ready 1h beta.kubernetes.io/arch=amd64,beta.kubernetes.io/instance-type=t2.micro,beta.kubernetes.io/os=linux,failure-domain.beta.kubernetes.io/region=us-west-2,failure-domain.beta.kubernetes.io/zone=us-west-2b,kubernetes.io/hostname=ip-172-20-1-145.us-west-2.compute.internal 35 | ip-172-20-1-194.us-west-2.compute.internal Ready 1h beta.kubernetes.io/arch=amd64,beta.kubernetes.io/instance-type=t2.micro,beta.kubernetes.io/os=linux,failure-domain.beta.kubernetes.io/region=us-west-2,failure-domain.beta.kubernetes.io/zone=us-west-2b,kubernetes.io/hostname=ip-172-20-1-194.us-west-2.compute.internal 36 | ➜ ~ 37 | 38 | If you want to know what happened during these shell command, please go to https://medium.com/@canthefason/kube-up-i-know-what-you-did-on-aws-93e728d3f56a#.r3ynj2ooe -------------------------------------------------------------------------------- /docs/source/kubernetes/minikube.rst: -------------------------------------------------------------------------------- 1 | Get Start with minikube 2 | ======================= 3 | -------------------------------------------------------------------------------- /docs/source/kubernetes/stepbystep.rst: -------------------------------------------------------------------------------- 1 | Kubernetes Architecture Step by Step 2 | ==================================== 3 | 4 | We will have a overview of k8s architecture through this lab step by step. 5 | 6 | 7 | Prepare Lab Enviroment 8 | ----------------------- 9 | 10 | We will install kubernetes with Vagrant & CoreOS reference by https://coreos.com/kubernetes/docs/latest/kubernetes-on-vagrant.html. 11 | 12 | .. code-block:: bash 13 | 14 | ➜ vagrant git:(master) vagrant status 15 | Current machine states: 16 | 17 | e1 running (virtualbox) 18 | c1 running (virtualbox) 19 | w1 running (virtualbox) 20 | w2 running (virtualbox) 21 | w3 running (virtualbox) 22 | 23 | This environment represents multiple VMs. The VMs are all listed 24 | above with their current state. For more information about a specific 25 | VM, run `vagrant status NAME`. 26 | 27 | One etcd node, one controller node and three worker nodes. 28 | 29 | Kubectl version and cluster information 30 | 31 | .. 
code-block:: bash 32 | 33 | ➜ vagrant git:(master) kubectl version 34 | Client Version: version.Info{Major:"1", Minor:"5", GitVersion:"v1.5.1", GitCommit:"82450d03cb057bab0950214ef122b67c83fb11df", GitTreeState:"clean", BuildDate:"2016-12-14T00:57:05Z", GoVersion:"go1.7.4", Compiler:"gc", Platform:"darwin/amd64"} 35 | Server Version: version.Info{Major:"1", Minor:"5", GitVersion:"v1.5.1+coreos.0", GitCommit:"cc65f5321f9230bf9a3fa171155c1213d6e3480e", GitTreeState:"clean", BuildDate:"2016-12-14T04:08:28Z", GoVersion:"go1.7.4", Compiler:"gc", Platform:"linux/amd64"} 36 | ➜ vagrant git:(master) 37 | ➜ vagrant git:(master) kubectl get nodes 38 | NAME STATUS AGE 39 | 172.17.4.101 Ready,SchedulingDisabled 32m 40 | 172.17.4.201 Ready 32m 41 | 172.17.4.202 Ready 32m 42 | 172.17.4.203 Ready 32m 43 | ➜ vagrant git:(master) 44 | ➜ kubernetes-101 git:(master) ✗ kubectl cluster-info 45 | Kubernetes master is running at https://172.17.4.101:443 46 | Heapster is running at https://172.17.4.101:443/api/v1/proxy/namespaces/kube-system/services/heapster 47 | KubeDNS is running at https://172.17.4.101:443/api/v1/proxy/namespaces/kube-system/services/kube-dns 48 | kubernetes-dashboard is running at https://172.17.4.101:443/api/v1/proxy/namespaces/kube-system/services/kubernetes-dashboard 49 | 50 | To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'. 51 | ➜ kubernetes-101 git:(master) ✗ 52 | 53 | Get the application we will deploy from github: 54 | 55 | .. code-block:: bash 56 | 57 | $ git clone https://github.com/xiaopeng163/kubernetes-101 58 | 59 | This application is a simple python flask web app with a redis server as backend. 60 | 61 | Create Pods 62 | ----------- 63 | 64 | Use cmd ``kubectl create`` to create a pod through a yml file. Firstly, create a redis server pod. 65 | 66 | .. code-block:: bash 67 | 68 | ➜ kubernetes-101 git:(master) ✗ cd Kubernetes 69 | ➜ Kubernetes git:(master) ✗ ls 70 | db-pod.yml db-svc.yml set.sh web-pod.yml web-rc.yml web-svc.yml 71 | ➜ Kubernetes git:(master) ✗ 72 | ➜ Kubernetes git:(master) ✗ kubectl create -f db-pod.yml 73 | pod "redis" created 74 | ➜ Kubernetes git:(master) ✗ kubectl get pods -o wide 75 | NAME READY STATUS RESTARTS AGE IP NODE 76 | redis 1/1 Running 0 1m 10.2.26.2 172.17.4.201 77 | 78 | It created a pod which running redis, and the pod is on node ``w1``. We can SSH to this node and check the exactly container created 79 | by kubernetes. 80 | 81 | .. code-block:: bash 82 | 83 | ➜ vagrant git:(master) vagrant ssh w1 84 | CoreOS alpha (1164.1.0) 85 | Last login: Mon Jan 9 06:33:50 2017 from 10.0.2.2 86 | core@w1 ~ $ docker ps 87 | CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 88 | 7df09a520c43 redis:latest "docker-entrypoint.sh" 19 minutes ago Up 19 minutes k8s_redis.afd331f6_redis_default_b6c27624-d632-11e6-b809-0800274503e1_fb526620 89 | 90 | Next, create a web server pod. 91 | 92 | .. code-block:: bash 93 | 94 | ➜ Kubernetes git:(master) ✗ kubectl create -f web-pod.yml 95 | pod "web" created 96 | ➜ Kubernetes git:(master) ✗ kubectl get pods -o wide 97 | NAME READY STATUS RESTARTS AGE IP NODE 98 | redis 1/1 Running 0 2h 10.2.26.2 172.17.4.201 99 | web 1/1 Running 0 6m 10.2.14.6 172.17.4.203 100 | ➜ Kubernetes git:(master) ✗ 101 | 102 | The web pod is running on node ``w3``. 103 | 104 | Create Services 105 | --------------- 106 | 107 | Now we have two pods, but they do not know each other. If you SSH to the ``w3`` node which ``web`` located on, and access the flask web, it will 108 | return a error. 109 | 110 | .. 
code-block:: bash 111 | 112 | core@w3 ~ $ curl 10.2.14.6:5000 113 | ..... 114 | ..... 115 | ConnectionError: Error -2 connecting to redis:6379. Name or service not known. 116 | 117 | --> 118 | core@w3 ~ $ 119 | 120 | The reason is the ``web`` pod can not resolve the ``redis`` name. We need to create a service. 121 | 122 | .. code-block:: bash 123 | 124 | ➜ Kubernetes git:(master) ✗ kubectl create -f db-svc.yml 125 | service "redis" created 126 | ➜ Kubernetes git:(master) ✗ kubectl get svc 127 | NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE 128 | kubernetes 10.3.0.1 443/TCP 3h 129 | redis 10.3.0.201 6379/TCP 42s 130 | 131 | After that, go to ``w3`` and access the flask web again, it works! 132 | 133 | .. code-block:: bash 134 | 135 | core@w3 ~ $ curl 10.2.14.6:5000 136 | Hello Container World! I have been seen 1 times. 137 | core@w3 ~ $ curl 10.2.14.6:5000 138 | Hello Container World! I have been seen 2 times. 139 | core@w3 ~ $ 140 | 141 | At last, we need to access the flask web service from the outside of the kubernetes cluster, that need to create another 142 | service. 143 | 144 | .. code-block:: bash 145 | 146 | ➜ Kubernetes git:(master) ✗ kubectl create -f web-svc.yml 147 | service "web" created 148 | ➜ Kubernetes git:(master) ✗ 149 | ➜ Kubernetes git:(master) ✗ kubectl get svc 150 | NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE 151 | kubernetes 10.3.0.1 443/TCP 3h 152 | redis 10.3.0.201 6379/TCP 11m 153 | web 10.3.0.51 80:32204/TCP 5s 154 | ➜ Kubernetes git:(master) ✗ curl 172.17.4.203:32204 155 | Hello Container World! I have been seen 3 times. 156 | ➜ Kubernetes git:(master) ✗ 157 | ➜ Kubernetes git:(master) ✗ curl 172.17.4.201:32204 158 | Hello Container World! I have been seen 4 times. 159 | ➜ Kubernetes git:(master) ✗ curl 172.17.4.202:32204 160 | Hello Container World! I have been seen 5 times. 161 | ➜ Kubernetes git:(master) ✗ 162 | 163 | Now we can access the flask web from the outside, actually from any node. 164 | 165 | 166 | Scaling Pods with Replication Controller 167 | ---------------------------------------- 168 | 169 | .. code-block:: bash 170 | 171 | ➜ Kubernetes git:(master) ✗ kubectl create -f web-rc.yml 172 | replicationcontroller "web" created 173 | ➜ Kubernetes git:(master) ✗ kubectl get pods -o wide 174 | NAME READY STATUS RESTARTS AGE IP NODE 175 | redis 1/1 Running 0 3h 10.2.26.2 172.17.4.201 176 | web 1/1 Running 0 57m 10.2.14.6 172.17.4.203 177 | web-jlzm4 1/1 Running 0 3m 10.2.71.3 172.17.4.202 178 | web-sz150 1/1 Running 0 3m 10.2.26.3 172.17.4.201 179 | ➜ Kubernetes git:(master) ✗ 180 | 181 | Rolling Update 182 | -------------- 183 | 184 | To update a service without an outage through rolling update. We will update our flask web container image from 1.0 to 2.0. 185 | 186 | .. 
code-block:: bash 187 | 188 | ➜ kubernetes-101 git:(master) ✗ kubectl get pods 189 | NAME READY STATUS RESTARTS AGE 190 | redis 1/1 Running 0 6h 191 | web 1/1 Running 0 4h 192 | web-jlzm4 1/1 Running 0 3h 193 | web-sz150 1/1 Running 0 3h 194 | ➜ kubernetes-101 git:(master) ✗ kubectl rolling-update web --image=xiaopeng163/docker-flask-demo:2.0 195 | Created web-db65f4ce913c452364a2075625221bec 196 | Scaling up web-db65f4ce913c452364a2075625221bec from 0 to 3, scaling down web from 3 to 0 (keep 3 pods available, do not exceed 4 pods) 197 | Scaling web-db65f4ce913c452364a2075625221bec up to 1 198 | Scaling web down to 2 199 | Scaling web-db65f4ce913c452364a2075625221bec up to 2 200 | Scaling web down to 1 201 | Scaling web-db65f4ce913c452364a2075625221bec up to 3 202 | Scaling web down to 0 203 | Update succeeded. Deleting old controller: web 204 | Renaming web to web-db65f4ce913c452364a2075625221bec 205 | replicationcontroller "web" rolling updated 206 | ➜ kubernetes-101 git:(master) ✗ kubectl get pods 207 | NAME READY STATUS RESTARTS AGE 208 | redis 1/1 Running 0 6h 209 | web-db65f4ce913c452364a2075625221bec-130ll 1/1 Running 0 3m 210 | web-db65f4ce913c452364a2075625221bec-85365 1/1 Running 0 4m 211 | web-db65f4ce913c452364a2075625221bec-tsr41 1/1 Running 0 2m 212 | ➜ kubernetes-101 git:(master) ✗ 213 | 214 | After update, check the service. 215 | 216 | .. code-block:: bash 217 | 218 | ➜ kubernetes-101 git:(master) ✗ for i in `seq 4`; do curl 172.17.4.203:32204; done 219 | Hello Container World! I have been seen 26 times and my hostname is web-db65f4ce913c452364a2075625221bec-130ll. 220 | Hello Container World! I have been seen 27 times and my hostname is web-db65f4ce913c452364a2075625221bec-85365. 221 | Hello Container World! I have been seen 28 times and my hostname is web-db65f4ce913c452364a2075625221bec-130ll. 222 | Hello Container World! I have been seen 29 times and my hostname is web-db65f4ce913c452364a2075625221bec-130ll. 223 | ➜ kubernetes-101 git:(master) ✗ 224 | 225 | We can see it automatically load balanced. 226 | 227 | 228 | Clear Environment 229 | ------------------ 230 | 231 | .. code-block:: bash 232 | 233 | $ kubectl delete services web 234 | $ kubectl delete services redis 235 | $ kubectl delete rc web 236 | $ kubectl delete pod redis 237 | $ kubectl delete pod web 238 | -------------------------------------------------------------------------------- /docs/source/lab-environment.rst: -------------------------------------------------------------------------------- 1 | Lab Environment Quick Setup 2 | =========================== 3 | 4 | Please install vagrant before using vagrant files to quick start. 5 | 6 | Download link: https://www.vagrantup.com/downloads.html 7 | 8 | For what vagrant is and how to use it with virtualbox and vmware fusion, please reference https://www.vagrantup.com/docs/ 9 | 10 | And please install git if you don't have one on your machine(https://git-scm.com/) 11 | 12 | Vagrant with one node docker engine 13 | ----------------------------------- 14 | 15 | we will use vagrant to create one linux virtual machine and install docker automatically. 16 | 17 | .. code-block:: bash 18 | 19 | $ git clone https://github.com/xiaopeng163/docker-k8s-lab 20 | $ cd docker-k8s-lab/lab/docker/single-node 21 | 22 | There are two kinds of Linux, one is Ubuntu18.04, and one is CentOS7, please chose one, for example 23 | 24 | .. 
code-block:: bash 25 | 26 | $ git clone https://github.com/xiaopeng163/docker-k8s-lab 27 | $ cd docker-k8s-lab/lab/docker/single-node 28 | $ cd vagrant-centos7 29 | $ vagrant up 30 | 31 | ``vagrant up`` will take some time to create a virtual machine, after finished, you can use ``vagrant ssh`` ssh into 32 | this machine. like 33 | 34 | .. code-block:: bash 35 | 36 | $ vagrant status 37 | Current machine states: 38 | 39 | docker-host running (virtualbox) 40 | 41 | The VM is running. To stop this VM, you can run `vagrant halt` to 42 | shut it down forcefully, or you can run `vagrant suspend` to simply 43 | suspend the virtual machine. In either case, to restart it again, 44 | simply run `vagrant up`. 45 | $ vagrant ssh 46 | Last login: Wed Jan 24 14:53:38 2018 from 10.0.2.2 47 | [vagrant@docker-host ~]$ docker version 48 | Client: 49 | Version: 18.01.0-ce 50 | API version: 1.35 51 | Go version: go1.9.2 52 | Git commit: 03596f5 53 | Built: Wed Jan 10 20:07:19 2018 54 | OS/Arch: linux/amd64 55 | Experimental: false 56 | Orchestrator: swarm 57 | 58 | Server: 59 | Engine: 60 | Version: 18.01.0-ce 61 | API version: 1.35 (minimum version 1.12) 62 | Go version: go1.9.2 63 | Git commit: 03596f5 64 | Built: Wed Jan 10 20:10:58 2018 65 | OS/Arch: linux/amd64 66 | Experimental: false 67 | 68 | 69 | Vagrant with two node docker engine 70 | ----------------------------------- 71 | 72 | .. code-block:: bash 73 | 74 | $ git clone https://github.com/xiaopeng163/docker-k8s-lab 75 | $ cd docker-k8s-lab/lab/docker/multi-node/vagrant 76 | $ vagrant up 77 | Bringing machine 'docker-node1' up with 'virtualbox' provider... 78 | Bringing machine 'docker-node2' up with 'virtualbox' provider... 79 | ==> docker-node1: Importing base box 'ubuntu/bionic64'... 80 | ==> docker-node1: Matching MAC address for NAT networking... 81 | ==> docker-node1: Checking if box 'ubuntu/bionic64' is up to date... 82 | ...... 83 | 84 | The first time you run ``vagrant up`` will take some time to finished creating the virtual machine, and the time will depend on 85 | your network connection situation. 86 | 87 | It will create two ubuntu 18.04 VMs based on the base box from the internet, and provision them. 88 | 89 | We can use ``vagrant ssh`` to access each node: 90 | 91 | .. code-block:: bash 92 | 93 | $ vagrant status 94 | Current machine states: 95 | 96 | docker-node1 running (virtualbox) 97 | docker-node2 running (virtualbox) 98 | 99 | This environment represents multiple VMs. The VMs are all listed 100 | above with their current state. For more information about a specific 101 | VM, run `vagrant status NAME`. 102 | $ vagrant ssh docker-node1 103 | Welcome to Ubuntu 18.04 LTS (GNU/Linux 4.4.0-51-generic x86_64) 104 | 105 | * Documentation: https://help.ubuntu.com 106 | * Management: https://landscape.canonical.com 107 | * Support: https://ubuntu.com/advantage 108 | 109 | Get cloud support with Ubuntu Advantage Cloud Guest: 110 | http://www.ubuntu.com/business/services/cloud 111 | 112 | 0 packages can be updated. 113 | 0 updates are security updates. 
114 | 115 | 116 | Last login: Mon Dec 5 05:46:16 2016 from 10.0.2.2 117 | ubuntu@docker-node1:~$ docker run -d --name test2 hello-world 118 | Unable to find image 'hello-world:latest' locally 119 | latest: Pulling from library/hello-world 120 | c04b14da8d14: Pull complete 121 | Digest: sha256:0256e8a36e2070f7bf2d0b0763dbabdd67798512411de4cdcf9431a1feb60fd9 122 | Status: Downloaded newer image for hello-world:latest 123 | 52af64b1a65e3270cd525095974d70538fa9cf382a16123972312b72e858f57e 124 | ubuntu@docker-node1:~$ 125 | 126 | 127 | You can play with docker now ~~ 128 | 129 | If you want to recovery your environment, just: 130 | 131 | .. code-block:: bash 132 | 133 | $ vagrant halt 134 | $ vagrant destroy 135 | $ vagrant up 136 | -------------------------------------------------------------------------------- /lab/docker/multi-node/vagrant/Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | # 4 | Vagrant.require_version ">= 1.6.0" 5 | 6 | boxes = [ 7 | { 8 | :name => "docker-node1", 9 | :eth1 => "192.168.205.10", 10 | :mem => "1024", 11 | :cpu => "1" 12 | }, 13 | { 14 | :name => "docker-node2", 15 | :eth1 => "192.168.205.11", 16 | :mem => "1024", 17 | :cpu => "1" 18 | } 19 | ] 20 | 21 | Vagrant.configure(2) do |config| 22 | 23 | config.vm.box = "ubuntu/bionic64" 24 | 25 | boxes.each do |opts| 26 | config.vm.define opts[:name] do |config| 27 | config.vm.hostname = opts[:name] 28 | 29 | config.vm.provider "vmware_fusion" do |v| 30 | v.vmx["memsize"] = opts[:mem] 31 | v.vmx["numvcpus"] = opts[:cpu] 32 | end 33 | 34 | config.vm.provider "virtualbox" do |v| 35 | v.customize ["modifyvm", :id, "--memory", opts[:mem]] 36 | v.customize ["modifyvm", :id, "--cpus", opts[:cpu]] 37 | end 38 | 39 | config.vm.network :private_network, ip: opts[:eth1] 40 | end 41 | end 42 | config.vm.provision "shell", privileged: false, path: "./setup.sh" 43 | end 44 | -------------------------------------------------------------------------------- /lab/docker/multi-node/vagrant/setup.sh: -------------------------------------------------------------------------------- 1 | #/bin/sh 2 | 3 | # install some tools 4 | sudo apt-get install -y git vim gcc build-essential telnet 5 | 6 | # install docker 7 | curl -fsSL get.docker.com -o get-docker.sh 8 | sh get-docker.sh 9 | 10 | if [ ! 
$(getent group docker) ]; 11 | then 12 | sudo groupadd docker; 13 | else 14 | echo "docker user group already exists" 15 | fi 16 | 17 | sudo gpasswd -a $USER docker 18 | sudo service docker restart 19 | 20 | rm -rf get-docker.sh 21 | 22 | # open password auth for backup if ssh key doesn't work, bydefault, username=vagrant password=vagrant 23 | # sudo sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/g' /etc/ssh/sshd_config 24 | # sudo service sshd restart 25 | -------------------------------------------------------------------------------- /lab/docker/single-node/vagrant-centos7/Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | Vagrant.require_version ">= 1.6.0" 5 | 6 | boxes = [ 7 | { 8 | :name => "docker-host", 9 | :eth1 => "192.168.205.10", 10 | :mem => "1024", 11 | :cpu => "1" 12 | } 13 | ] 14 | 15 | Vagrant.configure(2) do |config| 16 | 17 | config.vm.box = "centos/7" 18 | boxes.each do |opts| 19 | config.vm.define opts[:name] do |config| 20 | config.vm.hostname = opts[:name] 21 | config.vm.provider "vmware_fusion" do |v| 22 | v.vmx["memsize"] = opts[:mem] 23 | v.vmx["numvcpus"] = opts[:cpu] 24 | end 25 | config.vm.provider "virtualbox" do |v| 26 | v.customize ["modifyvm", :id, "--memory", opts[:mem]] 27 | v.customize ["modifyvm", :id, "--cpus", opts[:cpu]] 28 | end 29 | config.vm.network :private_network, ip: opts[:eth1] 30 | end 31 | end 32 | config.vm.provision "shell", privileged: false, path: "./setup.sh" 33 | end 34 | -------------------------------------------------------------------------------- /lab/docker/single-node/vagrant-centos7/setup.sh: -------------------------------------------------------------------------------- 1 | #/bin/sh 2 | 3 | # install some tools 4 | sudo yum install -y git vim gcc glibc-static telnet psmisc 5 | 6 | # install docker 7 | curl -fsSL get.docker.com -o get-docker.sh 8 | sh get-docker.sh 9 | 10 | if [ ! 
$(getent group docker) ]; then 11 | sudo groupadd docker 12 | else 13 | echo "docker user group already exists" 14 | fi 15 | 16 | sudo gpasswd -a $USER docker 17 | sudo systemctl start docker 18 | 19 | rm -rf get-docker.sh 20 | 21 | # open password auth for backup if ssh key doesn't work, bydefault, username=vagrant password=vagrant 22 | sudo sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/g' /etc/ssh/sshd_config 23 | sudo systemctl restart sshd 24 | -------------------------------------------------------------------------------- /lab/docker/single-node/vagrant-centos8/Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | Vagrant.require_version ">= 1.6.0" 5 | 6 | boxes = [ 7 | { 8 | :name => "docker-host", 9 | :eth1 => "192.168.205.10", 10 | :mem => "1024", 11 | :cpu => "1" 12 | } 13 | ] 14 | 15 | Vagrant.configure(2) do |config| 16 | 17 | config.vm.box = "centos/8" 18 | boxes.each do |opts| 19 | config.vm.define opts[:name] do |config| 20 | config.vm.hostname = opts[:name] 21 | config.vm.provider "vmware_fusion" do |v| 22 | v.vmx["memsize"] = opts[:mem] 23 | v.vmx["numvcpus"] = opts[:cpu] 24 | end 25 | config.vm.provider "virtualbox" do |v| 26 | v.customize ["modifyvm", :id, "--memory", opts[:mem]] 27 | v.customize ["modifyvm", :id, "--cpus", opts[:cpu]] 28 | end 29 | config.vm.network :private_network, ip: opts[:eth1] 30 | end 31 | end 32 | config.vm.provision "shell", privileged: false, path: "./setup.sh" 33 | end 34 | -------------------------------------------------------------------------------- /lab/docker/single-node/vagrant-centos8/setup.sh: -------------------------------------------------------------------------------- 1 | #/bin/sh 2 | 3 | # install some tools 4 | sudo yum install -y git vim gcc telnet psmisc 5 | 6 | # install docker 7 | curl -fsSL get.docker.com -o get-docker.sh 8 | sh get-docker.sh 9 | 10 | if [ ! 
$(getent group docker) ]; then 11 | sudo groupadd docker 12 | else 13 | echo "docker user group already exists" 14 | fi 15 | 16 | sudo gpasswd -a $USER docker 17 | sudo systemctl start docker 18 | 19 | rm -rf get-docker.sh 20 | 21 | # open password auth for backup if ssh key doesn't work, bydefault, username=vagrant password=vagrant 22 | sudo sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/g' /etc/ssh/sshd_config 23 | sudo systemctl restart sshd 24 | -------------------------------------------------------------------------------- /lab/docker/single-node/vagrant-ubuntu18.04/Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | # 4 | Vagrant.require_version ">= 1.6.0" 5 | 6 | boxes = [ 7 | { 8 | :name => "docker-host", 9 | :eth1 => "192.168.205.10", 10 | :mem => "1024", 11 | :cpu => "1" 12 | } 13 | ] 14 | 15 | Vagrant.configure(2) do |config| 16 | config.vm.box = "ubuntu/bionic64" 17 | 18 | boxes.each do |opts| 19 | config.vm.define opts[:name] do |config| 20 | config.vm.hostname = opts[:name] 21 | 22 | config.vm.provider "vmware_fusion" do |v| 23 | v.vmx["memsize"] = opts[:mem] 24 | v.vmx["numvcpus"] = opts[:cpu] 25 | end 26 | 27 | config.vm.provider "virtualbox" do |v| 28 | v.customize ["modifyvm", :id, "--memory", opts[:mem]] 29 | v.customize ["modifyvm", :id, "--cpus", opts[:cpu]] 30 | end 31 | 32 | config.vm.network :private_network, ip: opts[:eth1] 33 | end 34 | end 35 | config.vm.provision "shell", privileged: false, path: "./setup.sh" 36 | end 37 | -------------------------------------------------------------------------------- /lab/docker/single-node/vagrant-ubuntu18.04/setup.sh: -------------------------------------------------------------------------------- 1 | #/bin/sh 2 | 3 | # install some tools 4 | sudo apt-get install -y git vim gcc build-essential telnet 5 | 6 | # install docker 7 | curl -fsSL get.docker.com -o get-docker.sh 8 | sh get-docker.sh 9 | 10 | if [ ! 
$(getent group docker) ]; 11 | then 12 | sudo groupadd docker; 13 | else 14 | echo "docker user group already exists" 15 | fi 16 | 17 | sudo gpasswd -a $USER docker 18 | sudo service docker restart 19 | 20 | rm -rf get-docker.sh 21 | 22 | # open password auth for backup if ssh key doesn't work, bydefault, username=vagrant password=vagrant 23 | # sudo sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/g' /etc/ssh/sshd_config 24 | # sudo service sshd restart 25 | -------------------------------------------------------------------------------- /lab/docker/single-node/vagrant-ubuntu20.04/Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | # 4 | Vagrant.require_version ">= 1.6.0" 5 | 6 | boxes = [ 7 | { 8 | :name => "docker-host", 9 | :eth1 => "192.168.205.10", 10 | :mem => "1024", 11 | :cpu => "1" 12 | } 13 | ] 14 | 15 | Vagrant.configure(2) do |config| 16 | config.vm.box = "ubuntu/focal64" 17 | 18 | boxes.each do |opts| 19 | config.vm.define opts[:name] do |config| 20 | config.vm.hostname = opts[:name] 21 | 22 | config.vm.provider "vmware_fusion" do |v| 23 | v.vmx["memsize"] = opts[:mem] 24 | v.vmx["numvcpus"] = opts[:cpu] 25 | end 26 | 27 | config.vm.provider "virtualbox" do |v| 28 | v.customize ["modifyvm", :id, "--memory", opts[:mem]] 29 | v.customize ["modifyvm", :id, "--cpus", opts[:cpu]] 30 | end 31 | 32 | config.vm.network :private_network, ip: opts[:eth1] 33 | end 34 | end 35 | config.vm.provision "shell", privileged: false, path: "./setup.sh" 36 | end 37 | -------------------------------------------------------------------------------- /lab/docker/single-node/vagrant-ubuntu20.04/setup.sh: -------------------------------------------------------------------------------- 1 | #/bin/sh 2 | 3 | # install some tools 4 | sudo apt-get install -y git vim gcc build-essential telnet 5 | 6 | # install docker 7 | curl -fsSL get.docker.com -o get-docker.sh 8 | sh get-docker.sh 9 | 10 | if [ ! $(getent group docker) ]; 11 | then 12 | sudo groupadd docker; 13 | else 14 | echo "docker user group already exists" 15 | fi 16 | 17 | sudo gpasswd -a $USER docker 18 | sudo service docker restart 19 | 20 | rm -rf get-docker.sh 21 | 22 | # open password auth for backup if ssh key doesn't work, bydefault, username=vagrant password=vagrant 23 | # sudo sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/g' /etc/ssh/sshd_config 24 | # sudo service sshd restart 25 | -------------------------------------------------------------------------------- /lab/k8s/multi-node/vagrant/Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | # 4 | Vagrant.require_version ">= 1.6.0" 5 | 6 | # All Vagrant configuration is done below. The "2" in Vagrant.configure 7 | # configures the configuration version (we support older styles for 8 | # backwards compatibility). Please don't change it unless you know what 9 | # you're doing. 10 | 11 | 12 | boxes = [ 13 | { 14 | :name => "k8s-master", 15 | :eth1 => "192.168.205.10", 16 | :mem => "1024", 17 | :cpu => "1" 18 | }, 19 | { 20 | :name => "k8s-worker1", 21 | :eth1 => "192.168.205.11", 22 | :mem => "1024", 23 | :cpu => "1" 24 | }, 25 | { 26 | :name => "k8s-worker2", 27 | :eth1 => "192.168.205.12", 28 | :mem => "1024", 29 | :cpu => "1" 30 | } 31 | ] 32 | 33 | Vagrant.configure(2) do |config| 34 | # The most common configuration options are documented and commented below. 
35 | # For a complete reference, please see the online documentation at 36 | # https://docs.vagrantup.com. 37 | 38 | # Every Vagrant development environment requires a box. You can search for 39 | # boxes at https://atlas.hashicorp.com/search. 40 | config.vm.box = "ubuntu/xenial64" 41 | 42 | boxes.each do |opts| 43 | config.vm.define opts[:name] do |config| 44 | config.vm.hostname = opts[:name] 45 | 46 | config.vm.provider "vmware_fusion" do |v| 47 | v.vmx["memsize"] = opts[:mem] 48 | v.vmx["numvcpus"] = opts[:cpu] 49 | end 50 | 51 | config.vm.provider "virtualbox" do |v| 52 | v.customize ["modifyvm", :id, "--memory", opts[:mem]] 53 | v.customize ["modifyvm", :id, "--cpus", opts[:cpu]] 54 | end 55 | 56 | config.vm.network :private_network, ip: opts[:eth1] 57 | end 58 | end 59 | # Disable automatic box update checking. If you disable this, then 60 | # boxes will only be checked for updates when the user runs 61 | # `vagrant box outdated`. This is not recommended. 62 | # config.vm.box_check_update = false 63 | 64 | # Create a forwarded port mapping which allows access to a specific port 65 | # within the machine from a port on the host machine. In the example below, 66 | # accessing "localhost:8080" will access port 80 on the guest machine. 67 | # config.vm.network "forwarded_port", guest: 80, host: 8080 68 | 69 | # Create a private network, which allows host-only access to the machine 70 | # using a specific IP. 71 | # config.vm.network "private_network", ip: "192.168.33.10" 72 | 73 | # Create a public network, which generally matched to bridged network. 74 | # Bridged networks make the machine appear as another physical device on 75 | # your network. 76 | # config.vm.network "public_network" 77 | 78 | # Share an additional folder to the guest VM. The first argument is 79 | # the path on the host to the actual folder. The second argument is 80 | # the path on the guest to mount the folder. And the optional third 81 | # argument is a set of non-required options. 82 | # config.vm.synced_folder "../data", "/vagrant_data" 83 | 84 | # Provider-specific configuration so you can fine-tune various 85 | # backing providers for Vagrant. These expose provider-specific options. 86 | # Example for VirtualBox: 87 | # 88 | # config.vm.provider "virtualbox" do |vb| 89 | # # Display the VirtualBox GUI when booting the machine 90 | # vb.gui = true 91 | # 92 | # # Customize the amount of memory on the VM: 93 | # vb.memory = "1024" 94 | # end 95 | # 96 | # View the documentation for the provider you are using for more 97 | # information on available options. 98 | 99 | # Define a Vagrant Push strategy for pushing to Atlas. Other push strategies 100 | # such as FTP and Heroku are also available. See the documentation at 101 | # https://docs.vagrantup.com/v2/push/atlas.html for more information. 102 | # config.push.define "atlas" do |push| 103 | # push.app = "YOUR_ATLAS_USERNAME/YOUR_APPLICATION_NAME" 104 | # end 105 | 106 | # Enable provisioning with a shell script. Additional provisioners such as 107 | # Puppet, Chef, Ansible, Salt, and Docker are also available. Please see the 108 | # documentation for more information about their specific syntax and use. 
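# Note (added comment, not in the upstream Vagrantfile): the inline shell
# provisioner below adds the Kubernetes apt repository, installs docker.io,
# and installs kubelet, kubeadm, kubectl and kubernetes-cni on every VM,
# so the nodes are ready for `kubeadm init` / `kubeadm join`.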
109 | config.vm.provision "shell", inline: <<-SHELL 110 | curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add - 111 | echo "deb http://apt.kubernetes.io/ kubernetes-xenial main" >> ~/kubernetes.list 112 | sudo mv ~/kubernetes.list /etc/apt/sources.list.d 113 | sudo apt-get update 114 | # Install docker if you don't have it already. 115 | sudo apt-get install -y docker.io 116 | apt-get install -y kubelet kubeadm kubectl kubernetes-cni 117 | SHELL 118 | end 119 | -------------------------------------------------------------------------------- /lab/podman/Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | Vagrant.require_version ">= 1.6.0" 5 | 6 | boxes = [ 7 | { 8 | :name => "docker-host", 9 | :eth1 => "192.168.205.10", 10 | :script => "./docker.sh" 11 | }, 12 | { 13 | :name => "podman-host", 14 | :eth1 => "192.168.205.11", 15 | :script => './podman.sh' 16 | } 17 | ] 18 | 19 | Vagrant.configure(2) do |config| 20 | 21 | config.vm.box = "centos/7" 22 | boxes.each do |opts| 23 | config.vm.define opts[:name] do |config| 24 | config.vm.hostname = opts[:name] 25 | config.vm.network :private_network, ip: opts[:eth1] 26 | config.vm.provision "shell", privileged: false, path: opts[:script] 27 | end 28 | end 29 | end 30 | -------------------------------------------------------------------------------- /lab/podman/docker.sh: -------------------------------------------------------------------------------- 1 | #/bin/sh 2 | 3 | # install some tools 4 | sudo yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm 5 | sudo yum install -y git vim gcc telnet psmisc jq 6 | 7 | # install docker 8 | curl -fsSL get.docker.com -o get-docker.sh 9 | sh get-docker.sh 10 | 11 | # if [ ! $(getent group docker) ]; then 12 | # sudo groupadd docker 13 | # else 14 | # echo "docker user group already exists" 15 | # fi 16 | 17 | # sudo gpasswd -a $USER docker 18 | # sudo systemctl start docker 19 | 20 | rm -rf get-docker.sh 21 | -------------------------------------------------------------------------------- /lab/podman/podman.sh: -------------------------------------------------------------------------------- 1 | sudo curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable.repo https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/CentOS_7/devel:kubic:libcontainers:stable.repo 2 | sudo yum -y install podman 3 | sudo echo 'vagrant:100000:65536' >>/etc/subuid 4 | sudo echo 'vagrant:100000:65536' >>/etc/subgid 5 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | Sphinx==1.4.8 2 | sphinx-rtd-theme==0.1.9 3 | --------------------------------------------------------------------------------