├── .github
└── workflows
│ └── workflow.yaml
├── .gitignore
├── CODE_OF_CONDUCT.md
├── Pipfile
├── Pipfile.lock
├── Procfile
├── README.md
├── Staticfile
├── bin
├── gh-md-toc
├── replace-toc
└── wc-summary
├── ci
├── pipeline.yml
├── repipe
├── scripts
│ └── mkdocs-build
└── settings.yml
├── deployment
├── prod-values.yml
└── staging-values.yml
├── docs
├── ask-for-help
│ └── suggestions.md
├── cloud-config-updates.md
├── complete-deployment-manifest.md
├── deployment-manifests-part-1.md
├── deployment-updates.md
├── deployments.md
├── disks.md
├── images
│ ├── aws
│ │ ├── aws-public-ami.png
│ │ └── aws-subnet.png
│ ├── bookcover
│ │ ├── book-cover-being-read.png
│ │ ├── book-cover-library.png
│ │ ├── bookcover-3d.png
│ │ └── bookcover.png
│ ├── bosh-io-stemcell-sizes.png
│ ├── favicon
│ │ ├── browserconfig.xml
│ │ ├── favicon-114.png
│ │ ├── favicon-120.png
│ │ ├── favicon-144.png
│ │ ├── favicon-150.png
│ │ ├── favicon-152.png
│ │ ├── favicon-16.png
│ │ ├── favicon-160.png
│ │ ├── favicon-180.png
│ │ ├── favicon-192.png
│ │ ├── favicon-310.png
│ │ ├── favicon-32.png
│ │ ├── favicon-57.png
│ │ ├── favicon-60.png
│ │ ├── favicon-64.png
│ │ ├── favicon-70.png
│ │ ├── favicon-72.png
│ │ ├── favicon-76.png
│ │ ├── favicon-96.png
│ │ ├── favicon.ico
│ │ └── faviconit-instructions.txt
│ ├── gcp
│ │ ├── gcp-vpc-networks-bosh.png
│ │ ├── gcp-vpc-networks-firewall-rules.png
│ │ └── gcp-vpc-networks-route-details.png
│ ├── handdrawn
│ │ └── app-stack.jpg
│ ├── swlogo.png
│ ├── virtualbox
│ │ ├── vbox-no-envs.png
│ │ └── vbox-running-bosh-env.png
│ ├── zookeeper-deployment-aws.png
│ └── zookeeper-deployment-google.png
├── index.md
├── instances.md
├── introduction.md
├── networking.md
├── print.css
├── properties.md
├── releases.md
├── service-discovery.md
├── spread-the-word
│ └── meetups.md
├── stemcells.md
├── stylesheets
│ └── extra.css
├── targeting-bosh-envs.md
├── todo.md
├── tutorials
│ └── bosh-lite-virtualbox.md
└── why-bosh.md
├── manifest.yml
├── mkdocs.yml
├── paketo
└── pack-build
├── pip.env
└── theme-overrides
├── main.html
└── partials
└── footer.html
/.github/workflows/workflow.yaml:
--------------------------------------------------------------------------------
1 | on:
2 | push:
3 | branches:
4 | - master
5 | - staging
6 |
7 | jobs:
8 | DeployStaging:
9 | name: Deploy Staging
10 | runs-on: ubuntu-latest
11 | steps:
12 | - name: Checkout branch
13 | uses: actions/checkout@v2
14 |
15 | - name: Save Kubeconfig
16 | env:
17 | KUBE_CONFIG: ${{ secrets.KUBE_CONFIG }}
18 | run: echo "$KUBE_CONFIG" > $GITHUB_WORKSPACE/.kubeconfig
19 |
20 | - name: Setup Helm
21 | uses: stefanprodan/kube-tools@v1
22 | with:
23 | command: |
24 | helmv3 repo add starkandwayne https://helm.starkandwayne.com
25 | helmv3 repo update
26 |
27 | - name: Deploy to K8s
28 | uses: stefanprodan/kube-tools@v1
29 | env:
30 | KUBECONFIG: ${{ github.workspace }}/.kubeconfig
31 | with:
32 | command: |
33 | helmv3 -n ultimateguides-staging upgrade --install ultimateguidetobosh starkandwayne/git-website -f $GITHUB_WORKSPACE/deployment/staging-values.yml --set app.commitSHA=${{ github.sha }}
34 |
35 | - name: Smoke Test
36 | uses: stefanprodan/kube-tools@v1
37 | env:
38 | KUBECONFIG: ${{ github.workspace }}/.kubeconfig
39 | with:
40 | command: |
41 | kubectl -n ultimateguides-staging rollout status deployment/ultimateguidetobosh-website
42 |
43 | DeployProd:
44 | name: Deploy Prod
45 | needs: [DeployStaging]
46 | if: github.ref == 'refs/heads/master'
47 | runs-on: ubuntu-latest
48 | steps:
49 | - name: Checkout branch
50 | uses: actions/checkout@v2
51 |
52 | - name: Save Kubeconfig
53 | env:
54 | KUBE_CONFIG: ${{ secrets.KUBE_CONFIG }}
55 | run: echo "$KUBE_CONFIG" > $GITHUB_WORKSPACE/.kubeconfig
56 |
57 | - name: Setup Helm
58 | uses: stefanprodan/kube-tools@v1
59 | with:
60 | command: |
61 | helmv3 repo add starkandwayne https://helm.starkandwayne.com
62 | helmv3 repo update
63 |
64 | - name: Deploy to K8s
65 | uses: stefanprodan/kube-tools@v1
66 | env:
67 | KUBECONFIG: ${{ github.workspace }}/.kubeconfig
68 | with:
69 | command: |
70 | helmv3 -n ultimateguides-prod upgrade --install ultimateguidetobosh starkandwayne/git-website -f $GITHUB_WORKSPACE/deployment/prod-values.yml --set app.commitSHA=${{ github.sha }}
71 |
72 | - name: Smoke Test
73 | uses: stefanprodan/kube-tools@v1
74 | env:
75 | KUBECONFIG: ${{ github.workspace }}/.kubeconfig
76 | with:
77 | command: |
78 | kubectl -n ultimateguides-prod rollout status deployment/ultimateguidetobosh-website
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | tmp
3 | site
4 |
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | ## Code of Conduct
2 |
3 | ### Our Pledge
4 |
5 | In the interest of fostering an open and welcoming environment, we as
6 | contributors and maintainers pledge to making participation in our project and
7 | our community a harassment-free experience for everyone, regardless of age, body
8 | size, disability, ethnicity, gender identity and expression, level of experience,
9 | nationality, personal appearance, race, religion, or sexual identity and
10 | orientation.
11 |
12 | ### Our Standards
13 |
14 | Examples of behavior that contributes to creating a positive environment
15 | include:
16 |
17 | * Using welcoming and inclusive language
18 | * Being respectful of differing viewpoints and experiences
19 | * Gracefully accepting constructive criticism
20 | * Focusing on what is best for the community
21 | * Showing empathy towards other community members
22 |
23 | Examples of unacceptable behavior by participants include:
24 |
25 | * The use of sexualized language or imagery and unwelcome sexual attention or
26 | advances
27 | * Trolling, insulting/derogatory comments, and personal or political attacks
28 | * Public or private harassment
29 | * Publishing others' private information, such as a physical or electronic
30 | address, without explicit permission
31 | * Other conduct which could reasonably be considered inappropriate in a
32 | professional setting
33 |
34 | ### Our Responsibilities
35 |
36 | Project maintainers are responsible for clarifying the standards of acceptable
37 | behavior and are expected to take appropriate and fair corrective action in
38 | response to any instances of unacceptable behavior.
39 |
40 | Project maintainers have the right and responsibility to remove, edit, or
41 | reject comments, commits, code, wiki edits, issues, pull requests, and other
42 | contributions that are not aligned to this Code of Conduct, or to ban
43 | temporarily or permanently any contributor for other behaviors that they deem
44 | inappropriate, threatening, offensive, or harmful.
45 |
46 | ### Scope
47 |
48 | This Code of Conduct applies both within project spaces and in public spaces
49 | when an individual is representing the project or its community. Examples of
50 | representing a project or community include using an official project e-mail
51 | address, posting via an official social media account, or acting as an appointed
52 | representative at an online or offline event. Representation of a project may be
53 | further defined and clarified by project maintainers.
54 |
55 | ### Enforcement
56 |
57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be
58 | reported by contacting the project team at conduct@starkandwayne.com. All
59 | complaints will be reviewed and investigated and will result in a response that
60 | is deemed necessary and appropriate to the circumstances. The project team is
61 | obligated to maintain confidentiality with regard to the reporter of an incident.
62 | Further details of specific enforcement policies may be posted separately.
63 |
64 | Project maintainers who do not follow or enforce the Code of Conduct in good
65 | faith may face temporary or permanent repercussions as determined by other
66 | members of the project's leadership.
67 |
68 | ### Attribution
69 |
70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
71 | available at [http://contributor-covenant.org/version/1/4][version]
72 |
73 | [homepage]: http://contributor-covenant.org
74 | [version]: http://contributor-covenant.org/version/1/4/
--------------------------------------------------------------------------------
/Pipfile:
--------------------------------------------------------------------------------
1 | [[source]]
2 | name = "pypi"
3 | url = "https://pypi.org/simple"
4 | verify_ssl = true
5 |
6 | [dev-packages]
7 |
8 | [packages]
9 | pymdown-extensions = "*"
10 | markdown = "*"
11 | mkdocs = "*"
12 | mkdocs-material = "*"
13 |
14 | [requires]
15 | python_version = "3"
16 |
--------------------------------------------------------------------------------
/Pipfile.lock:
--------------------------------------------------------------------------------
1 | {
2 | "_meta": {
3 | "hash": {
4 | "sha256": "62258e442f2765f4807388db3ba9a8d30b0a927f2c35e94183bd88dd48214bef"
5 | },
6 | "pipfile-spec": 6,
7 | "requires": {
8 | "python_version": "3"
9 | },
10 | "sources": [
11 | {
12 | "name": "pypi",
13 | "url": "https://pypi.org/simple",
14 | "verify_ssl": true
15 | }
16 | ]
17 | },
18 | "default": {
19 | "click": {
20 | "hashes": [
21 | "sha256:353f466495adaeb40b6b5f592f9f91cb22372351c84caeb068132442a4518ef3",
22 | "sha256:410e932b050f5eed773c4cda94de75971c89cdb3155a72a0831139a79e5ecb5b"
23 | ],
24 | "markers": "python_version >= '3.6'",
25 | "version": "==8.0.3"
26 | },
27 | "ghp-import": {
28 | "hashes": [
29 | "sha256:5f8962b30b20652cdffa9c5a9812f7de6bcb56ec475acac579807719bf242c46",
30 | "sha256:947b3771f11be850c852c64b561c600fdddf794bab363060854c1ee7ad05e071"
31 | ],
32 | "version": "==2.0.2"
33 | },
34 | "importlib-metadata": {
35 | "hashes": [
36 | "sha256:53ccfd5c134223e497627b9815d5030edf77d2ed573922f7a0b8f8bb81a1c100",
37 | "sha256:75bdec14c397f528724c1bfd9709d660b33a4d2e77387a3358f20b848bb5e5fb"
38 | ],
39 | "markers": "python_version >= '3.6'",
40 | "version": "==4.8.2"
41 | },
42 | "jinja2": {
43 | "hashes": [
44 | "sha256:077ce6014f7b40d03b47d1f1ca4b0fc8328a692bd284016f806ed0eaca390ad8",
45 | "sha256:611bb273cd68f3b993fabdc4064fc858c5b47a973cb5aa7999ec1ba405c87cd7"
46 | ],
47 | "markers": "python_version >= '3.6'",
48 | "version": "==3.0.3"
49 | },
50 | "markdown": {
51 | "hashes": [
52 | "sha256:1fafe3f1ecabfb514a5285fca634a53c1b32a81cb0feb154264d55bf2ff22c17",
53 | "sha256:c467cd6233885534bf0fe96e62e3cf46cfc1605112356c4f9981512b8174de59"
54 | ],
55 | "index": "pypi",
56 | "version": "==3.2.2"
57 | },
58 | "markupsafe": {
59 | "hashes": [
60 | "sha256:01a9b8ea66f1658938f65b93a85ebe8bc016e6769611be228d797c9d998dd298",
61 | "sha256:023cb26ec21ece8dc3907c0e8320058b2e0cb3c55cf9564da612bc325bed5e64",
62 | "sha256:0446679737af14f45767963a1a9ef7620189912317d095f2d9ffa183a4d25d2b",
63 | "sha256:04635854b943835a6ea959e948d19dcd311762c5c0c6e1f0e16ee57022669194",
64 | "sha256:0717a7390a68be14b8c793ba258e075c6f4ca819f15edfc2a3a027c823718567",
65 | "sha256:0955295dd5eec6cb6cc2fe1698f4c6d84af2e92de33fbcac4111913cd100a6ff",
66 | "sha256:0d4b31cc67ab36e3392bbf3862cfbadac3db12bdd8b02a2731f509ed5b829724",
67 | "sha256:10f82115e21dc0dfec9ab5c0223652f7197feb168c940f3ef61563fc2d6beb74",
68 | "sha256:168cd0a3642de83558a5153c8bd34f175a9a6e7f6dc6384b9655d2697312a646",
69 | "sha256:1d609f577dc6e1aa17d746f8bd3c31aa4d258f4070d61b2aa5c4166c1539de35",
70 | "sha256:1f2ade76b9903f39aa442b4aadd2177decb66525062db244b35d71d0ee8599b6",
71 | "sha256:20dca64a3ef2d6e4d5d615a3fd418ad3bde77a47ec8a23d984a12b5b4c74491a",
72 | "sha256:2a7d351cbd8cfeb19ca00de495e224dea7e7d919659c2841bbb7f420ad03e2d6",
73 | "sha256:2d7d807855b419fc2ed3e631034685db6079889a1f01d5d9dac950f764da3dad",
74 | "sha256:2ef54abee730b502252bcdf31b10dacb0a416229b72c18b19e24a4509f273d26",
75 | "sha256:36bc903cbb393720fad60fc28c10de6acf10dc6cc883f3e24ee4012371399a38",
76 | "sha256:37205cac2a79194e3750b0af2a5720d95f786a55ce7df90c3af697bfa100eaac",
77 | "sha256:3c112550557578c26af18a1ccc9e090bfe03832ae994343cfdacd287db6a6ae7",
78 | "sha256:3dd007d54ee88b46be476e293f48c85048603f5f516008bee124ddd891398ed6",
79 | "sha256:4296f2b1ce8c86a6aea78613c34bb1a672ea0e3de9c6ba08a960efe0b0a09047",
80 | "sha256:47ab1e7b91c098ab893b828deafa1203de86d0bc6ab587b160f78fe6c4011f75",
81 | "sha256:49e3ceeabbfb9d66c3aef5af3a60cc43b85c33df25ce03d0031a608b0a8b2e3f",
82 | "sha256:4dc8f9fb58f7364b63fd9f85013b780ef83c11857ae79f2feda41e270468dd9b",
83 | "sha256:4efca8f86c54b22348a5467704e3fec767b2db12fc39c6d963168ab1d3fc9135",
84 | "sha256:53edb4da6925ad13c07b6d26c2a852bd81e364f95301c66e930ab2aef5b5ddd8",
85 | "sha256:5855f8438a7d1d458206a2466bf82b0f104a3724bf96a1c781ab731e4201731a",
86 | "sha256:594c67807fb16238b30c44bdf74f36c02cdf22d1c8cda91ef8a0ed8dabf5620a",
87 | "sha256:5b6d930f030f8ed98e3e6c98ffa0652bdb82601e7a016ec2ab5d7ff23baa78d1",
88 | "sha256:5bb28c636d87e840583ee3adeb78172efc47c8b26127267f54a9c0ec251d41a9",
89 | "sha256:60bf42e36abfaf9aff1f50f52644b336d4f0a3fd6d8a60ca0d054ac9f713a864",
90 | "sha256:611d1ad9a4288cf3e3c16014564df047fe08410e628f89805e475368bd304914",
91 | "sha256:6300b8454aa6930a24b9618fbb54b5a68135092bc666f7b06901f897fa5c2fee",
92 | "sha256:63f3268ba69ace99cab4e3e3b5840b03340efed0948ab8f78d2fd87ee5442a4f",
93 | "sha256:6557b31b5e2c9ddf0de32a691f2312a32f77cd7681d8af66c2692efdbef84c18",
94 | "sha256:693ce3f9e70a6cf7d2fb9e6c9d8b204b6b39897a2c4a1aa65728d5ac97dcc1d8",
95 | "sha256:6a7fae0dd14cf60ad5ff42baa2e95727c3d81ded453457771d02b7d2b3f9c0c2",
96 | "sha256:6c4ca60fa24e85fe25b912b01e62cb969d69a23a5d5867682dd3e80b5b02581d",
97 | "sha256:6fcf051089389abe060c9cd7caa212c707e58153afa2c649f00346ce6d260f1b",
98 | "sha256:7d91275b0245b1da4d4cfa07e0faedd5b0812efc15b702576d103293e252af1b",
99 | "sha256:89c687013cb1cd489a0f0ac24febe8c7a666e6e221b783e53ac50ebf68e45d86",
100 | "sha256:8d206346619592c6200148b01a2142798c989edcb9c896f9ac9722a99d4e77e6",
101 | "sha256:905fec760bd2fa1388bb5b489ee8ee5f7291d692638ea5f67982d968366bef9f",
102 | "sha256:97383d78eb34da7e1fa37dd273c20ad4320929af65d156e35a5e2d89566d9dfb",
103 | "sha256:984d76483eb32f1bcb536dc27e4ad56bba4baa70be32fa87152832cdd9db0833",
104 | "sha256:99df47edb6bda1249d3e80fdabb1dab8c08ef3975f69aed437cb69d0a5de1e28",
105 | "sha256:9f02365d4e99430a12647f09b6cc8bab61a6564363f313126f775eb4f6ef798e",
106 | "sha256:a30e67a65b53ea0a5e62fe23682cfe22712e01f453b95233b25502f7c61cb415",
107 | "sha256:ab3ef638ace319fa26553db0624c4699e31a28bb2a835c5faca8f8acf6a5a902",
108 | "sha256:aca6377c0cb8a8253e493c6b451565ac77e98c2951c45f913e0b52facdcff83f",
109 | "sha256:add36cb2dbb8b736611303cd3bfcee00afd96471b09cda130da3581cbdc56a6d",
110 | "sha256:b2f4bf27480f5e5e8ce285a8c8fd176c0b03e93dcc6646477d4630e83440c6a9",
111 | "sha256:b7f2d075102dc8c794cbde1947378051c4e5180d52d276987b8d28a3bd58c17d",
112 | "sha256:baa1a4e8f868845af802979fcdbf0bb11f94f1cb7ced4c4b8a351bb60d108145",
113 | "sha256:be98f628055368795d818ebf93da628541e10b75b41c559fdf36d104c5787066",
114 | "sha256:bf5d821ffabf0ef3533c39c518f3357b171a1651c1ff6827325e4489b0e46c3c",
115 | "sha256:c47adbc92fc1bb2b3274c4b3a43ae0e4573d9fbff4f54cd484555edbf030baf1",
116 | "sha256:cdfba22ea2f0029c9261a4bd07e830a8da012291fbe44dc794e488b6c9bb353a",
117 | "sha256:d6c7ebd4e944c85e2c3421e612a7057a2f48d478d79e61800d81468a8d842207",
118 | "sha256:d7f9850398e85aba693bb640262d3611788b1f29a79f0c93c565694658f4071f",
119 | "sha256:d8446c54dc28c01e5a2dbac5a25f071f6653e6e40f3a8818e8b45d790fe6ef53",
120 | "sha256:deb993cacb280823246a026e3b2d81c493c53de6acfd5e6bfe31ab3402bb37dd",
121 | "sha256:e0f138900af21926a02425cf736db95be9f4af72ba1bb21453432a07f6082134",
122 | "sha256:e9936f0b261d4df76ad22f8fee3ae83b60d7c3e871292cd42f40b81b70afae85",
123 | "sha256:f0567c4dc99f264f49fe27da5f735f414c4e7e7dd850cfd8e69f0862d7c74ea9",
124 | "sha256:f5653a225f31e113b152e56f154ccbe59eeb1c7487b39b9d9f9cdb58e6c79dc5",
125 | "sha256:f826e31d18b516f653fe296d967d700fddad5901ae07c622bb3705955e1faa94",
126 | "sha256:f8ba0e8349a38d3001fae7eadded3f6606f0da5d748ee53cc1dab1d6527b9509",
127 | "sha256:f9081981fe268bd86831e5c75f7de206ef275defcb82bc70740ae6dc507aee51",
128 | "sha256:fa130dd50c57d53368c9d59395cb5526eda596d3ffe36666cd81a44d56e48872"
129 | ],
130 | "markers": "python_version >= '3.6'",
131 | "version": "==2.0.1"
132 | },
133 | "mergedeep": {
134 | "hashes": [
135 | "sha256:0096d52e9dad9939c3d975a774666af186eda617e6ca84df4c94dec30004f2a8",
136 | "sha256:70775750742b25c0d8f36c55aed03d24c3384d17c951b3175d898bd778ef0307"
137 | ],
138 | "markers": "python_version >= '3.6'",
139 | "version": "==1.3.4"
140 | },
141 | "mkdocs": {
142 | "hashes": [
143 | "sha256:89f5a094764381cda656af4298727c9f53dc3e602983087e1fe96ea1df24f4c1",
144 | "sha256:a1fa8c2d0c1305d7fc2b9d9f607c71778572a8b110fb26642aa00296c9e6d072"
145 | ],
146 | "index": "pypi",
147 | "version": "==1.2.3"
148 | },
149 | "mkdocs-material": {
150 | "hashes": [
151 | "sha256:79bf2951e5b9d435b8e1c0c3340a642f8359d90656b9344e64d9a2b7b6d2a1bb",
152 | "sha256:8facb50db21fc0152ea5459ef67298512370503816dd2a6a2887f67330f61ed2"
153 | ],
154 | "index": "pypi",
155 | "version": "==6.0.1"
156 | },
157 | "mkdocs-material-extensions": {
158 | "hashes": [
159 | "sha256:a82b70e533ce060b2a5d9eb2bc2e1be201cf61f901f93704b4acf6e3d5983a44",
160 | "sha256:bfd24dfdef7b41c312ede42648f9eb83476ea168ec163b613f9abd12bbfddba2"
161 | ],
162 | "markers": "python_version >= '3.6'",
163 | "version": "==1.0.3"
164 | },
165 | "packaging": {
166 | "hashes": [
167 | "sha256:096d689d78ca690e4cd8a89568ba06d07ca097e3306a4381635073ca91479966",
168 | "sha256:14317396d1e8cdb122989b916fa2c7e9ca8e2be9e8060a6eff75b6b7b4d8a7e0"
169 | ],
170 | "markers": "python_version >= '3.6'",
171 | "version": "==21.2"
172 | },
173 | "pygments": {
174 | "hashes": [
175 | "sha256:b8e67fe6af78f492b3c4b3e2970c0624cbf08beb1e493b2c99b9fa1b67a20380",
176 | "sha256:f398865f7eb6874156579fdf36bc840a03cab64d1cde9e93d68f46a425ec52c6"
177 | ],
178 | "markers": "python_version >= '3.5'",
179 | "version": "==2.10.0"
180 | },
181 | "pymdown-extensions": {
182 | "hashes": [
183 | "sha256:9ba704052d4bdc04a7cd63f7db4ef6add73bafcef22c0cf6b2e3386cf4ece51e",
184 | "sha256:a3689c04f4cbddacd9d569425c571ae07e2673cc4df63a26cdbf1abc15229137"
185 | ],
186 | "index": "pypi",
187 | "version": "==8.0.1"
188 | },
189 | "pyparsing": {
190 | "hashes": [
191 | "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1",
192 | "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b"
193 | ],
194 | "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'",
195 | "version": "==2.4.7"
196 | },
197 | "python-dateutil": {
198 | "hashes": [
199 | "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86",
200 | "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"
201 | ],
202 | "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
203 | "version": "==2.8.2"
204 | },
205 | "pyyaml": {
206 | "hashes": [
207 | "sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293",
208 | "sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b",
209 | "sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57",
210 | "sha256:0b4624f379dab24d3725ffde76559cff63d9ec94e1736b556dacdfebe5ab6d4b",
211 | "sha256:0ce82d761c532fe4ec3f87fc45688bdd3a4c1dc5e0b4a19814b9009a29baefd4",
212 | "sha256:1e4747bc279b4f613a09eb64bba2ba602d8a6664c6ce6396a4d0cd413a50ce07",
213 | "sha256:213c60cd50106436cc818accf5baa1aba61c0189ff610f64f4a3e8c6726218ba",
214 | "sha256:231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9",
215 | "sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287",
216 | "sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513",
217 | "sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0",
218 | "sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0",
219 | "sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92",
220 | "sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f",
221 | "sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2",
222 | "sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc",
223 | "sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c",
224 | "sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86",
225 | "sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4",
226 | "sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c",
227 | "sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34",
228 | "sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b",
229 | "sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c",
230 | "sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb",
231 | "sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737",
232 | "sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3",
233 | "sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d",
234 | "sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53",
235 | "sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78",
236 | "sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803",
237 | "sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a",
238 | "sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174",
239 | "sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5"
240 | ],
241 | "markers": "python_version >= '3.6'",
242 | "version": "==6.0"
243 | },
244 | "pyyaml-env-tag": {
245 | "hashes": [
246 | "sha256:70092675bda14fdec33b31ba77e7543de9ddc88f2e5b99160396572d11525bdb",
247 | "sha256:af31106dec8a4d68c60207c1886031cbf839b68aa7abccdb19868200532c2069"
248 | ],
249 | "markers": "python_version >= '3.6'",
250 | "version": "==0.1"
251 | },
252 | "six": {
253 | "hashes": [
254 | "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926",
255 | "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"
256 | ],
257 | "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
258 | "version": "==1.16.0"
259 | },
260 | "watchdog": {
261 | "hashes": [
262 | "sha256:25fb5240b195d17de949588628fdf93032ebf163524ef08933db0ea1f99bd685",
263 | "sha256:3386b367e950a11b0568062b70cc026c6f645428a698d33d39e013aaeda4cc04",
264 | "sha256:3becdb380d8916c873ad512f1701f8a92ce79ec6978ffde92919fd18d41da7fb",
265 | "sha256:4ae38bf8ba6f39d5b83f78661273216e7db5b00f08be7592062cb1fc8b8ba542",
266 | "sha256:8047da932432aa32c515ec1447ea79ce578d0559362ca3605f8e9568f844e3c6",
267 | "sha256:8f1c00aa35f504197561060ca4c21d3cc079ba29cf6dd2fe61024c70160c990b",
268 | "sha256:922a69fa533cb0c793b483becaaa0845f655151e7256ec73630a1b2e9ebcb660",
269 | "sha256:9693f35162dc6208d10b10ddf0458cc09ad70c30ba689d9206e02cd836ce28a3",
270 | "sha256:a0f1c7edf116a12f7245be06120b1852275f9506a7d90227648b250755a03923",
271 | "sha256:a36e75df6c767cbf46f61a91c70b3ba71811dfa0aca4a324d9407a06a8b7a2e7",
272 | "sha256:aba5c812f8ee8a3ff3be51887ca2d55fb8e268439ed44110d3846e4229eb0e8b",
273 | "sha256:ad6f1796e37db2223d2a3f302f586f74c72c630b48a9872c1e7ae8e92e0ab669",
274 | "sha256:ae67501c95606072aafa865b6ed47343ac6484472a2f95490ba151f6347acfc2",
275 | "sha256:b2fcf9402fde2672545b139694284dc3b665fd1be660d73eca6805197ef776a3",
276 | "sha256:b52b88021b9541a60531142b0a451baca08d28b74a723d0c99b13c8c8d48d604",
277 | "sha256:b7d336912853d7b77f9b2c24eeed6a5065d0a0cc0d3b6a5a45ad6d1d05fb8cd8",
278 | "sha256:bd9ba4f332cf57b2c1f698be0728c020399ef3040577cde2939f2e045b39c1e5",
279 | "sha256:be9be735f827820a06340dff2ddea1fb7234561fa5e6300a62fe7f54d40546a0",
280 | "sha256:cca7741c0fcc765568350cb139e92b7f9f3c9a08c4f32591d18ab0a6ac9e71b6",
281 | "sha256:d0d19fb2441947b58fbf91336638c2b9f4cc98e05e1045404d7a4cb7cddc7a65",
282 | "sha256:e02794ac791662a5eafc6ffeaf9bcc149035a0e48eb0a9d40a8feb4622605a3d",
283 | "sha256:e0f30db709c939cabf64a6dc5babb276e6d823fd84464ab916f9b9ba5623ca15",
284 | "sha256:e92c2d33858c8f560671b448205a268096e17870dcf60a9bb3ac7bfbafb7f5f9"
285 | ],
286 | "markers": "python_version >= '3.6'",
287 | "version": "==2.1.6"
288 | },
289 | "zipp": {
290 | "hashes": [
291 | "sha256:71c644c5369f4a6e07636f0aa966270449561fcea2e3d6747b8d23efaa9d7832",
292 | "sha256:9fe5ea21568a0a70e50f273397638d39b03353731e6cbbb3fd8502a33fec40bc"
293 | ],
294 | "markers": "python_version >= '3.6'",
295 | "version": "==3.6.0"
296 | }
297 | },
298 | "develop": {}
299 | }
300 |
--------------------------------------------------------------------------------
/Procfile:
--------------------------------------------------------------------------------
1 | build:mkdocs build
2 | web:mkdocs serve
3 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Ultimate Guide to BOSH
2 |
3 | [BOSH](https://bosh.io) is an open source tool for release engineering,
4 | deployment, lifecycle management, and monitoring of distributed systems.
5 |
6 | It's incredible. Huge companies are using it. Tiny companies are using it.
7 | You too could be using it.
8 |
9 | This is the [Ultimate Guide to BOSH](https://ultimateguidetobosh.com).
10 |
11 | 
12 |
13 | It will place you in the middle of daily life with BOSH and gradually guide you
14 | toward understanding, and then deploying your own systems, and then through to
15 | deep understanding. You'll become a raving fan.
16 |
17 | The guide is currently hosted at https://ultimateguidetobosh.com/. Each section
18 | finishes with a Next link to the next section. Press 'f' to activate search
19 | dropdown.
20 |
21 | ## Contributions
22 |
23 | * [CI pipeline](https://ci.starkandwayne.com/teams/main/pipelines/ultimate-guide-to-bosh) deploys new commits to `master` branch to https://ultimateguidetobosh.com
24 |
25 | ## Local Development
26 |
27 | This guide is built using [`mkdocs`](http://www.mkdocs.org/) using the material
28 | theme. Once installed, you can continuously build and serve the tutorial
29 | locally with:
30 |
31 | ```plain
32 | mkdocs serve
33 | ```
34 |
35 | ## Manual deployment
36 |
37 | ```
38 | mkdocs build
39 | cd site
40 |
41 | gsutil -m rsync -r -x '\.git.*' . gs://ultimateguidetobosh-com-website
42 |
43 | ```
44 |
45 | View the site and live changes at http://localhost:8000.
46 |
--------------------------------------------------------------------------------
/Staticfile:
--------------------------------------------------------------------------------
1 | root: site
2 |
--------------------------------------------------------------------------------
/bin/gh-md-toc:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # From https://github.com/ekalinin/github-markdown-toc
4 | #
5 | # Steps:
6 | #
7 | # 1. Download corresponding html file for some README.md:
8 | # curl -s $1
9 | #
10 | # 2. Discard rows where no substring 'user-content-' (github's markup):
11 | # awk '/user-content-/ { ...
12 | #
13 | # 3.1 Get last number in each row like ' ... sitemap.js.*<\/h/)+2, RLENGTH-5)
22 | #
23 | # 5. Find anchor and insert it inside "(...)":
24 | # substr($0, match($0, "href=\"[^\"]+?\" ")+6, RLENGTH-8)
25 | #
26 |
27 | gh_toc_version="0.4.8"
28 |
29 | gh_user_agent="gh-md-toc v$gh_toc_version"
30 |
31 | #
32 | # Download rendered into html README.md by its url.
33 | #
34 | #
35 | gh_toc_load() {
36 | local gh_url=$1
37 |
38 | if type curl &>/dev/null; then
39 | curl --user-agent "$gh_user_agent" -s "$gh_url"
40 | elif type wget &>/dev/null; then
41 | wget --user-agent="$gh_user_agent" -qO- "$gh_url"
42 | else
43 | echo "Please, install 'curl' or 'wget' and try again."
44 | exit 1
45 | fi
46 | }
47 |
48 | #
49 | # Converts local md file into html by GitHub
50 | #
51 | # ➥ curl -X POST --data '{"text": "Hello world github/linguist#1 **cool**, and #1!"}' https://api.github.com/markdown
52 | #
Hello world github/linguist#1 cool, and #1!
'"
53 | gh_toc_md2html() {
54 | local gh_file_md=$1
55 | URL=https://api.github.com/markdown/raw
56 | TOKEN="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/token.txt"
57 | if [ -f "$TOKEN" ]; then
58 | URL="$URL?access_token=$(cat $TOKEN)"
59 | fi
60 | curl -s --user-agent "$gh_user_agent" \
61 | --data-binary @"$gh_file_md" -H "Content-Type:text/plain" \
62 | $URL
63 | }
64 |
65 | #
66 | # Is passed string url
67 | #
68 | gh_is_url() {
69 | case $1 in
70 | https* | http*)
71 | echo "yes";;
72 | *)
73 | echo "no";;
74 | esac
75 | }
76 |
77 | #
78 | # TOC generator
79 | #
80 | gh_toc(){
81 | local gh_src=$1
82 | local gh_src_copy=$1
83 | local gh_ttl_docs=$2
84 |
85 | if [ "$gh_src" = "" ]; then
86 | echo "Please, enter URL or local path for a README.md"
87 | exit 1
88 | fi
89 |
90 |
91 | # Show "TOC" string only if working with one document
92 | if [ "$gh_ttl_docs" = "1" ]; then
93 |
94 | echo "Table of Contents"
95 | echo "================="
96 | echo ""
97 | gh_src_copy=""
98 |
99 | fi
100 |
101 | if [ "$(gh_is_url "$gh_src")" == "yes" ]; then
102 | gh_toc_load "$gh_src" | gh_toc_grab "$gh_src_copy"
103 | else
104 | gh_toc_md2html "$gh_src" | gh_toc_grab "$gh_src_copy"
105 | fi
106 | }
107 |
108 | #
109 | # Grabber of the TOC from rendered html
110 | #
111 | # $1 — a source url of document.
112 | # It's needed if the TOC is generated for multiple documents.
113 | #
114 | gh_toc_grab() {
115 | # if closed is on the new line, then move it on the prev line
116 | # for example:
117 | # was: The command foo1
118 | #
119 | # became: The command foo1
120 | sed -e ':a' -e 'N' -e '$!ba' -e 's/\n<\/h/<\/h/g' |
121 | # find strings that corresponds to template
122 | grep -E -o '//' | sed 's/<\/code>//' |
125 | # now all rows are like:
126 | # ... .*<\/h/)+2, RLENGTH-5)"](" gh_url substr($0, match($0, "href=\"[^\"]+?\" ")+6, RLENGTH-8) ")"}' | sed 'y/+/ /; s/%/\\x/g')"
131 | }
132 |
133 | #
134 | # Returns filename only from full path or url
135 | #
136 | gh_toc_get_filename() {
137 | echo "${1##*/}"
138 | }
139 |
140 | #
141 | # Options handlers
142 | #
143 | gh_toc_app() {
144 | local app_name="gh-md-toc"
145 |
146 | if [ "$1" = '--help' ] || [ $# -eq 0 ] ; then
147 | echo "GitHub TOC generator ($app_name): $gh_toc_version"
148 | echo ""
149 | echo "Usage:"
150 | echo " $app_name src [src] Create TOC for a README file (url or local path)"
151 | echo " $app_name - Create TOC for markdown from STDIN"
152 | echo " $app_name --help Show help"
153 | echo " $app_name --version Show version"
154 | return
155 | fi
156 |
157 | if [ "$1" = '--version' ]; then
158 | echo "$gh_toc_version"
159 | return
160 | fi
161 |
162 | if [ "$1" = "-" ]; then
163 | if [ -z "$TMPDIR" ]; then
164 | TMPDIR="/tmp"
165 | elif [ -n "$TMPDIR" -a ! -d "$TMPDIR" ]; then
166 | mkdir -p "$TMPDIR"
167 | fi
168 | local gh_tmp_md
169 | gh_tmp_md=$(mktemp $TMPDIR/tmp.XXXXXX)
170 | while read input; do
171 | echo "$input" >> "$gh_tmp_md"
172 | done
173 | gh_toc_md2html "$gh_tmp_md" | gh_toc_grab ""
174 | return
175 | fi
176 |
177 | for md in "$@"
178 | do
179 | echo ""
180 | gh_toc "$md" "$#"
181 | done
182 |
183 | echo ""
184 | echo "Created by [gh-md-toc](https://github.com/ekalinin/github-markdown-toc)"
185 | }
186 |
187 | #
188 | # Entry point
189 | #
190 | gh_toc_app "$@"
191 |
--------------------------------------------------------------------------------
/bin/replace-toc:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
4 | cd $DIR/..
5 |
6 | toc=$(cat README.md | bin/gh-md-toc -)
7 |
8 | perl -0777 -i -pe "s/(# TOC\\n).*(\\n# Introduction)/\$1\n${toc}\n\nNOTE: update TOC using \`bin\/replace-toc\`\n\$2/s" README.md
9 |
--------------------------------------------------------------------------------
/bin/wc-summary:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Shows how many words added (to README.md) today
4 |
5 | RED='\033[0;31m'
6 | GREEN='\033[0;32m'
7 | NC='\033[0m' # No Color
8 |
9 | last_commit_yesterday=$(git log --since=yesterday.midnight --before today.midnight --oneline | head -n1 | awk '{print $1}')
10 | if [[ -z $last_commit_yesterday ]]; then
11 | last_commit_yesterday=$(git log --since "1 days ago" --oneline | tail -n1 | awk '{print $1}')
12 | fi
13 | wordcount_head=$(git show ${last_commit_yesterday}:mkdocs.yml | grep ".md$" | sed -e "s%.*:%%" | xargs -L1 -I {} echo "${last_commit_yesterday}:docs/{}" | paste -sd " " - | xargs git show | wc -w | awk '{print $1}')
14 | wordcount_latest=$(cat docs/*.md docs/**/*.md | wc -w | awk '{print $1}')
15 | wordcount_diff=`expr $wordcount_latest - $wordcount_head`
16 |
17 | echo "${wordcount_latest} words total"
18 | colour=$NC
19 | if [[ $wordcount_diff -gt 0 ]]; then
20 | printf "${GREEN}${wordcount_diff}${NC} words added today\n"
21 | elif [[ $wordcount_diff -lt 0 ]]; then
22 | printf "${RED}${wordcount_diff}${NC} words removed today\n"
23 | fi
24 |
--------------------------------------------------------------------------------
/ci/pipeline.yml:
--------------------------------------------------------------------------------
1 | ---
2 | meta:
3 | name: (( param "Please name your pipeline" ))
4 | pipeline: (( grab meta.name ))
5 | target: (( param "Please identify the name of the target Concourse CI" ))
6 | # url: (( param "Please specify the full url of the target Concourse CI" ))
7 |
8 | image:
9 | name: starkandwayne/concourse
10 | tag: latest
11 |
12 | github:
13 | uri: (( concat "git@github.com:" meta.github.owner "/" meta.github.repo ))
14 | owner: (( param "Please specify the name of the user / organization that owns the Github repository" ))
15 | repo: (( param "Please specify the name of the Github repository" ))
16 | branch: master
17 | private_key: (( param "Please generate an SSH Deployment Key for this repo and specify it here" ))
18 | access_token: (( param "Please generate a Personal Access Token to be used for creating github releases (do you have a ci-bot?)" ))
19 |
20 | cf:
21 | username: (( param "please provide cf.username" ))
22 | password: (( param "please provide cf.password" ))
23 | organization: (( param "please provide cf.organization" ))
24 | space-production: (( param "please provide cf.space-production" ))
25 |
26 | groups:
27 | - name: (( grab meta.pipeline ))
28 | jobs:
29 | - production
30 |
31 | jobs:
32 | - name: production
33 | public: true
34 | serial: true
35 | plan:
36 | - get: git
37 | trigger: true
38 | - task: mkdocs-build
39 | config:
40 | platform: linux
41 | image_resource:
42 | type: docker-image
43 | source:
44 | repository: (( grab meta.image.name ))
45 | tag: (( grab meta.image.tag ))
46 | inputs:
47 | - { name: git }
48 | outputs:
49 | - { name: site }
50 | run:
51 | path: ./git/ci/scripts/mkdocs-build
52 | args: []
53 | params:
54 | REPO_ROOT: git
55 | SITE_ROOT: site
56 | - put: deploy-prod
57 | params:
58 | manifest: site/manifest.yml
59 | path: site
60 | current_app_name: (( grab meta.name ))
61 |
62 | resource_types: []
63 |
64 | resources:
65 | - name: git
66 | type: git
67 | source:
68 | uri: (( grab meta.github.uri ))
69 | branch: (( grab meta.github.branch ))
70 | private_key: (( grab meta.github.private_key ))
71 |
72 | - name: deploy-prod
73 | type: cf
74 | source:
75 | api: https://api.run.pivotal.io
76 | skip_cert_check: false
77 | username: (( grab meta.cf.username ))
78 | password: (( grab meta.cf.password ))
79 | organization: (( grab meta.cf.organization ))
80 | space: (( grab meta.cf.space-production ))
81 |
--------------------------------------------------------------------------------
/ci/repipe:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # ci/repipe
4 | #
5 | # Script for merging together pipeline configuration files
6 | # (via Spruce!) and configuring Concourse.
7 | #
8 | # author: James Hunt
9 | # Dennis Bell
10 | # created: 2016-03-04
11 |
12 | need_command() {
13 | local cmd=${1:?need_command() - no command name given}
14 |
15 | if [[ ! -x "$(command -v $cmd)" ]]; then
16 | echo >&2 "${cmd} is not installed."
17 | if [[ "${cmd}" == "spruce" ]]; then
18 | echo >&2 "Please download it from https://github.com/geofffranks/spruce/releases"
19 | fi
20 | exit 2
21 | fi
22 | }
23 |
24 | NO_FLY=
25 | SAVE_MANIFEST=
26 | VALIDATE_PIPELINE=
27 | NON_INTERACTIVE=
28 |
29 | cleanup() {
30 | rm -f save-manifest.yml
31 | if [[ -n ${SAVE_MANIFEST} && -e .deploy.yml ]]; then
32 | mv .deploy.yml save-manifest.yml
33 | fi
34 | rm -f .deploy.yml
35 | }
36 |
37 | usage() {
38 | echo Command line arguments:
39 | echo "no-fly Do not execute any fly commands"
40 | echo "save-manifest Save manifest to file save-manifest"
41 | echo "validate Validate pipeline instead of set pipeline"
42 | echo "validate-strict Validate pipeline with strict mode"
43 | echo "non-interactive Run set-pipeline in non-interactive mode"
44 | }
45 |
46 | for arg do
47 | case "${arg}" in
48 | no-fly|no_fly) NO_FLY="yes" ;;
49 | save-manifest|save_manifest) SAVE_MANIFEST="yes" ;;
50 | validate) VALIDATE_PIPELINE="normal" ;;
51 | validate-strict|validate_strict) VALIDATE_PIPELINE="strict" ;;
52 | non-interactive|non_interactive) NON_INTERACTIVE="--non-interactive" ;;
53 | help|-h|--help) usage; exit 0 ;;
54 | *) echo Invalid argument
55 | usage
56 | exit 1
57 | esac
58 | done
59 |
60 | cd $(dirname $BASH_SOURCE[0])
61 | echo "Working in $(pwd)"
62 | need_command spruce
63 |
64 | # Allow for target-specific settings
65 | settings_file="$(ls -1 settings.yml ${CONCOURSE_TARGET:+"settings-${CONCOURSE_TARGET}.yml"} 2>/dev/null | tail -n1)"
66 | if [[ -z "$settings_file" ]]
67 | then
68 | echo >&2 "Missing local settings in ci/settings.yml${CONCOURSE_TARGET:+" or ci/settings-${CONCOURSE_TARGET}.yml"}!"
69 | exit 1
70 | fi
71 |
72 | echo >&2 "Using settings found in ${settings_file}"
73 |
74 | set -e
75 | trap "cleanup" QUIT TERM EXIT INT
76 | spruce merge pipeline.yml ${settings_file} > .deploy.yml
77 | PIPELINE=$(spruce json .deploy.yml | jq -r '.meta.pipeline // ""')
78 | if [[ -z ${PIPELINE} ]]; then
79 | echo >&2 "Missing pipeline name in ci/settings.yml!"
80 | exit 1
81 | fi
82 |
83 | TARGET_FROM_SETTINGS=$(spruce json .deploy.yml | jq -r '.meta.target // ""')
84 | if [[ -z ${CONCOURSE_TARGET} ]]; then
85 | TARGET=${TARGET_FROM_SETTINGS}
86 | elif [[ "$CONCOURSE_TARGET" != "$TARGET_FROM_SETTINGS" ]]
87 | then
88 | echo >&2 "Target in {$settings_file} differs from target in \$CONCOURSE_TARGET"
89 | echo >&2 " \$CONCOURSE_TARGET: $CONCOURSE_TARGET"
90 | echo >&2 " Target in file: $TARGET_FROM_SETTINGS"
91 | exit 1
92 | else
93 | TARGET=${CONCOURSE_TARGET}
94 | fi
95 |
96 | if [[ -z ${TARGET} ]]; then
97 | echo >&2 "Missing Concourse Target in ci/settings.yml!"
98 | exit 1
99 | fi
100 |
101 | fly_cmd="${FLY_CMD:-fly}"
102 |
103 | [[ -n ${NO_FLY} ]] && { echo no fly execution requested ; exit 0; }
104 |
105 | case "${VALIDATE_PIPELINE}" in
106 | normal) fly_opts="validate-pipeline" ;;
107 | strict) fly_opts="validate-pipeline --strict" ;;
108 | *) fly_opts="set-pipeline ${NON_INTERACTIVE} --pipeline ${PIPELINE}" ;;
109 | esac
110 |
111 | set +x
112 | $fly_cmd --target ${TARGET} ${fly_opts} --config .deploy.yml
113 | [[ -n ${VALIDATE_PIPELINE} ]] && exit 0
114 | $fly_cmd --target ${TARGET} unpause-pipeline --pipeline ${PIPELINE}
115 |
--------------------------------------------------------------------------------
/ci/scripts/mkdocs-build:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | export LC_ALL=C.UTF-8
4 | export LANG=C.UTF-8
5 | apt-get update
6 | apt-get install -y python3-pip
7 | pip3 install 'pymdown-extensions<=8.0' 'Markdown<=3.2.2' 'mkdocs<=1.1.2' 'mkdocs-material<=5.5.9'
8 |
9 | git clone ${REPO_ROOT} ${SITE_ROOT}
10 | cd ${SITE_ROOT}
11 | mkdocs build
12 |
--------------------------------------------------------------------------------
/ci/settings.yml:
--------------------------------------------------------------------------------
1 | meta:
2 | name: ultimate-guide-to-bosh
3 | # fly -t pipes@cfcommunity login -c https://pipes.starkandwayne.com -n cfcommunity
4 | target: pipes@cfcommunity
5 | # url: https://pipes.starkandwayne.com
6 |
7 | cf:
8 | uri: https://api.run.pivotal.io
9 | organization: starkandwayne
10 | space-production: ultimate-guide-to-bosh
11 | username: ((cf-pws.username))
12 | password: ((cf-pws.password))
13 |
14 | github:
15 | owner: starkandwayne
16 | repo: ultimate-guide-to-bosh
17 | branch: master
18 | private_key: ((github-private-key))
19 | access_token: ((github-access-token))
20 |
--------------------------------------------------------------------------------
/deployment/prod-values.yml:
--------------------------------------------------------------------------------
1 | ingress:
2 | hosts:
3 | - ultimateguidetobosh.com
4 | - www.ultimateguidetobosh.com
5 | - ultimateguidetobosh.k8s.lke.starkandwayne.com
6 | tls:
7 | enabled: true
8 | certIssuer: "lab-ca-issuer"
9 |
10 | initImage:
11 | repository: python
12 | tag: 3.7
13 |
14 | app:
15 | org: starkandwayne
16 | repo: ultimate-guide-to-bosh
17 |
18 | setup_script: |
19 | #!/bin/sh
20 | pip install 'pymdown-extensions<=8.0' 'Markdown<=3.2.2' 'mkdocs<=1.1.2' 'mkdocs-material<=5.5.9'
21 | mkdocs build
22 | mv /tmp/website/site/* /var/www/
--------------------------------------------------------------------------------
/deployment/staging-values.yml:
--------------------------------------------------------------------------------
1 | ingress:
2 | hosts:
3 | - staging.ultimateguidetobosh.com
4 | - staging.www.ultimateguidetobosh.com
5 | - staging.ultimateguidetobosh.k8s.lke.starkandwayne.com
6 | tls:
7 | enabled: true
8 | certIssuer: "lab-ca-issuer"
9 |
10 | initImage:
11 | repository: python
12 | tag: 3.7
13 |
14 | app:
15 | org: starkandwayne
16 | repo: ultimate-guide-to-bosh
17 |
18 | setup_script: |
19 | #!/bin/sh
20 | pip install 'pymdown-extensions<=8.0' 'Markdown<=3.2.2' 'mkdocs<=1.1.2' 'mkdocs-material<=5.5.9'
21 | mkdocs build
22 | mv /tmp/website/site/* /var/www/
--------------------------------------------------------------------------------
/docs/ask-for-help/suggestions.md:
--------------------------------------------------------------------------------
1 | # Ask for help
2 |
3 | When you're initially getting started with BOSH and later when you are using BOSH to manage your production systems you may want assistance.
4 |
5 | ## Who creates BOSH?
6 |
7 | BOSH is a freely-available open-source project that belongs to the Cloud Foundry Foundation. It continues to have full-time contributors from foundation members such as Pivotal, IBM, and others; as well as contributions to core BOSH and the huge array of community projects surrounding BOSH.
8 |
9 | ## Is there documentation?
10 |
11 | Oh yes there is documentation.
12 |
13 | https://bosh.io/docs is a comprehensive suite of documentation for deploying a BOSH environment to various cloud infrastructures, using BOSH, and debugging systems.
14 |
15 | You also have the Ultimate Guide to BOSH right in front of you. It has been written to be read in a linear, sequential manner so that you progressively learn new concepts and never feel overwhelmed. If you want to search for specific topics, press `f` or click on the "Search" field on the https://ultimateguidetobosh.com website.
16 |
17 | ## Join the Slack channel
18 |
19 | Register at https://slack.cloudfoundry.org, login, and join the `#bosh` channel.
20 |
21 | Dmitriy Kalinin (`@dkalinin`) is the product manager for the BOSH project, its core repositories, and the https://bosh.io website and its subsystems. He also has a range of personal Github projects relating to BOSH at https://github.com/cppforlife. His Github username should be a warning: be careful what you tattoo on your social media usernames.
22 |
23 | There are many other BOSH Core and general community members who might be available and might be able to answer your questions.
24 |
25 | The downside of problems and resolutions being discussed in Slack can be that the thread is lost to those who did not read it at the time. Google searches also do not find Slack conversations and their answers.
26 |
27 | ## Github Issues
28 |
29 | Nearly every repository in the BOSH ecosystem is on Github, so I'll use the phrase "Github Issues" to cover "creating issues/tickets on a source code repository".
30 |
31 | I personally like to create Github Issues. I've created many issues where I then subsequently discovered the solution, have updated the issue with the fix, and closed the issue. Or I updated the issue with a plausible workaround, yet left the issue open in case Dmitriy or someone else has a better solution.
32 |
33 | Creating Github Issues requires that you choose a project source repository into which to create the issue. Typically you will choose between:
34 |
35 | * https://github.com/cloudfoundry/bosh-cli for questions relating to the `bosh` CLI and its subcommands
36 | * https://github.com/cloudfoundry/bosh-deployment for questions relating to deploying or upgrading your BOSH environment, its BOSH director, and other collocated systems (UAA, Credhub)
37 | * https://github.com/cloudfoundry/bosh for questions relating to the behaviour of a running BOSH environment, its BOSH director, or your running instances
38 | * https://github.com/cloudfoundry/bosh-agent if you specifically know that the bug or problem relates to the BOSH agent running on each BOSH instance
39 | * https://github.com/starkandwayne/ultimate-guide-to-bosh for issues or problems with this book
40 |
41 | If you are having issues or questions about individual BOSH releases or deployment manifests, then I recommend you create issues in their own respective Github repositories.
42 |
43 | ## Consulting and commercial assistance
44 |
45 | Stark & Wayne is a boutique consultancy that offers to work with you for short and long term engagements to roll out BOSH, migrate existing systems, and deploy new systems. I founded Stark & Wayne in 2012 and it has grown from strength to strength with the rising tide of Cloud Foundry and the shared success of all Cloud Foundry Foundation members.
46 |
47 | We would very much like the opportunity to help you on your journey with BOSH, CI/CD, and devops.
48 |
--------------------------------------------------------------------------------
/docs/cloud-config-updates.md:
--------------------------------------------------------------------------------
1 | # Cloud Config Updates
2 |
3 | The `bosh` CLI includes a `bosh update-cloud-config path/to/new-cloud-config.yml` command.
4 |
5 | One approach to curating the `cloud-config` is to:
6 |
7 | * download and save it to a file
8 |
9 | ```
10 | bosh cloud-config > cloud.yml
11 | ```
12 |
13 | * edit it
14 |
15 | ```
16 | vi cloud.yml
17 | ```
18 |
19 | * upload the changes
20 |
21 | ```
22 | bosh update-cloud-config cloud.yml
23 | ```
24 |
25 | This will show you the changes and you press `y` to continue:
26 |
27 | ```
28 | disk_types:
29 | - name: default
30 | - disk_size: 3000
31 | + disk_size: 5000
32 | ```
33 |
34 | Once cloud config is updated, all existing deployments will be considered outdated, as indicated by `bosh deployments` command.
35 |
36 | ```
37 | bosh deployments
38 | ```
39 |
40 | The output would show that all deployments now have an outdated `cloud-config`:
41 |
42 | ```
43 | Name Release(s) Stemcell(s) Team(s) Cloud Config
44 | zookeeper zookeeper/0.0.7 bosh-...-ubuntu-trusty-go_agent/... - outdated
45 | ```
46 |
47 | When you next deploy the `zookeeper` deployment it will show that the deployment's `disk_types` (merged in from the `cloud-config`) will be changing:
48 |
49 | ```
50 | $ bosh deploy zookeeper-release/manifests/zookeeper.yml
51 |
52 | disk_types:
53 | - name: default
54 | - disk_size: 3000
55 | + disk_size: 5000
56 |
57 | Continue? [yN]:
58 | ```
59 |
60 | After a deployment has been re-deployed, the `bosh deployments` output will show it has the latest `cloud-config`:
61 |
62 | ```
63 | $ bosh deployments
64 |
65 | Name Release(s) Stemcell(s) Team(s) Cloud Config
66 | zookeeper zookeeper/0.0.7 bosh-...-ubuntu-trusty-go_agent/... - latest
67 | ```
68 |
69 | ## Redeploy without a manifest
70 |
71 | To re-deploy a deployment you will require the deployment manifest in a local file:
72 |
73 | ```
74 | bosh deploy path/to/manifest.yml
75 | ```
76 |
77 | But when the only changes to a deployment are in the shared `cloud-config`, we can try using the previously successfully deployed manifest that is stored within the BOSH director:
78 |
79 | ```
80 | bosh deploy <(bosh manifest)
81 | ```
82 |
83 | I'm not suggesting this is a "good idea". But it's definitely "an idea". Remember to double check the proposed changes to the deployment.
84 |
--------------------------------------------------------------------------------
/docs/complete-deployment-manifest.md:
--------------------------------------------------------------------------------
1 | # Complete Deployment Manifest
2 |
3 | In the preceding sections of the Ultimate Guide to BOSH we have slowly introduced all the concepts of deploying systems with a BOSH environment, and all the sections of a BOSH deployment manifest.
4 |
5 | We can now review the entire working deployment manifest for our `zookeeper` example from https://github.com/cppforlife/zookeeper-release. All references to this file in this section will be from a parent folder into which this repository has been cloned:
6 |
7 | ```bash
8 | git clone https://github.com/cppforlife/zookeeper-release
9 | cat zookeeper-release/manifests/zookeeper.yml
10 | ```
11 |
12 | ```yaml
13 | ---
14 | name: zookeeper
15 |
16 | releases:
17 | - name: zookeeper
18 | version: 0.0.7
19 | url: git+https://github.com/cppforlife/zookeeper-release
20 |
21 | stemcells:
22 | - alias: default
23 | os: ubuntu-trusty
24 | version: latest
25 |
26 | update:
27 | canaries: 2
28 | max_in_flight: 1
29 | canary_watch_time: 5000-60000
30 | update_watch_time: 5000-60000
31 |
32 | instance_groups:
33 | - name: zookeeper
34 | azs: [z1, z2, z3]
35 | instances: 5
36 | jobs:
37 | - name: zookeeper
38 | release: zookeeper
39 | properties: {}
40 | - name: status
41 | release: zookeeper
42 | properties: {}
43 | vm_type: default
44 | stemcell: default
45 | persistent_disk: 10240
46 | networks:
47 | - name: default
48 |
49 | - name: smoke-tests
50 | azs: [z1]
51 | lifecycle: errand
52 | instances: 1
53 | jobs:
54 | - name: smoke-tests
55 | release: zookeeper
56 | properties: {}
57 | vm_type: default
58 | stemcell: default
59 | networks:
60 | - name: default
61 | ```
62 |
63 | ## Use of Deployment Manifest
64 |
65 | The deployment manifest is used by `bosh deploy` command to instruct your BOSH environment to create or update a deployment.
66 |
67 | You can provide a full-formed manifest:
68 |
69 | ```
70 | bosh deploy zookeeper-release/manifests/zookeeper.yml
71 | ```
72 |
73 | Alternately, you can amend a base manifest with `-o` [Operator files](/deployment-updates/#operator-files) and `-v` [Variables](/deployment-updates/#deployment-manifest-variables). For example, to modify the deployment name of the uploaded deployment manifest to use the current `$BOSH_DEPLOYMENT` value to create an Operator file:
74 |
75 | ```
76 | export BOSH_DEPLOYMENT=zookeeper-demo
77 | cat > change-deployment-name.yml < bosh ssh zookeeper/0
250 | $ cd /var/vcap/jobs/zookeeper
251 | $ tree
252 | .
253 | ├── bin
254 | │ ├── ctl
255 | │ └── pre-start
256 | ├── config
257 | │ ├── configuration.xsl
258 | │ ├── log4j.properties
259 | │ ├── myid
260 | │ └── zoo.cfg
261 | ├── monit
262 | └── packages
263 | ├── java
264 | └── zookeeper
265 | ```
266 |
267 | To recap, `/var/vcap/jobs/zookeeper/monit` describes how to start/stop `zookeeper` and what process ID (PID) to watch to ensure that ZooKeeper is still running. Monit will invoke `/var/vcap/jobs/zookeeper/bin/ctl start` to start or restart the local `zookeeper` process.
268 |
269 | An abridged version of `/var/vcap/jobs/zookeeper/bin/ctl` to start ZooKeeper looks like:
270 |
271 | ```bash
272 | export ZOOCFGDIR=/var/vcap/jobs/zookeeper/config
273 | exec chpst -u vcap:vcap \
274 | /var/vcap/packages/zookeeper/bin/zkServer.sh start-foreground
275 | ```
276 |
277 | The `$ZOOCFGDIR` environment variable is special to the `/var/vcap/packages/zookeeper/bin/zkServer.sh` script. This script will look for `zoo.cfg` in the `$ZOOCFGDIR` folder. Notice that `$ZOOCFGDIR` is a folder within the job template above: `config/zoo.cfg`.
278 |
279 | The contents of `/var/vcap/jobs/zookeeper/config/zoo.cfg` include the following configuration that allows the `zookeeper/0` instance to discover the other four instances:
280 |
281 | ```
282 | server.0=10.0.0.5:2888:3888
283 | server.1=10.0.0.6:2888:3888
284 | server.2=10.0.0.7:2888:3888
285 | server.3=10.0.0.8:2888:3888
286 | server.4=10.0.0.9:2888:3888
287 |
288 | clientPort=2181
289 | ```
290 |
291 | At a glance, Apache ZooKeeper will expect to communicate with its peer nodes on ports `2888` and `3888`, and the five cloud servers running the zookeeper processes have IP addresses `10.0.0.5` thru `10.0.0.9`. Client applications that want to use our Apache ZooKeeper system will communicate via port `2181`.
292 |
293 | This `config/zoo.cfg` configuration is meaningful to Apache ZooKeeper and the `zkServer.sh` start script. The contents of the configuration file are not meaningful to BOSH, but the file was created by BOSH.
294 |
295 | The deployment manifest `zookeeper.yml` did not need to explicitly allocate the five IP addresses above (also found by running `bosh instances`), nor did the manifest need to explicitly document them for the `zookeeper` job template. Instead, all the IP addresses for all members of the `zookeeper` instance group were automatically provided to the `zookeeper` job template before `monit` attempted to start any processes. We will look at how to write your own job templates in your own BOSH releases in later sections.
296 |
297 | The more urgent pieces of information you will want to know are: why these five `zookeeper` instances were allocated those five IP addresses; why the five underlying cloud servers are allowed to talk to each other; whether anything special is required for them to communicate over ports `2888` and `3888` whilst preventing other systems from accessing these ports; and how client applications are allowed to access these five cloud servers over port `2181`.
298 |
299 | And if you're confused at all by that last paragraph, then you are ready for the next section.
300 |
--------------------------------------------------------------------------------
/docs/deployment-updates.md:
--------------------------------------------------------------------------------
1 | # Deployment Updates
2 |
3 | One of the fabulous features of BOSH is the ability to change, scale, and evolve your deployments throughout their multi-year lifespans.
4 |
5 | ## Scaling deployments
6 |
7 | Over time, your long-lived BOSH deployments may need to be scaled up to cope with increased traffic or accrued data. Or you might discover that the initial deployment uses more resources than necessary and you want to downsize the deployment.
8 |
9 | You can change the scale of your deployments using the same methodology for making any changes to a deployment:
10 |
11 | 1. Modify the deployment manifest
12 | 2. Deploy the manifest
13 |
14 | ```
15 | bosh deploy path/to/manifest.yml
16 | ```
17 |
18 | For example, we can change our `zookeeper` cluster down to 3 instances, whilst simultaneously changing the instance group's `vm_type` attribute and increasing the `persistent_disk` size. The abridged `zookeeper-release/manifests/zookeeper.yml` deployment manifest showing the changed attributes:
19 |
20 | ```yaml
21 | ---
22 | name: zookeeper
23 |
24 | instance_groups:
25 | - name: zookeeper
26 | instances: 3
27 | vm_type: large
28 | jobs:
29 | - name: zookeeper
30 | release: zookeeper
31 | - name: status
32 | release: zookeeper
33 | persistent_disk: 20480
34 |
35 | - name: smoke-tests
36 | ...
37 | ```
38 |
39 | After making these changes and saving the file, you would then run:
40 |
41 | ```
42 | > bosh deploy zookeeper-release/manifests/zookeeper.yml
43 | ```
44 |
45 | The `bosh` CLI will display the various proposed changes. Once you confirm by pressing `y`, the new deployment manifest will be uploaded to your BOSH director, which will perform the changes.
46 |
47 | ## Modifying the Running Deployment Manifest
48 |
49 | If you do not have the original deployment manifest available when you want to modify it, you have the option to download the last deployment manifest that was successfully deployed.
50 |
51 | ```
52 | bosh manifest > manifest.yml
53 | ```
54 |
55 | After modifying this file, you could then apply the changes with `bosh deploy`:
56 |
57 | ```
58 | bosh deploy manifest.yml
59 | ```
60 |
61 | ## Operator Files
62 |
63 | It can be common that the deployment manifest comes from a source repository that you do not maintain. Our common example `zookeeper-release/manifests/zookeeper.yml` is a file within the https://github.com/cppforlife/zookeeper-release repository. If you directly modify this file you will not be able to commit the changes to this repository.
64 |
65 | Instead, the `bosh deploy` command allows you to apply "operators" to the base manifest; that is, changes or amendments. We use the `bosh deploy -o path/changes.yml` flag for each operator file.
66 |
67 | An example operator file that will change the number of `zookeeper` instances from 5 to 3 is:
68 |
69 | ```yaml
70 | - type: replace
71 | path: /instance_groups/name=zookeeper/instances
72 | value: 3
73 | ```
74 |
75 | Another example - either in a new file or appended to the file above - to change the `persistent_disk` attribute for each `zookeeper` instance:
76 |
77 | ```yaml
78 | - type: replace
79 | path: /instance_groups/name=zookeeper/persistent_disk
80 | value: 20480
81 | ```
82 |
83 | Finally, to change the `vm_type`:
84 |
85 | ```yaml
86 | - type: replace
87 | path: /instance_groups/name=zookeeper/vm_type
88 | value: large
89 | ```
90 |
91 | If these three YAML snippets were in a file `zookeeper-scale.yml`, then to apply the operators to our deployment manifest:
92 |
93 | ```
94 | > bosh deploy zookeeper-release/manifests/zookeeper.yml -o zookeeper-scale.yml
95 | ```
96 |
97 | ### Operator paths
98 |
99 | There are two `type` options in each Operator: `replace`, and `remove`. The former will update or insert an item in the final YAML file. The latter will remove an item from the final YAML file. The item to be modified is determined by the `path` expression.
100 |
101 | The `path` expression describes a walk up a YAML tree, so to speak. Consider the example YAML file ("tree"):
102 |
103 | ```yaml
104 | name: zookeeper
105 |
106 | instance_groups:
107 | - name: zk
108 | instances: 5
109 | networks:
110 | - name: default
111 | ```
112 |
113 | The following `path` expressions and their corresponding items from the YAML file:
114 |
115 | * `path: /name`
116 | ```
117 | zookeeper
118 | ```
119 | * `path: /instance_groups`
120 | ```
121 | - name: zk
122 | instances: 5
123 | networks:
124 | - name: default
125 | ```
126 | * `path: /instance_groups/0` - index 0 in the `/instance_groups` array
127 | ```
128 | name: zk
129 | instances: 5
130 | networks:
131 | - name: default
132 | ```
133 | * `path: /instance_groups/name=zk` - the item of the `/instance_groups` array with `name: zk`
134 | ```
135 | name: zk
136 | instances: 5
137 | networks:
138 | - name: default
139 | ```
140 | * `path: /instance_groups/name=zk/instances` - the value of `instances` from the preceding item
141 | ```
142 | 5
143 | ```
144 | * `path: /instance_groups/name=zk/networks` - the value of `networks` is an array of one item:
145 | ```
146 | - name: default
147 | ```
148 | * `path: /instance_groups/name=zk/networks/name=default` - specifically selects the `networks` item with `name: default`:
149 | ```
150 | name: default
151 | ```
152 |
153 | ### Why not use XPath or CSS selectors?
154 |
155 | Walking a structured document such as a YAML/JSON/XML file to select an item has other syntaxes that you might have seen before. Your well-rounded knowledge of computers will do you well in life. But I'm sorry to say, `bosh deploy` doesn't use those syntaxes.
156 |
157 | The Operator `path` syntax comes from the JSONPath spec. TODO link.
158 |
159 | ### More Examples of Operator Files
160 |
161 | Support for Operator files is provided by a library https://github.com/cppforlife/go-patch. See TODO for an extensive list of examples.
162 |
163 |
164 | ## Assign Cloud Config Options to Deployment Manifest
165 |
166 | Another use of Operator files is to assign the available options within your `bosh cloud-config` to the deployment manifest. A base deployment manifest will not know in advance the available `vm_types` or `networks` within your Cloud Config.
167 |
168 | In the previous section we modified the `vm_type: large` where `large` was known to us in advance as an available option from our `vm_types`.
169 |
170 | To see the list of available `vm_types`:
171 |
172 | ```
173 | > bosh int <(bosh cloud-config) --path /vm_types
174 | ```
175 |
176 | Similarly, to see the list of available `networks`:
177 |
178 | ```
179 | > bosh int <(bosh cloud-config) --path /networks
180 | ```
181 |
182 | You would then create an Operator file to assign your chosen option to each instance group. For our `zookeeper` deployment with its two instance groups `zookeeper` and `smoke-tests`:
183 |
184 | ```yaml
185 | - type: replace
186 | path: /instance_groups/name=zookeeper/vm_type
187 | value: large
188 |
189 | - type: replace
190 | path: /instance_groups/name=zookeeper/networks/name=default/name
191 | value: private
192 |
193 | - type: replace
194 | path: /instance_groups/name=smoke-tests/vm_type
195 | value: tiny
196 |
197 | - type: replace
198 | path: /instance_groups/name=smoke-tests/networks/name=default/name
199 | value: private
200 | ```
201 |
202 | Note, for completeness of your Operator file education, the two `networks` operator expressions could also be implemented as either of the following examples:
203 |
204 | ```yaml
205 | - type: replace
206 | path: /instance_groups/name=zookeeper/networks
207 | value:
208 | - name: private
209 |
210 | - type: replace
211 | path: /instance_groups/name=smoke-tests/networks/name=default
212 | value:
213 | name: private
214 | ```
215 |
216 | The former example will replace the entire `/instance_groups/name=zookeeper/networks` with an explicit declaration of the `networks` for that instance group.
217 |
218 | The latter example will isolate and replace the specific `networks` item with the `name: default`.
219 |
220 | Since the original example YAML file only had one `networks` array item all three examples result in the same modification.
221 |
222 | ## Update Job Template Properties
223 |
224 | Most job templates have optional or required properties. So far in our `zookeeper` deployment examples we have not explicitly overridden any properties, rather have enjoyed the mystery of the unknown default properties.
225 |
226 | We can provide a property within an instance group with the optional `properties` attribute. For example, the `zookeeper` job template has a property `max_client_connections` with a default value `60`. To explicitly declare this property with a value of `120`, we can either modify the deployment manifest directly:
227 |
228 | ```yaml hl_lines="7 8"
229 | instance_groups:
230 | - name: zookeeper
231 | instances: 5
232 | jobs:
233 | - name: zookeeper
234 | release: zookeeper
235 | properties:
236 | max_client_connections: 120
237 | ```
238 |
239 | Or we can use an Operator file to add the missing `properties.max_client_connections` attribute:
240 |
241 | ``` hl_lines="2"
242 | - type: replace
243 | path: /instance_groups/name=zookeeper/jobs/name=zookeeper/properties?/max_client_connections
244 | value: 120
245 | ```
246 |
247 | Note the special use of `?` question mark in `/properties?/max_client_connections`. This means, "if `/properties` attribute is missing, then please create it first". Operator files are nothing if not courteous. They will not insert new attributes into a YAML file without permission from the `?` postfix operator.
248 |
249 | ## Deployment Manifest Variables
250 |
251 | In addition to `-o` Operator files, the `bosh deploy` command also allows you to pass in variables with the `-v` flag. Variables are a simpler way to specify values than the relatively complex Operator file syntax. But you can only provide variables if the deployment manifest wants them.
252 |
253 | A deployment manifest will request variables with the double parentheses syntax `((var-name))`.
254 |
255 | Consider an abridged variation of our `zookeeper-release/manifests/zookeeper.yml` file where the `properties.max_client_connections` attribute is declared, but the value is a variable:
256 |
257 | ```yaml
258 | name: zookeeper
259 |
260 | instance_groups:
261 | - name: zookeeper
262 | instances: 5
263 | jobs:
264 | - name: zookeeper
265 | release: zookeeper
266 | properties:
267 | max_client_connections: ((zk-max-client-connections))
268 | ```
269 |
270 | Your `bosh deploy` command will now require that you provide the value for this variable:
271 |
272 | ```
273 | > bosh deploy zookeeper-release/manifests/zookeeper.yml \
274 | -v zk-max-client-connections=120
275 | ```
276 |
277 | If you forget to provide a required variable, the BOSH director may error with output similar to:
278 |
279 | ```
280 | > bosh deploy zookeeper-release/manifests/zookeeper.yml
281 | ...
282 | Task 4 | 04:17:27 | Preparing deployment: Preparing deployment (00:00:01)
283 | L Error: Failed to fetch variable '/.../zookeeper/zk-max-client-connections'
284 | from config server: Director is not configured with a config server
285 | ```
286 |
287 | TODO example error on a credhub BOSH
288 |
289 | The benefit of variables is they are easier to use than creating new Operator files. The downside of variables occurs if a deployment manifest uses variables, but a default value would have been fine. It can be tedious providing explicit variables for a new deployment at a time when you just don't know or care what good values might be.
290 |
291 | Good defaults are better than variables. Operators can always modify or set values later with Operator files. The files are even named after us!
292 |
293 | ## Updates Are Not Atomic
294 |
295 | When you run `bosh deploy`, the changes you've requested will not just atomically and magically be performed. They will take some amount of time and be performed in some specific order. There will be times during `bosh deploy` where some job templates may have been stopped and have not yet been restarted.
296 |
297 | ## Update Batches
298 |
299 | The final top-level section of a deployment manifest is `update` and it specifies the batches of instances to update, and how long should the BOSH director wait before timing out and declaring that a `bosh deploy` has failed.
300 |
301 | The `update` section for our `zookeeper-release/manifests/zookeeper.yml` manifest is:
302 |
303 | ```yaml
304 | update:
305 | canaries: 2
306 | max_in_flight: 1
307 | canary_watch_time: 5000-60000
308 | update_watch_time: 5000-60000
309 | ```
310 |
311 | Remember that we are deploying or updating five `zookeeper` instances. The `update.canaries` and `update.max_in_flight` values determine the batch sizes. Do we deploy/update one instance at a time, all of the instances at the same time, or in small batches?
312 |
313 | In the `zookeeper` example, we will start any `bosh deploy` by updating the first two instances together. The remaining three instances will continue running as they were previously, happily doing their work. The first two instances being updated might fail. They are like a canary in a coal mine. If we update two instances and they fail, we will still have three instances running in our cluster.
314 |
315 | If the canaries fail to update, the deployment will cease. As the Operator, you will commence the professional tradecraft of debugging.
316 |
317 | If the canaries successfully update, then the instance group will be temporarily comprised of two updated instances and three older instances. It is important that the job templates running on these five instances are capable of running in this temporary configuration. The developers of the `zookeeper` BOSH release and job templates would have tested this, since they were the ones who suggested the `canaries: 2` update attribute.
318 |
319 | The next batch of instances to be updated will be of size 1, the `max_in_flight` value. Then another batch of 1, and then the final `max_in_flight` batch of 1 instance. If any batch of instances fails to update the `bosh deploy` activity will cease.
320 |
321 | Your deployment manifest can also have an `update` section in any instance group, which will override the top-level global `update`.
322 |
323 | The `bosh deploy` command also has options that will override the `update` manifest attribute:
324 |
325 | ```
326 | > bosh deploy -h
327 | ...
328 | [deploy command options]
329 | ...
330 | --canaries= Override manifest values for canaries
331 | --max-in-flight= Override manifest values for max_in_flight
332 | ```
333 |
334 | ## Renaming An Instance Group
335 |
336 | Over the lifetime of a deployment you might merge or split out job templates between instance groups. You might then want to rename the instance groups. This can be done using the deployment manifest.
337 |
338 | **But**, if you simply change the name of the instance group the BOSH director will not know you wanted to rename an existing instance group. It will destroy the previous instance group, orphan its persistent disks, and then provision a new instance group with new persistent disks.
339 |
340 | To **rename** the `zookeeper` instance group in our `zookeeper` deployment manifest, we add the `migrated_from` attribute to our deployment manifest.
341 |
342 | ```yaml
343 | instance_groups:
344 | - name: zk
345 | migrated_from:
346 | - name: zookeeper
347 | - name: zookeeper-instances
348 | ...
349 | ```
350 |
351 | When we next run `bosh deploy`, the BOSH director will harmlessly rename any `zookeeper` or `zookeeper-instances` instances to their new name `zk`. On subsequent `bosh deploy` operations, with the instance group now called `zk`, the `migrated_from` attribute will be ignored.
352 |
--------------------------------------------------------------------------------
/docs/deployments.md:
--------------------------------------------------------------------------------
1 | # Deployments
2 |
3 | Let's begin!
4 |
5 | The highest level concept of BOSH is the "deployment" of a system. The purpose of BOSH is to continuously run one or more deployments. For example, a cluster of servers that form a ZooKeeper cluster is a deployment of the ZooKeeper system.
6 |
7 | In [Joyful operations](/introduction/#joyful-operations) we began by creating a deployment:
8 |
9 | ```
10 | > export BOSH_DEPLOYMENT=zookeeper
11 | > bosh deploy zookeeper-release/manifests/zookeeper.yml
12 | ```
13 |
14 | And we finished the lifecycle of that system by deleting the BOSH deployment:
15 |
16 | ```
17 | > bosh delete-deployment
18 | ```
19 |
20 | ## New Deployments
21 |
22 | When we ask BOSH to provision a new system, BOSH takes upon the entire responsibility for making this happen:
23 |
24 | * BOSH will communicate with your cloud infrastructure API to request new servers/virtual machines (called "instances")
25 | * BOSH manages the base machine image used for each virtual machine (called "stemcells")
26 | * BOSH will allocate an available IP address for each instance
27 | * BOSH will communicate with your cloud infrastructure API to request persistent disk volumes (we will see soon that the `zookeeper.yml` manifest requires a persistent disk volume for each instance in the deployment)
28 | * BOSH will request that the disk volumes are attached to the instances
29 |
30 | Once the instances are provisioned and the disks are attached, BOSH then starts communicating with each instance:
31 |
32 | * BOSH will format disk volumes if necessary
33 | * BOSH will download the software required (called "packages")
34 | * BOSH will construct configuration files for the packages and commence running the software (called "job templates")
35 | * BOSH provides service discovery information to each job template about the location and credentials of other instances (called "links")
36 |
37 | At this point, it becomes the installed software's responsibility to do things that it needs to do. It now has been given a brand new instance running on a hardened base operating system, with a mounted persistent disk for it to store data, and has been configured with the information for forming a cluster with its peers, and connecting as a client to any other systems.
38 |
39 | ## New Deployments of ZooKeeper
40 |
41 | Let's revisit each of these actions for the specific case of our 5-instance deployment of ZooKeeper running on AWS.
42 |
43 | ```
44 | > export BOSH_DEPLOYMENT=zookeeper
45 | > bosh deploy zookeeper-release/manifests/zookeeper.yml
46 | ```
47 |
48 | Inside `zookeeper.yml` is the description of a group of five instances, each with a 10GB persistent disk volume (we will review the contents of this file soon).
49 |
50 | * BOSH sends requests to AWS API for five EC2 VMs, using a specified Amazon Machine Image (AMI) as the base file system/operating system
51 | * BOSH will manage the allocation of IPs within the VPC subnet rather than using DHCP (more on networking later)
52 | * BOSH sends requests to AWS for five EBS volumes and then attaches each one to a different EC2 VM
53 |
54 | Each of the AWS EC2 VMs will eventually "call home" to BOSH saying that they are awake and ready.
55 |
56 | BOSH then begins preparing them for their role of "ZooKeeper" instance.
57 |
58 | * BOSH downloads special BOSH packages of Apache ZooKeeper, plus the Java JDK which is a dependency for running ZooKeeper.
59 | * BOSH downloads special BOSH job templates that describe how to configure and run a single node of ZooKeeper on each instance
60 | * BOSH provides each ZooKeeper job template with the IP address, client port, quorum port and leader election port for every other member of the deployment (these are ZooKeeper specific requirements for a cluster of ZooKeeper instances)
61 |
62 | ## BOSH Architecture, Part 1
63 |
64 | In the previous sections, I've made reference to a `bosh` CLI but have otherwise danced around the topic of, "What is BOSH really?"
65 |
66 | From now onward, I will stop simplistically saying, "BOSH does a thing," and start to be consistently discerning about which aspect of BOSH is doing something.
67 |
68 | Right now think of BOSH as three things:
69 |
70 | * BOSH CLI - the `bosh` command being referenced in the earlier examples. The CLI is a client to the:
71 | * BOSH director - an HTTP API that receives requests from the CLI and either communicates directly with instances or with your cloud infrastructure. Communication with your cloud infrastructure is via a:
72 | * Cloud Provider Interface (CPI) - the specific implementation of how a BOSH director communicates with AWS, GCP, vSphere, OpenStack, or any other target.
73 |
74 | ## CPI - The Ultimate Cloud Provider Interface Abstraction
75 |
76 | The CLI, the director, and a CPI are the basic components that bring a deployment to life on your target cloud infrastructure.
77 |
78 | For our ZooKeeper example, we began with:
79 |
80 | ```
81 | > bosh deploy zookeeper-release/manifests/zookeeper.yml
82 | ```
83 |
84 | The BOSH CLI loads the `zookeeper.yml` file from your local machine (which originally came from a [Github repository](https://github.com/cppforlife/zookeeper-release/blob/master/manifests/zookeeper.yml) in the [Joyful operations](/introduction/#joyful-operations) section above).
85 |
86 | The BOSH CLI forwards this file on to the BOSH director.
87 |
88 | The BOSH director decides that it is a new deployment (it has a name that the BOSH director does not know yet). The BOSH director decides it needs to provision five new virtual machines and five persistent disks (we will investigate the contents of `zookeeper.yml` soon). The BOSH director delegates this activity to the BOSH CPI for AWS (where we are attempting to deploy ZooKeeper in our example).
89 |
90 | The BOSH CPI is a local command line application hosted inside the BOSH director. You will never need to touch it, find it, or run it manually. But it can be helpful to understand its nature. A CPI - the abstraction for how a BOSH director can interact with any cloud infrastructure - is just a CLI. The BOSH director - a long-running HTTP API process - calls out to the CPI executable and invokes commands using a JSON payload. When the CPI completes its task - creating a VM, creating a disk, etc - it will return JSON with success/failure information.
91 |
92 | For ZooKeeper running on AWS, our BOSH director will be running with the AWS CPI CLI (TLA BINGO - three, three letter acronyms in a row) installed on the same server. The combination of the BOSH director and a collocated CPI CLI is the magic of how a BOSH director can be configured to communicate with any cloud infrastructure. The CPI CLIs can be written in different programming languages than BOSH director, and be maintained by different engineering teams at different companies. It is a wonderful, powerful design pattern.
93 |
94 | This will be the last time we will reference the CPIs for a long time. They exist. They allow a BOSH director to interact with any cloud infrastructure. There are many of them already implemented (AWS, Google Compute Platform, Microsoft Azure, VMWare vSphere, OpenStack, IBM SoftLayer, VirtualBox, Warden/Garden, Docker).
95 |
96 | And you will mostly never need to know about them.
97 |
98 | Here is the command for deploying five Amazon EC2 servers running ZooKeeper, backed by Amazon EBS volumes, running inside Amazon VPC networking:
99 |
100 | ```
101 | > bosh deploy zookeeper-release/manifests/zookeeper.yml
102 | ```
103 |
104 | In the AWS console, your list of EC2 servers (including the BOSH director VM) might look like:
105 |
106 | 
107 |
108 | Here is the command for deploying five Google Compute VM Instances, backed by Google Compute Disks, running inside GCP networking, installed and configured to be a ZooKeeper cluster:
109 |
110 | ```
111 | > bosh deploy zookeeper-release/manifests/zookeeper.yml
112 | ```
113 |
114 | In the Google Cloud Platform console, your list of VM instances (including a NAT VM, bastion VM, and BOSH director VM) might look like:
115 |
116 | 
117 |
118 | Never used VMWare vSphere before? Here is the command for deploying five ESXi virtual machines using a concept of persistent disks, on any cluster of physical servers in the world. And they will be ZooKeeper:
119 |
120 | ```
121 | > bosh deploy zookeeper-release/manifests/zookeeper.yml
122 | ```
123 |
124 | In VMWare vCenter, your deployment will not specifically look like anything. vSphere is a crazy mess to me.
125 |
126 | For sure there are distinctions in deploying any system to any infrastructure that need to be made, but the command above is valid and will work once we have a running BOSH director configured with a CPI. That's fantastic.
127 |
--------------------------------------------------------------------------------
/docs/disks.md:
--------------------------------------------------------------------------------
1 | # Disks
2 |
3 | One of the first demonstrations of BOSH I ever saw in April 2012 was "let's resize a disk". It is an incredibly powerful demonstration of BOSH. Managing disk sizes can be non-trivial on most cloud infrastructures; with BOSH it is a fully managed service:
4 |
5 | As a user, it is just two steps:
6 |
7 | 1. change the persistent disk size or select a different persistent disk type in the deployment manifest
8 | 1. run `bosh deploy` to request the BOSH director orchestrate the changes
9 |
10 | The BOSH director will now orchestrate the following sequence for you:
11 |
12 | 1. provision a second, larger persistent disk
13 | 1. attach it to the instance
14 | 1. format and mount the disk as an additional temporary mounted volume
15 | 1. stop all the processes
16 | 1. copy all the data from the older volume to the new volume (this can be time consuming for large amounts of data)
17 | 1. unmount the older volume
18 | 1. remount the new volume to `/var/vcap/store`
19 | 1. restart all the processes
20 | 1. detach the older persistent disk
21 | 1. delete the older, orphaned persistent disk in 5 days
22 |
23 | You merely had to run `bosh deploy` and your BOSH environment does everything else above.
24 |
25 | Growing your infrastructure has never been easier.
26 |
27 | We first looked at disks in the section [Persistent Volumes](/instances/#persistent-volumes).
28 |
29 | Each instance of an instance group can have a fully managed persistent disk (see [Multiple Persistent Disks](/disks/#multiple-persistent-disks) to move to multiple disks). It will be mounted at `/var/vcap/store` and is shared across all job templates collocated on the same instance.
30 |
31 | This section will discuss persistent disk types, the provisioning and mounting sequence, and how the BOSH director fully manages the resizing of persistent disks between deployments.
32 |
33 | ## Persistent Disks and CPIs
34 |
35 | Each cloud infrastructure and the BOSH Cloud Provider Interface (CPI) will have its own implementation of persistent disks.
36 |
37 | The important implementation requirement of the CPI and the cloud infrastructure is that the persistent disk can be detached from one cloud server and reattached to another cloud server. These disk volumes are not necessarily located on the same host machine as the cloud server to which they are attached, rather will be located somewhere else on the network. Hence we can term them "network disk volumes".
38 |
39 | For example, the AWS CPI will provision AWS Elastic Block Storage (AWS EBS). The vSphere CPI has more work to do to implement persistent disks as vSphere does not have a native network disk volume concept. The Docker CPI and Garden CPI, which manage local Linux containers rather than virtual machines, will implement persistent disks upon the host machine.
40 |
41 | A BOSH persistent disk exists for the life of the instance group, not only during the lifespan of individual cloud servers. This is similar to the relationship between BOSH instances and the underlying cloud servers provided by the cloud infrastructure. The BOSH instance and BOSH persistent disk are concepts that exist for the lifetime of the deployment, but underneath they are implemented by cloud servers and network disk volumes that might be provisioned and destroyed to support resizing or upgrades.
42 |
43 | ## Persistent disks and volume mounts
44 |
45 | If an instance has a single persistent disk then the BOSH director will organise for the disk to be formatted and mounted.
46 |
47 | Each persistent disk is formatted with filesystem type `ext4` by default. [Alternate filesystem types](https://bosh.io/docs/persistent-disk-fs.html) are available.
48 |
49 | Each persistent disk is mounted at `/var/vcap/store`.
50 |
51 | ### Pitfalls of forgetting persistent disks
52 |
53 | At the time of writing, BOSH job templates do not have a way to communicate with the BOSH director that they **require** a persistent disk. With or without an external `/var/vcap/store` mount, these job templates will attempt to write data within this folder structure. Potentially large amounts of data. This scenario will result in system failures without a persistent disk mounted.
54 |
55 | Consider a job template that writes data to `/var/vcap/store/zookeeper/mydb.dat`.
56 |
57 | If there is a persistent disk mounted at `/var/vcap/store` then the file `/var/vcap/store/zookeeper/mydb.dat` will be safely stored upon this persistent disk. If the disk starts to fill up then it is a simple matter to resize the persistent disk (see the opening demonstration in [Disks](/disks)).
58 |
59 | If there is no persistent disk mounted at `/var/vcap/store`, then the file `/var/vcap/store/zookeeper/mydb.dat` will be stored upon the root volume `/`. The root volume is typically small (large enough only for the system's packages), ephemeral (changes to the root volume will be lost when the cloud server is recreated), and fixed in size (root volumes are typically not resized during the life of a deployment). Eventually the root volume will fill up and the instance's processes and perhaps system processes will begin to fail.
60 |
61 | If your instances are ever experiencing failure, run `df -h` to check that your disks have not filled up. If the root volume `/` is at 100% then you have probably forgotten to include a persistent disk; or a job template has a mistake in it and is writing files outside of `/var/vcap/store` or `/var/vcap/data` volumes.
62 |
63 | ## Simple Persistent Disk
64 |
65 | The simplest technique for allocating a persistent disk in a deployment manifest is the `persistent_disk` attribute of an `instance_group`. Consider this abbreviated deployment manifest:
66 |
67 | ```yaml
68 | instance_groups:
69 | - name: zookeeper
70 | instances: 5
71 | persistent_disk: 10240
72 | ```
73 |
74 | BOSH will manage five different persistent disks and their association with the five `zookeeper` instances in this deployment. BOSH will use its CPI to request a `10240` MB persistent disk from the cloud infrastructure. That is, on AWS it will provision five 10GB AWS EBS volumes and request that AWS attach each one to the five different AWS EC2 servers.
75 |
76 | Continuing with the AWS EBS example, the AWS CPI has to choose an [EBS Volume Type](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html). At the time of writing, the AWS CPI defaults all disks to General Purpose SSD (`gp2`). Other CPIs will have their own defaults:
77 |
78 | * Google CPI defaults to `pd-standard` https://bosh.io/docs/google-cpi.html#disk-types
79 | * AWS CPI defaults to `gp2` https://bosh.io/docs/aws-cpi.html#disk-pools
80 | * vSphere CPI defaults to `preallocated` https://bosh.io/docs/vsphere-cpi.html#disk-pools
81 | * OpenStack CPI defaults to `SSD` https://bosh.io/docs/openstack-cpi.html#disk-pools
82 |
83 | ## Persistent Disk Types
84 |
85 | The benefits of `persistent_disk` attribute are that it is simple (just a number of megabytes) and cloud infrastructure agnostic.
86 |
87 | The downside is the inability to customise the cloud infrastructure attributes of the disk. For this we switch from `persistent_disk` to the `persistent_disk_type` attribute.
88 |
89 | ```yaml
90 | instance_groups:
91 | - name: zookeeper
92 | instances: 5
93 | persistent_disk_type: large
94 | ```
95 |
96 | In the updated example above, we are now using a text label `large` to reference a new item `disk_types` from your `bosh cloud-config`:
97 |
98 | ```yaml
99 | disk_types:
100 | - name: default
101 | disk_size: 3000
102 | cloud_properties: {}
103 | - name: large
104 | disk_size: 50_000
105 | cloud_properties: {}
106 | ```
107 |
108 | The `name` attribute is the reference label used by `persistent_disk_type` in deployment manifests. The `disk_size` is in megabytes.
109 |
110 | The default `cloud_properties` for each item in `disk_types` is the same as for the `persistent_disk` section above. The linked URLs to documentation describe the cloud infrastructure options.
111 |
112 | In our modified `zookeeper` deployment manifest above, our `persistent_disk_type: large` currently maps to a 50,000 MB disk with default `cloud_properties` for the cloud infrastructure.
113 |
114 | If the `name: large` attributes are subsequently modified in `bosh cloud-config` then our deployment will not be immediately affected. Our deployment will pick up the changes when we next run `bosh deploy`.
115 |
116 | ### Available Disk Types
117 |
118 | You can use the `bosh` CLI to discover the `disk_types` and their labels on your BOSH director:
119 |
120 | ```
121 | bosh int <(bosh cloud-config) --path /disk_types
122 | ```
123 |
124 | The example output might be similar to:
125 |
126 | ```yaml
127 | - disk_size: 3000
128 | name: default
129 | - disk_size: 50000
130 | name: large
131 | - disk_size: 1024
132 | name: 1GB
133 | - disk_size: 5120
134 | name: 5GB
135 | - disk_size: 10240
136 | name: 10GB
137 | - disk_size: 102400
138 | name: 100GB
139 | ```
140 |
141 | In this example, the optional `cloud_properties` attribute was not included.
142 |
143 | To curate the lists of `disk_types` shared amongst your deployments you will need to update your `cloud-config`. We will discuss this later in the section [Cloud Config Updates](/cloud-config-updates).
144 |
145 | ## Orphaned Disks
146 |
147 | The BOSH director does not immediately delete disks that are no longer needed. These disks are marked as orphaned and will be garbage collected after 5 days.
148 |
149 | To save money or to reclaim disk space on the host system you can manually delete them.
150 |
151 | To get a list of all orphaned disks:
152 |
153 | ```
154 | bosh disks --orphaned
155 | ```
156 |
157 | After deleting our `zookeeper` deployment, or resizing all its disks, the output might look like:
158 |
159 | ```
160 | Disk CID Size Deployment Instance AZ Orphaned At
161 | disk-5d968d18... 10 GiB zookeeper zookeeper/0d382e23... z1 ...
162 | disk-bc5945ca... 10 GiB zookeeper zookeeper/0acaf2d8... z2 ...
163 | disk-bac28797... 10 GiB zookeeper zookeeper/a94bb2b7... z1 ...
164 | disk-bd67097c... 10 GiB zookeeper zookeeper/97c88778... z2 ...
165 | disk-f7e5d3cf... 10 GiB zookeeper zookeeper/e4d84929... z3 ...
166 | ```
167 |
168 | ### Reattach orphaned disks
169 |
170 | The primary purpose of orphaning disks is to allow you to recover from the error of accidentally deleting your deployments or from erroneous deployment changes.
171 |
172 | If we made an error with our `zookeeper` deployment, then we would need to reattach five different disks.
173 |
174 | To reattach an orphaned disk to a running instance:
175 |
176 | * run `bosh stop name/id` command to stop instance (or multiple instances) for repair
177 | * run `bosh attach-disk name/id disk-cid` command to attach disk to given instance
178 | * run `bosh start name/id` command to resume running instance workload
179 |
180 | The `bosh attach-disk` command will replace any currently attached disk, thus orphaning it.
181 |
182 | The `attach-disk` command can also attach available disks found in the cloud infrastructure. They don’t have to be listed in the `bosh disks --orphaned` list.
183 |
184 | ### Delete orphaned disks
185 |
186 | You can delete individual disks using their "Disk CID":
187 |
188 | ```
189 | bosh delete-disk disk-3b40021c...
190 | ```
191 |
192 | If you want to delete every orphaned disk, then this little snippet might come in handy:
193 |
194 | ```
195 | bosh disks --orphaned | grep disk- | awk '{print $1}' | xargs -L1 bosh delete-disk -n
196 | ```
197 |
198 | ## Multiple Persistent Disks
199 |
200 | BOSH deployment manifests can request multiple persistent disks, apply any disk formatting, and mount them to any path. This topic is discussed in a great blog post by Chris Weibel [Mounting Multiple Persistent Disks with BOSH](https://www.starkandwayne.com/blog/bosh-multiple-disks/).
201 |
--------------------------------------------------------------------------------
/docs/images/aws/aws-public-ami.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/egen/ultimate-guide-to-bosh/c084daa48d7dfd0da8a1c7b040e67ae992ee0666/docs/images/aws/aws-public-ami.png
--------------------------------------------------------------------------------
/docs/images/aws/aws-subnet.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/egen/ultimate-guide-to-bosh/c084daa48d7dfd0da8a1c7b040e67ae992ee0666/docs/images/aws/aws-subnet.png
--------------------------------------------------------------------------------
/docs/images/bookcover/book-cover-being-read.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/egen/ultimate-guide-to-bosh/c084daa48d7dfd0da8a1c7b040e67ae992ee0666/docs/images/bookcover/book-cover-being-read.png
--------------------------------------------------------------------------------
/docs/images/bookcover/book-cover-library.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/egen/ultimate-guide-to-bosh/c084daa48d7dfd0da8a1c7b040e67ae992ee0666/docs/images/bookcover/book-cover-library.png
--------------------------------------------------------------------------------
/docs/images/bookcover/bookcover-3d.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/egen/ultimate-guide-to-bosh/c084daa48d7dfd0da8a1c7b040e67ae992ee0666/docs/images/bookcover/bookcover-3d.png
--------------------------------------------------------------------------------
/docs/images/bookcover/bookcover.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/egen/ultimate-guide-to-bosh/c084daa48d7dfd0da8a1c7b040e67ae992ee0666/docs/images/bookcover/bookcover.png
--------------------------------------------------------------------------------
/docs/images/bosh-io-stemcell-sizes.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/egen/ultimate-guide-to-bosh/c084daa48d7dfd0da8a1c7b040e67ae992ee0666/docs/images/bosh-io-stemcell-sizes.png
--------------------------------------------------------------------------------
/docs/images/favicon/browserconfig.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 | #FFFFFF
9 |
10 |
11 |
--------------------------------------------------------------------------------
/docs/images/favicon/favicon-114.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/egen/ultimate-guide-to-bosh/c084daa48d7dfd0da8a1c7b040e67ae992ee0666/docs/images/favicon/favicon-114.png
--------------------------------------------------------------------------------
/docs/images/favicon/favicon-120.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/egen/ultimate-guide-to-bosh/c084daa48d7dfd0da8a1c7b040e67ae992ee0666/docs/images/favicon/favicon-120.png
--------------------------------------------------------------------------------
/docs/images/favicon/favicon-144.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/egen/ultimate-guide-to-bosh/c084daa48d7dfd0da8a1c7b040e67ae992ee0666/docs/images/favicon/favicon-144.png
--------------------------------------------------------------------------------
/docs/images/favicon/favicon-150.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/egen/ultimate-guide-to-bosh/c084daa48d7dfd0da8a1c7b040e67ae992ee0666/docs/images/favicon/favicon-150.png
--------------------------------------------------------------------------------
/docs/images/favicon/favicon-152.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/egen/ultimate-guide-to-bosh/c084daa48d7dfd0da8a1c7b040e67ae992ee0666/docs/images/favicon/favicon-152.png
--------------------------------------------------------------------------------
/docs/images/favicon/favicon-16.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/egen/ultimate-guide-to-bosh/c084daa48d7dfd0da8a1c7b040e67ae992ee0666/docs/images/favicon/favicon-16.png
--------------------------------------------------------------------------------
/docs/images/favicon/favicon-160.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/egen/ultimate-guide-to-bosh/c084daa48d7dfd0da8a1c7b040e67ae992ee0666/docs/images/favicon/favicon-160.png
--------------------------------------------------------------------------------
/docs/images/favicon/favicon-180.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/egen/ultimate-guide-to-bosh/c084daa48d7dfd0da8a1c7b040e67ae992ee0666/docs/images/favicon/favicon-180.png
--------------------------------------------------------------------------------
/docs/images/favicon/favicon-192.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/egen/ultimate-guide-to-bosh/c084daa48d7dfd0da8a1c7b040e67ae992ee0666/docs/images/favicon/favicon-192.png
--------------------------------------------------------------------------------
/docs/images/favicon/favicon-310.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/egen/ultimate-guide-to-bosh/c084daa48d7dfd0da8a1c7b040e67ae992ee0666/docs/images/favicon/favicon-310.png
--------------------------------------------------------------------------------
/docs/images/favicon/favicon-32.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/egen/ultimate-guide-to-bosh/c084daa48d7dfd0da8a1c7b040e67ae992ee0666/docs/images/favicon/favicon-32.png
--------------------------------------------------------------------------------
/docs/images/favicon/favicon-57.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/egen/ultimate-guide-to-bosh/c084daa48d7dfd0da8a1c7b040e67ae992ee0666/docs/images/favicon/favicon-57.png
--------------------------------------------------------------------------------
/docs/images/favicon/favicon-60.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/egen/ultimate-guide-to-bosh/c084daa48d7dfd0da8a1c7b040e67ae992ee0666/docs/images/favicon/favicon-60.png
--------------------------------------------------------------------------------
/docs/images/favicon/favicon-64.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/egen/ultimate-guide-to-bosh/c084daa48d7dfd0da8a1c7b040e67ae992ee0666/docs/images/favicon/favicon-64.png
--------------------------------------------------------------------------------
/docs/images/favicon/favicon-70.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/egen/ultimate-guide-to-bosh/c084daa48d7dfd0da8a1c7b040e67ae992ee0666/docs/images/favicon/favicon-70.png
--------------------------------------------------------------------------------
/docs/images/favicon/favicon-72.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/egen/ultimate-guide-to-bosh/c084daa48d7dfd0da8a1c7b040e67ae992ee0666/docs/images/favicon/favicon-72.png
--------------------------------------------------------------------------------
/docs/images/favicon/favicon-76.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/egen/ultimate-guide-to-bosh/c084daa48d7dfd0da8a1c7b040e67ae992ee0666/docs/images/favicon/favicon-76.png
--------------------------------------------------------------------------------
/docs/images/favicon/favicon-96.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/egen/ultimate-guide-to-bosh/c084daa48d7dfd0da8a1c7b040e67ae992ee0666/docs/images/favicon/favicon-96.png
--------------------------------------------------------------------------------
/docs/images/favicon/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/egen/ultimate-guide-to-bosh/c084daa48d7dfd0da8a1c7b040e67ae992ee0666/docs/images/favicon/favicon.ico
--------------------------------------------------------------------------------
/docs/images/favicon/faviconit-instructions.txt:
--------------------------------------------------------------------------------
1 | thanks for using faviconit!
2 | copy the files to your site and add this code inside the HTML tag:
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
--------------------------------------------------------------------------------
/docs/images/gcp/gcp-vpc-networks-bosh.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/egen/ultimate-guide-to-bosh/c084daa48d7dfd0da8a1c7b040e67ae992ee0666/docs/images/gcp/gcp-vpc-networks-bosh.png
--------------------------------------------------------------------------------
/docs/images/gcp/gcp-vpc-networks-firewall-rules.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/egen/ultimate-guide-to-bosh/c084daa48d7dfd0da8a1c7b040e67ae992ee0666/docs/images/gcp/gcp-vpc-networks-firewall-rules.png
--------------------------------------------------------------------------------
/docs/images/gcp/gcp-vpc-networks-route-details.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/egen/ultimate-guide-to-bosh/c084daa48d7dfd0da8a1c7b040e67ae992ee0666/docs/images/gcp/gcp-vpc-networks-route-details.png
--------------------------------------------------------------------------------
/docs/images/handdrawn/app-stack.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/egen/ultimate-guide-to-bosh/c084daa48d7dfd0da8a1c7b040e67ae992ee0666/docs/images/handdrawn/app-stack.jpg
--------------------------------------------------------------------------------
/docs/images/swlogo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/egen/ultimate-guide-to-bosh/c084daa48d7dfd0da8a1c7b040e67ae992ee0666/docs/images/swlogo.png
--------------------------------------------------------------------------------
/docs/images/virtualbox/vbox-no-envs.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/egen/ultimate-guide-to-bosh/c084daa48d7dfd0da8a1c7b040e67ae992ee0666/docs/images/virtualbox/vbox-no-envs.png
--------------------------------------------------------------------------------
/docs/images/virtualbox/vbox-running-bosh-env.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/egen/ultimate-guide-to-bosh/c084daa48d7dfd0da8a1c7b040e67ae992ee0666/docs/images/virtualbox/vbox-running-bosh-env.png
--------------------------------------------------------------------------------
/docs/images/zookeeper-deployment-aws.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/egen/ultimate-guide-to-bosh/c084daa48d7dfd0da8a1c7b040e67ae992ee0666/docs/images/zookeeper-deployment-aws.png
--------------------------------------------------------------------------------
/docs/images/zookeeper-deployment-google.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/egen/ultimate-guide-to-bosh/c084daa48d7dfd0da8a1c7b040e67ae992ee0666/docs/images/zookeeper-deployment-google.png
--------------------------------------------------------------------------------
/docs/index.md:
--------------------------------------------------------------------------------
1 | description: BOSH is an open source tool for release engineering, deployment, lifecycle management, and monitoring of distributed systems. This guide will place you in the middle of daily life with BOSH and gradually guide you toward understanding, and then deploying your own systems, and then through to deep understanding. You'll become a raving fan.
2 | image_path: /images/bookcover/book-cover-being-read.png
3 |
4 | # Ultimate Guide to BOSH
5 |
6 | [BOSH](https://bosh.io) is an open source tool for release engineering, deployment, lifecycle management, and monitoring of distributed systems.
7 |
8 | It's incredible. Huge companies are using it. Tiny companies are using it. You too could be using it.
9 |
10 | This is the [Ultimate Guide to BOSH](https://ultimateguidetobosh.com).
11 |
12 | 
13 |
14 | It will place you in the middle of daily life with BOSH and gradually guide you toward understanding, and then deploying your own systems, and then through to deep understanding. You'll become a raving fan.
15 |
16 |
Outstanding work @drnic ! I'll even forgive you for your IPv6 lines, since you'll be seeing a lot more v6 in BOSH if I have my way 😉 https://t.co/HnniySvSiM
17 |
18 |
19 | The guide is currently hosted at https://ultimateguidetobosh.com/. Each section finishes with a Next link to the next section. Press 'f' to activate search dropdown.
20 |
21 | The guide is written by Dr Nic Williams: a long-time user and contributor to BOSH, a member of the Cloud Foundry and BOSH communities, [prolific contributor](https://github.com/drnic) to open source projects, the previous CEO of leading Cloud Foundry BOSH consultancy [Stark & Wayne](https://www.starkandwayne.com), a [user of Twitter](https://twitter.com/drnic), and purveyor of humour, sarcasm, and Australianisms.
22 |
--------------------------------------------------------------------------------
/docs/introduction.md:
--------------------------------------------------------------------------------
1 | # Introduction
2 |
3 | ## WIP
4 |
5 | I recently started writing this. If you're reading this guide now, please let me know (I'll actively ask you to review bits as I write them) and please "Watch" this repo. Perhaps I can update it via GitHub Releases so you can get notifications of new sections or updates. Or better jokes.
6 |
7 | ## Guide to the Guide
8 |
9 | This Ultimate Guide to BOSH is to be read linearly. Each section will build upon the preceding sections.
10 |
11 | The guide uses sample commands and videos to show you real systems instead of expecting that you can deploy systems yourself on day 1.
12 |
13 | Later in the guide you will deploy your own BOSH and use it to deploy systems. At that point, you will install the `bosh` command-line tool, and you will need to decide which target cloud infrastructure you will use.
14 |
15 | If you'd like to share a section with someone, wave your mouse cursor over the heading and a permanent link icon will appear. You can click it or copy it. It is my invisible gift to you so you can give to others.
16 |
17 | If you'd like to fix some spelling, some grammar, or help out with the guide in some way, there is an "edit" button at the top right of each page. This will link you to the GitHub project page and let you submit a pull request. Good things happen to people who submit pull requests.
18 |
19 | Each text block has a copy-and-paste icon. Click it to copy the text block into your clipboard.
20 |
21 | ## Joyful Operations
22 |
23 | You're a professional. You're resourceful. You're a king maker. You keep your organisation in the business of winning.
24 |
25 | In past lives you might have been called a developer, a sysadmin, or a devops engineer.
26 |
27 | You're always on the lookout for better tools, better mental models, and better systems.
28 |
29 | I'm going to show you how I do some day-to-day activities using BOSH. You get to decide if you'd like to level up your superhero status and learn how to do this too. Learning is involved. Effort. New tools. New ecosystem. I definitely think it's worth it. Let me know what you decide!
30 |
31 | Deploy a 5-node cluster of ZooKeeper to Amazon AWS:
32 |
33 | ```
34 | git clone https://github.com/cppforlife/zookeeper-release
35 | export BOSH_ENVIRONMENT=aws
36 | export BOSH_DEPLOYMENT=zookeeper
37 | bosh deploy zookeeper-release/manifests/zookeeper.yml
38 | ```
39 |
40 |
41 | Sanity check that the ZooKeeper cluster is working:
42 |
43 | ```
44 | bosh run-errand smoke-tests
45 | ```
46 |
47 | Check the status of each node in the ZooKeeper cluster:
48 |
49 | ```
50 | bosh run-errand status
51 | ```
52 |
53 | Upgrade to new version of ZooKeeper:
54 |
55 | ```
56 | git pull
57 | bosh deploy zookeeper-release/manifests/zookeeper.yml
58 | ```
59 |
60 | Upgrade the base operating system to push out critical security patches:
61 |
62 | ```
63 | bosh upload-stemcell https://bosh.io/d/stemcells/bosh-aws-xen-hvm-ubuntu-trusty-go_agent
64 | bosh deploy zookeeper-release/manifests/zookeeper.yml
65 | ```
66 |
67 | If AWS deletes one of your VMs, heal your cluster by creating a new VM, reattaching the persistent disk, remounting it, and restarting all the processes to join the ZooKeeper node into the cluster:
68 |
69 | ```
70 | # do nothing, this resurrection will happen automatically
71 | ```
72 |
73 | List the health of each ZooKeeper server, including disks:
74 |
75 | ```
76 | bosh instances --vitals
77 | ```
78 |
79 | SSH into one of the ZooKeeper servers to check on something:
80 |
81 | ```
82 | bosh ssh zookeeper/0
83 | ```
84 |
85 | Run a command on each ZooKeeper server and display results:
86 |
87 | ```
88 | bosh ssh -c '/var/vcap/jobs/zookeeper/bin/ctl status' -r
89 | ```
90 |
91 | Tear down your ZooKeeper cluster, but retain the persistent disks for five days and then delete them:
92 |
93 | ```
94 | bosh delete-deployment
95 | ```
96 |
97 | ## What is BOSH?
98 |
99 | BOSH is a project of the Cloud Foundry Foundation. It was originally created to help the developers of Cloud Foundry to absolutely describe and test each commit and each release; and to help the site reliability engineers (SREs) tasked with running Cloud Foundry as a service.
100 |
101 | Cloud Foundry has a micro-services architecture - bespoke applications written in Ruby, Java, C, and Golang - combined with stateful data services such as PostgreSQL, Redis, and local disks for storing user-uploaded application code. The developers wanted to work with the SREs to reduce the time of upgrades to new releases, to reduce the time between new releases, to reduce the time to deploy security fixes, and to help SREs and developers communicate about issues in production.
102 |
103 | TODO twitter joke about laptop going into production https://twitter.com/oising/status/298464920717099009
104 |
105 | The solution required addressing the following challenges:
106 |
107 | * to have an absolute declaration of what specific versions of all bespoke and upstream projects combined together to form a "release";
108 | * to own responsibility for the lifecycle of the underlying infrastructure upon which Cloud Foundry would run, including healing activities after infrastructure failures;
109 | * to own responsibility for pushing out security patches to the base operating systems, the bespoke code, and the upstream dependencies;
110 | * to give developers and SREs the same tool to prevent "it works on my machine" scenarios.
111 |
112 | The "tool" that implemented this solution is a running server - a BOSH environment - which:
113 |
114 | * receives requests from operators, who primarily use the `bosh` CLI;
115 | * interacts with cloud infrastructures to provision and de-provision cloud servers and disks;
116 | * interacts with running servers to configure and monitor long-running processes;
117 | * monitors the health of cloud servers and performs remedial actions to recreate or fix any missing infrastructure.
118 |
119 | Today, small teams and large businesses are using BOSH to run a wide variety of systems including, but not limited to, platforms such as Cloud Foundry, Kubernetes, DC/OS, Docker, Habitat, and Nomad. It is used to run database clusters. It can run source control systems. It can run web applications.
120 |
121 | Some teams use it only for its provisioning/infrastructure lifecycles features, and use their own packaging, container, and configuration management tools.
122 |
123 | Some teams put their BOSH environment behind an API, such as the Open Service Broker API, and dynamically provision and de-provision entire systems on demand. For example, [Pivotal Container Services](https://pivotal.io/platform/pivotal-container-service) is an API driven system to deploy entire Kubernetes clusters, all using BOSH.
124 |
125 | ## Help Users Run Long-Term Systems
126 |
127 | The publisher of software ultimately cares about their users **running** their software. At first their users will trial the software: to feel it work, to believe that the software might solve the user's problems, and to get a feel for how to run the software in production. That's Day 1. Day 2 is every day onwards.
128 |
129 | Any system that runs for a few years will go through the following events:
130 |
131 | * **Upgrades to primary software.** For example, upgrading to new Apache ZooKeeper versions within a running cluster of ZooKeeper.
132 | * **Upgrades to secondary software dependencies.** For example, ZooKeeper runs upon Java so a ZooKeeper cluster will need to upgrade to new Java versions. We will need to perform dependency upgrades to, at the very least, push out critical security fixes. We might always want to use newer dependencies if they improve performance, reduce CPU or RAM usage, or have additional features. We might also need to upgrade dependencies if our primary software ceases to support ageing dependencies.
133 | * **Upgrades to operating system kernels and core software.** If your system is running upon an Ubuntu host machine and/or an Ubuntu container image, then you will need to upgrade or replace the base host machine and/or the containers as soon as possible after the security notices are published. For example, see [Ubuntu Security Notices](https://usn.ubuntu.com/usn/). Security vulnerabilities are continually discovered within all layers of each operating system distribution and pushed out throughout the year.
134 | * **Resizing of the infrastructure.** As a system becomes increasingly used by end users, or as it acquires more data, it will need to grow. Persistent disks will need to be enlarged. Host machines will need to be replaced by larger machines. Container constraints may need to be increased.
135 | * **Healing of the infrastructure.** Host machines disappear. Physical disks can corrupt. Your users' systems will need to heal.
136 | * **Debugging of the software and its dependencies.** The brilliance of self-hosting your own software as a service is that only your team needs to debug your software. Conversely, when you distribute your software to other operations teams, it will be those teams that need to debug the entire system. They will want to be able to perform remediation if they can. If not, next they will want to help the software publisher with debugging and resolution. They want their system to be running and healthy. They want their own end users to be happy.
137 | * **Monitoring of the software and its dependencies.** End users of running systems don't enjoy being the first and only line of notification of downtime. They want to know how they can monitor the running software and its dependencies, they want to know what to look for to actively detect imminent misbehaviour, and they definitely want to know if the system is currently failing.
138 | * **Backup/recovery of data.** If the operators of your software discover that they need to restore data from archives, how will they do that? How much data will be lost? How bad is this situation for the end users of the system? How bad is the situation for all stakeholders? To put it another way, if the operators of your software lose data, what apology email will they be sending out to their own users? What letter will their chairman be sending out to shareholders?
139 |
140 | Your software will need to support each of these lifecycle events. It is even better if you help your end users to make great choices about how they run your software in production.
141 |
142 | {==
143 |
144 | If you author your software and its BOSH release in parallel, you will be giving your end users the ability to run a system, rather than just some software to install.
145 |
146 | ==}
147 |
148 | Publishing a BOSH release is like taking your users on a packaged, guided tour of ancient Roman ruins.
149 |
150 | Shipping them installable software is like leaving a Post-It Note on their fridge, "We went to Rome. You should go some day."
151 |
152 | Publishing a BOSH release shows that you care about your users' long-term success in running the system.
153 |
154 | Shipping installable software is not much better than saying "Works on my machine."
155 |
156 | ## About the Author
157 |
158 | I did not invent BOSH. A group of engineers from the Cloud Foundry project at VMware conceived of BOSH and built the first iteration of BOSH. BOSH is architecturally still very similar to its original public incarnation in 2012.
159 |
160 | Nor have I ever worked on BOSH as a full-time job. The BOSH project is sponsored heavily by full-time employees from Pivotal, as well as IBM, and other Cloud Foundry Foundation members. Your joy and happiness from BOSH is in huge thanks to VMware, Pivotal, IBM, Stark & Wayne, and the vast community of contributors who have contributed in large and small ways to make it fantastic.
161 |
162 | My initial role in the history of BOSH was small - I was one of the first people outside VMware to be publicly excited about BOSH. My relationship with BOSH and its community has snowballed ever since.
163 |
164 | In the years before BOSH, I was the VP of Technology at Engine Yard. Engine Yard was a "devops as a service" or "platform as a service" company. Or more crassly, it was a web hosting company.
165 |
166 | Several years earlier, the Engine Yard platform had been hastily prototyped on the new [Amazon Web Services](https://en.m.wikipedia.org/wiki/Amazon_Web_Services) (AWS) platform using an also-new configuration management tool called Chef. The Engine Yard platform worked and many customers' entire businesses run successfully upon it to this day. The internal cost of allowing customers to continually run their applications - even if they didn't want to upgrade and maintain their code bases - was tremendous. Upgrading the Engine Yard platform was difficult. The surface area of the implicit contract we made with customers - the base operating system, available packages, the version of Chef - was vast. Every change we wanted to make needed to be considered from the perspective of 2000 different web applications, run by 2000 different development teams, running 2000 different businesses.
167 |
168 | Some of our internal systems were not as automated as others. We did not have a nice way to publish new AMIs to AWS. Since we didn't publish new AMIs, we also didn't initially have a way to share them with customers. We had spent a year preparing, curating, and releasing the second-ever edition of our base AMI, when I saw BOSH for the first time. The demonstration I was shown was incredible - a small change was made to a YAML file, they ran `bosh deploy`, and in the vCenter window it could be seen that all the VMs were being progressively destroyed and replaced by new ones built upon a new base machine image, with newly compiled packages for that machine image. It was also replacing VMs with bigger ones. And it was resizing the persistent disks for the databases. BOSH was incredible.
169 |
170 | ## Brief History of BOSH
171 |
172 | I was fortunate to see that demo. I had been invited to the VMware campus in Palo Alto, CA, on April 11, 2012, for the unveiling of "The Outer Shell" that VMware internally had been using to deploy Cloud Foundry. The Outer Shell was called BOSH. This is an acronym for "BOSH Outer Shell." All engineers know that recursion is funny.
173 |
174 | I invited myself to the VMware campus for two days to meet the developers of BOSH and Cloud Foundry and came away fascinated by the vast scope of problems that the BOSH team was trying to solve. The BOSH team were kind enough to either answer a question or fix BOSH so the question was void.
175 |
176 | In 2012, I was very publicly excited about BOSH on Twitter ([@drnic](https://twitter.com/drnic)) and I gave presentations at meetups and conferences about BOSH. I also began creating new open source projects to help myself and others to use BOSH and to create new BOSH releases. I also created the first [BOSH Getting Started](https://github.com/cloudfoundry-community-attic/LEGACY-bosh-getting-started) guide.
177 |
178 | Many of those early presentations and guides were helpful to the many people who've discovered BOSH and are using it to run production systems at huge scales.
179 |
180 | In 2013, the BOSH project was taken over by Pivotal engineering and has been gifted to the Cloud Foundry Foundation to secure its long term success as an open source, open community project. Thanks to Pivotal, IBM, and other members of the Cloud Foundry Foundation, the BOSH project has been receiving huge consistent investment to this day.
181 |
182 | There are many people in the history of BOSH who have directly made BOSH what it is, actively sponsored its investment, or evangelised it.
183 |
184 | * James Watters, SVP Product at Pivotal, has been the loudest cheerleader of BOSH on the planet.
185 | * Ferran Rodenas, has been my BOSH friend since 2012 and who created some of the greatest BOSH community contributions, including [docker-boshrelease](https://github.com/cloudfoundry-community/docker-boshrelease).
186 | * James Bayer, VP Project Management at Pivotal, was the creator of the Clam logo for BOSH. I love the clam.
187 | * Dmitriy Kalinin, Product Manager for BOSH, has been driving his incredible vision for BOSH.
188 | * Original BOSH team at VMware - Mark Lucovsky, Vadim Spivak, Oleg Shaldybin, Martin Englund - who had the original vision and execution to create the ultimate tool for release engineering, deployment, lifecycle management, and monitoring of distributed systems.
189 | * Stark & Wayne staff - I've been ever so lucky to have started an organisation that has attracted so many wonderful team members around the world, who've gone on to continuously expand the BOSH ecosystem with releases, tools, and 200+ blog posts.
190 |
191 | ## BOSH in Production
192 |
193 | BOSH is the core technology of Pivotal Ops Manager and its Pivotal Network delivery system for complex on-premise software systems. BOSH is the deployment technology used behind the scenes for [Pivotal Web Services](https://run.pivotal.io) which runs upon AWS.
194 |
195 | BOSH is the deployment technology used behind the scenes by [IBM Cloud](https://www.ibm.com/cloud/).
196 |
197 | BOSH is the deployment technology used by GE [Predix](https://www.predix.io/) which runs on various clouds and data centres.
198 |
199 | BOSH is the deployment technology used by Swisscom [appCloud](https://developer.swisscom.com/) which runs inside Swisscom data centres.
200 |
201 | These are huge companies who have small teams running huge production systems using BOSH.
202 |
203 | On the smaller end - our consultancy [Stark & Wayne](https://www.starkandwayne.com) - uses BOSH to run a variety of our internal systems across vSphere, AWS and Google Compute. (The rest of our systems run upon Cloud Foundry itself, such as https://www.starkandwayne.com and https://www.starkandwayne.com/blog).
204 |
205 | ## Why Write the Ultimate Guide to BOSH?
206 |
207 | I have been using BOSH since 2012. I have yet to find any system like BOSH, despite the fast paced and highly competitive fields of Platforms, Containers, DevOps, and Cloud Infrastructure.
208 |
209 | I didn't want to write a book. I had written a PhD thesis in 2001 and six people read it (two supervisors, three judges, and myself). Fortunately, this is enough people to be awarded a doctorate. Unfortunately, it doesn't really qualify as "sharing knowledge."
210 |
211 | I wrote my first blog post in 2006, and, once I quickly reached seven subscribers, I knew I'd found my preferred medium for sharing.
212 |
213 | I enjoy the continuous publication and feedback loop of blogging, and of sharing open source projects. I enjoy a relaxed writing style.
214 |
215 | I didn't want to write a "book" book. Not a boring book. Not a reference book.
216 |
217 | I want to share all the wonders of BOSH with you. I want you to use BOSH. I want you to feel great using BOSH. I want you to feel like a superhero. I want you to convince your friends and colleagues to use BOSH. I want you to help me evangelise BOSH.
218 |
219 | I want this to be a book that you enjoy reading. I hope you enjoy my manner of introducing each topic, and how the topics segue as we gradually introduce more complex topics. I hope you enjoy my sense of humour.
220 |
221 | I also want you to switch to Queen's English, to visit me in Australia, and to use the Oxford comma.
222 |
223 | ## Additional Sources of Information
224 |
225 | In addition to this Ultimate Guide to BOSH, there are some other sources of factual knowledge and tutorials.
226 |
227 | [BOSH documentation website](https://bosh.io/docs/) is very thorough and new features of BOSH now regularly appear simultaneously in this documentation site.
228 |
229 | Maria Shaldibina's [A Guide to Using BOSH](https://mariash.github.io/learn-bosh/) is a short step-by-step tutorial that includes provisioning BOSH within a local VirtualBox machine and the common commands for deploying systems.
230 |
231 | Duncan Winn's [Cloud Foundry: The Definitive Guide](http://shop.oreilly.com/product/0636920042501.do) includes four chapters introducing BOSH.
232 |
233 | Stark & Wayne's own blog (["bosh" tag](https://www.starkandwayne.com/blog/tag/bosh/)) has over fifty articles, tutorials, and tiny tips on BOSH.
234 |
235 | BOSH is an open source project. You can read the source code and learn how it works. A selection of repositories include:
236 |
237 | * https://github.com/cloudfoundry/bosh-cli - the `bosh` CLI
238 | * https://github.com/cloudfoundry/bosh - the BOSH director
239 | * https://github.com/cloudfoundry/bosh-agent - the BOSH agent
240 | * https://github.com/cloudfoundry/bosh-deployment - manifests for various permutations of deploying your own BOSH director
241 |
--------------------------------------------------------------------------------
/docs/print.css:
--------------------------------------------------------------------------------
1 | @media print {
2 | .md-main__inner.md-grid {
3 | padding-top: 0;
4 | }
5 | body > .md-container {
6 | padding-top: 0;
7 | }
8 | .md-content__inner {
9 | margin: 0;
10 | padding-top: 0;
11 | }
12 | .md-typeset {
13 | line-height: normal;
14 | }
15 | .md-typeset ol li, .md-typeset ul li {
16 | margin-bottom: 0;
17 | }
18 | .md-typeset h2 {
19 | margin: 2rem 0 0;
20 | }
21 | .md-typeset h1 {
22 | margin: 0;
23 | }
24 | .md-typeset blockquote, .md-typeset ol, .md-typeset p, .md-typeset ul {
25 | margin: 1ex 0;
26 | }
27 | }
28 |
--------------------------------------------------------------------------------
/docs/properties.md:
--------------------------------------------------------------------------------
1 | # Properties
2 |
3 | Most software allows for configuration by end users/operators. The method of configuration differs between software, such as environment variables, configuration files, command-line arguments, or runtime commands. A BOSH job template is a wrapper around the running of software and its method of configuration. All BOSH job templates are then configured in a homogeneous way: the deployment manifest provided to `bosh deploy`.
4 |
5 | You were first introduced to configuring a job template in [Update Job Template Properties](/deployment-updates/#update-job-template-properties). We were able to provide a `max_client_connections` property to the `zookeeper` job template from the `zookeeper` BOSH release:
6 |
7 | ```yaml hl_lines="7 8"
8 | instance_groups:
9 | - name: zookeeper
10 | instances: 5
11 | jobs:
12 | - name: zookeeper
13 | release: zookeeper
14 | properties:
15 | max_client_connections: 120
16 | ```
17 |
18 | This raises three excellent questions:
19 |
20 | * What other properties are available to the `zookeeper` job template from the `zookeeper` BOSH release?
21 | * What is the default value of `max_client_connections` if we had not explicitly provided it?
22 | * What does each property do?
23 |
24 | ## Available Properties
25 |
26 | The definition of available properties for a job template is in its source repository. Each job template has a `spec` file that documents the `properties` that its templates can use.
27 |
28 | For https://github.com/cppforlife/zookeeper-release we look in `jobs/zookeeper/spec`:
29 |
30 | ```
31 | git clone https://github.com/cppforlife/zookeeper-release
32 | cd zookeeper-release
33 | cat jobs/zookeeper/spec
34 | ```
35 |
36 | A sample of `properties` are:
37 |
38 | ```yaml hl_lines="15 16 17"
39 | properties:
40 | listen_address:
41 | description: "The address to listen for client connections"
42 | default: "0.0.0.0"
43 | client_port:
44 | description: "The port to listen for client connections"
45 | default: 2181
46 | quorum_port:
47 | description: "Apache Zookeeper Client quorum port"
48 | default: 2888
49 | leader_election_port:
50 | description: "Apache Zookeeper Client leader election port"
51 | default: 3888
52 | ...
53 | max_client_connections:
54 | description: "Limits the number of concurrent connections that a single client may make to a single member of the ZooKeeper ensemble"
55 | default: 60
56 | ...
57 | ```
58 |
59 | Each property should include a helpful description about the purpose of the property, and if the property is required or optional. It might also include a default value.
60 |
61 | ## Default Property Values
62 |
63 | The deployment manifest property `max_client_connections` is one of the `properties` described in the BOSH release source repository above. It has a default value. This means that a deployment manifest does not need to explicitly provide the `max_client_connections` property.
64 |
65 | Default values allow deployment manifests to be smaller, simpler, and easier to read. The original `spec` file above has over twenty properties each with useful default values. A deployment manifest would be twenty lines longer if it were to explicitly provide each property in the deployment manifest:
66 |
67 | ```yaml hl_lines="8 9 10 11 12"
68 | instance_groups:
69 | - name: zookeeper
70 | instances: 5
71 | jobs:
72 | - name: zookeeper
73 | release: zookeeper
74 | properties:
75 | listen_address: 0.0.0.0
76 | client_port: 2181
77 | quorum_port: 2888
78 | leader_election_port: 3888
79 | max_client_connections: 60
80 | ...
81 | ```
82 |
83 | ### Be Wary of Changing Default Values
84 |
85 | A potential downside of default property values is that they may change between BOSH release versions. If the previous release version had default `max_client_connections: 120`, but was changed to `max_client_connections: 5` in the latest version, then our system's behaviour and performance might be negatively affected if we do not realise and explicitly override the property to a higher value.
86 |
87 | Changes to default property values are not displayed when running `bosh deploy`.
88 |
89 | Ideally, the BOSH release developers will discuss any changes to property default values in their release notes. And that you will read those release notes.
90 |
91 | You can also use `git diff` to inspect any changes to `spec` files between versions. Try either of the following to view changes to one or all `spec` files, respectively:
92 |
93 | ```
94 | git diff v0.0.3..v0.0.6 -- jobs/zookeeper/spec
95 | git diff v0.0.3..v0.0.6 -- jobs/*/spec
96 | ```
97 |
98 | We can see that two properties from `zookeeper` have been removed, one new property added, ==and fortunately no properties have had their default values changed==:
99 |
100 | ```
101 | - user:
102 | - description: "User which will own the Apache ZooKeeper services"
103 | - default: "zookeeper"
104 | - group:
105 | - description: "Group which will own the Apache ZooKeeper services"
106 | - default: "vcap"
107 | ...
108 | + force_sync:
109 | + description: "Requires updates to be synced to media of the transaction log before finishing processing the update. Setting to 'no' improves performance dramatically at the cost of losing recent commits if all nodes crash at the same time"
110 | + default: "yes"
111 | ```
112 |
113 | ## What Does Each Property Do?
114 |
115 | Obviously, a property declared in a `spec` file is only useful if it is used in a template to ultimately set up/change/control the behaviour of processes that are run on BOSH instances.
116 |
117 | To use a property within a template we use the Ruby programming language [ERb template system](https://en.wikipedia.org/wiki/ERuby), and the `p` helper:
118 |
119 | ```
120 | <%= p('some-property') %>
121 | ```
122 |
123 | During `bosh deploy` this entire snippet will be replaced by the `properties.some-property` value from the deployment manifest, or the property's default value in the `spec` file.
124 |
125 | If a configuration item is optional then a template can use the `if_p` helper:
126 |
127 | ```
128 | <% if_p('some-property') do |value| %>
129 | optional-config-item: <%= value %>
130 | <% end %>
131 | ```
132 |
133 | We will revisit the Ruby ERb template syntax later when we look at creating our own BOSH releases.
134 |
135 | ## Discovery of Properties in Templates
136 |
137 | deployment manifest -> job template <- upstream configuration files/documentation
138 |
139 | It is ultimately a great idea to read through the entire job template and all its template files to fully understand how each property is used.
140 |
141 | For the `zookeeper` job template, the template file [`jobs/zookeeper/templates/zoo.cfg.erb`](https://github.com/cppforlife/zookeeper-release/blob/6f073fdbbf411babbde11085abb7f43cced8b8d3/jobs/zookeeper/templates/zoo.cfg.erb) shows how `spec` properties are being used:
142 |
143 | ```
144 | ...
145 | autopurge.purgeInterval=<%= p('autopurge_purge_interval') %>
146 | autopurge.snapRetainCount=<%= p('autopurge_snap_retain_count') %>
147 | clientPortAddress=<%= p('listen_address') %>
148 | ...
149 | maxClientCnxns=<%= p('max_client_connections') %>
150 | ...
151 | ```
152 |
--------------------------------------------------------------------------------
/docs/service-discovery.md:
--------------------------------------------------------------------------------
1 | # Service Discovery
2 |
3 | Service discovery is a mechanism for applications to discover and connect to other sub-systems. For example, a ZooKeeper node needs to be able to discover and connect to the other members of its cluster; an application to discover and connect to its database; and a mobile or internet-of-things application to discover and connect to its backend services.
4 |
5 | Systems deployed by BOSH may either want to discover and connect to other systems; or conversely they may want to advertise and be available for connection by clients.
6 |
7 | The primary roles of a BOSH environment are to manage the lifecycle of the cloud infrastructure, and the lifecycle of the software running upon it. However, BOSH job templates do have some facilities for service discovery with other job templates, and there are some popular options outside of BOSH for service discovery as well.
8 |
9 | BOSH offers three facilities to aid service discovery:
10 |
11 | * Static or elastic IP allocation
12 | * BOSH DNS
13 | * BOSH Links
14 |
15 | There are also many other additional systems that you could use, or even run them with BOSH, to facilitate service discovery between applications. For example:
16 |
17 | * Apache ZooKeeper, Hashicorp Consul, CoreOS etcd
18 | * Cloud Foundry Routing
19 | * Chef Habitat
20 | * Open Service Broker API
21 | * Apcera NATS, RabbitMQ
22 |
23 | ## Static or elastic IP allocation
24 |
25 | The least complex method for a client application to discover and connect with a server backend is to explicitly configure the client application in advance. The least complex method of this least complex method is to provide client applications with IP addresses that are promised to always point to the backend service.
26 |
27 | A BOSH deployment can do this with either [static IPs](/networking/#manual-static-addresses) or with [virtual/elastic IPs](/networking/#virtual-ip-addresses).
28 |
29 | An example of the former is to add a list of `static_ips` to an instance group's `networks`:
30 |
31 | ```yaml hl_lines="6 7 8 9 10 11"
32 | instance_groups:
33 | - name: zookeeper
34 | instances: 5
35 | networks:
36 | - name: default
37 | static_ips:
38 | - 10.0.0.220
39 | - 10.0.0.221
40 | - 10.0.0.222
41 | - 10.0.0.223
42 | - 10.0.0.224
43 | ```
44 |
45 | In this scenario, you would commit in advance to using the five IP addresses. You would write them down on two identical Post-It Notes. You'd give one Post-It Note to the team deploying ZooKeeper as above, and you'd give the other Post-It Note to the client team that wants to connect to the future ZooKeeper cluster. Instead of Post-It Notes you might send an email. But my guess is you've already ordered Post-It Notes from Amazon.com earlier in the paragraph when I first mentioned them.
46 |
47 | ## BOSH DNS
48 |
49 | Static IP addresses might be the least complex method - you buy some Post-It Notes, you pick some IP addresses - but what if you're a normal human being and have no ambitions to keep track of what systems have been allocated what IP addresses. Or you don't have Post-It Notes.
50 |
51 | Your BOSH environment can have the ability to automatically advertise all deployments' instances as DNS hostnames within all other BOSH instances. This means that any deployment can know in advance what its hostnames will be, without knowing in advance the IP addresses allocated or using static IPs.
52 |
53 | First, confirm that your BOSH environment has this BOSH DNS feature enabled. Run `bosh env` to view your environment's attributes:
54 |
55 | ``` hl_lines="10"
56 | > bosh env
57 | Using environment '192.168.50.6' as user 'admin' (openid, bosh.admin)
58 |
59 | Name Bosh Lite Director
60 | UUID d855fe91-a9cb-43be-b977-f44eea870775
61 | Version 263.2.0 (00000000)
62 | CPI warden_cpi
63 | Features compiled_package_cache: disabled
64 | config_server: disabled
65 | dns: enabled
66 | snapshots: disabled
67 | User admin
68 | ```
69 |
70 | TODO: why is `dns: disabled` appearing on my `-o local_dns.yml` bosh-lite?
71 |
72 | TODO: what about GCP?
73 |
74 | TODO: finish section
75 |
76 | ## BOSH Links
77 |
78 | The final service discovery option provided by your BOSH environment is called "links". At the time a job template is being installed into an instance, it is provided with information about other job templates within the same deployment, and possibly other deployments.
79 |
80 | BOSH links is the method by which the five instances in our `zookeeper` deployment can find each other and form a cluster.
81 |
--------------------------------------------------------------------------------
/docs/spread-the-word/meetups.md:
--------------------------------------------------------------------------------
1 | # Spread the word at meetups
2 |
3 | All local meetups would love to have an introduction to BOSH. Programming language meetups, devops, Docker, CI/CD etc. I will be your number one fan when you share BOSH at a local meetup. Please let me know how it goes!
4 |
5 | 
6 |
7 | If you have put your slides or a recording of your talk online, please update this page with a link.
8 |
9 | ---
10 |
11 | [Michael Richardson](https://twitter.com/m_richo) - [Persistence in the Cloud with BOSH](https://www.slideshare.net/m_richardson/persistence-in-the-cloud-with-bosh)
12 | ([Sydney DevOps Meetup](https://www.meetup.com/devops-sydney/events/235494979/) - 16 Feb 2017)
13 |
--------------------------------------------------------------------------------
/docs/stemcells.md:
--------------------------------------------------------------------------------
1 | # Stemcells
2 |
3 | Cloud infrastructures require a pre-existing virtual filesystem, also called a machine image, to provision new cloud servers. For example, to provision an AWS EC2 server you will need an AWS AMI.
4 |
5 | The BOSH director expects that the cloud servers it provisions will behave in a certain way when it wants to interact with them. For example, the BOSH director expects that each cloud server will have a BOSH Agent installed and running. We will introduce the BOSH Agent below.
6 |
7 | Towards this dual requirement - a preexisting machine image, which is pre-populated with the BOSH Agent and other software and configuration - we now introduce BOSH Stemcells.
8 |
9 | ## Stemcells in Deployment Manifests
10 |
11 | My continuing objective with the Ultimate Guide to BOSH is that you feel good as you are reading each section in sequence. Towards this goal, the abridged deployment manifests and `cloud-config` examples have omitted sections that are actually required by the BOSH director. We now introduce the top-level `stemcells` attribute to our deployment manifests, and the `stemcell` attribute for each instance group.
12 |
13 | ```yaml
14 | name: zookeeper
15 |
16 | releases:
17 | - name: zookeeper
18 | version: 0.0.7
19 | url: git+https://github.com/cppforlife/zookeeper-release
20 |
21 | stemcells:
22 | - alias: ubuntu
23 | os: ubuntu-trusty
24 | version: latest
25 |
26 | instance_groups:
27 | - name: zookeeper
28 | instances: 5
29 | stemcell: ubuntu
30 |
31 | - name: smoke-tests
32 | lifecycle: errand
33 | instances: 1
34 | stemcell: ubuntu
35 | ```
36 |
37 | Let's look at the `stemcells` section:
38 |
39 | ```yaml
40 | stemcells:
41 | - alias: ubuntu
42 | os: ubuntu-trusty
43 | version: latest
44 | ```
45 |
46 | Although each BOSH release will have an implicit preference for a stemcell (most BOSH releases are developed/tested/deployed against Ubuntu stemcells), there is no metadata or contract within a BOSH release to help `bosh deploy` fail fast or fail with helpful error messages if you use the wrong stemcell.
47 |
48 | In the case of the `zookeeper` deployment manifest, the selection of an `os: ubuntu-trusty` stemcell can be discovered from the project's own [sample deployment manifest](https://github.com/cppforlife/zookeeper-release/blob/6f073fdbbf411babbde11085abb7f43cced8b8d3/manifests/zookeeper.yml#L9-L12). Good BOSH releases or deployment projects will provide sample BOSH deployment manifests.
49 |
50 | The selection of `os: ubuntu-trusty` means that the BOSH director must already have an `ubuntu-trusty` stemcell preloaded before running `bosh deploy`. At the time of writing there are no facilities in the BOSH CLI or the BOSH director to automatically discover, download, and install the required stemcell for a deployment manifest.
51 |
52 | The `version: latest` means that the deployment will use the latest available stemcell that has been uploaded to the BOSH director.
53 |
54 | To discover the available stemcells in your BOSH director:
55 |
56 | ```
57 | bosh stemcells
58 | ```
59 |
60 | An example output for a BOSH director with the Google CPI might be:
61 |
62 | ```
63 | Name Version OS CPI CID
64 | bosh-google-kvm-ubuntu-trusty-go_agent 3445.11 ubuntu-trusty - stemcell-04231868...
65 | ~ 3421.11 ubuntu-trusty - stemcell-61295c90...
66 | bosh-google-kvm-windows2012R2-go_agent 1200.5.0-build.1 windows2012R2 - ...packer-1499974558
67 | ```
68 |
69 | In this example we can see two `ubuntu-trusty` stemcells with the latest version `3445.11` available for deployments. We can also see a `windows2012R2` stemcell has been uploaded and is available for deployments that include BOSH releases targeting Windows.
70 |
71 | The `alias: ubuntu` attribute gives the `os` and `version` combination a name that we can now use within `instance_groups`. From our example manifest above, we added `stemcell: ubuntu` to each instance group:
72 |
73 | ```yaml
74 | instance_groups:
75 | - name: zookeeper
76 | instances: 5
77 | stemcell: ubuntu
78 |
79 | - name: smoke-tests
80 | lifecycle: errand
81 | instances: 1
82 | stemcell: ubuntu
83 | ```
84 |
85 | ## Updating Stemcells
86 |
87 | Anytime that `bosh deploy` is run, `version: latest` will be adjusted to any newer stemcells that have been uploaded to the BOSH director. The BOSH director will display this proposed update before commencing the deployment.
88 |
89 | In this example, we have deployed a small (2 instance) zookeeper deployment on AWS using version 3541.9 of the Ubuntu Trusty AWS Xen HVM stemcell.
90 |
91 | ```
92 | $ bosh stemcells
93 | Name Version OS CPI CID
94 | bosh-aws-xen-hvm-ubuntu-trusty-go_agent 3541.9* ubuntu-trusty - ami-3207964a light
95 |
96 | $ bosh deployments
97 | Name Release(s) Stemcell(s) Team(s) Cloud Config
98 | zookeeper zookeeper/0.0.7 bosh-aws-xen-hvm-ubuntu-trusty-go_agent/3541.9 - latest
99 | ```
100 |
101 | However since this stemcell was uploaded, a new version, 3541.10 has been released and so now we will update our stemcell with the new version.
102 |
103 | ```
104 | bosh upload-stemcell https://bosh.io/d/stemcells/bosh-aws-xen-hvm-ubuntu-trusty-go_agent
105 |
106 | Task 102
107 |
108 | Task 102 | 15:37:21 | Update stemcell: Downloading remote stemcell (00:00:01)
109 | Task 102 | 15:37:22 | Update stemcell: Extracting stemcell archive (00:00:00)
110 | Task 102 | 15:37:22 | Update stemcell: Verifying stemcell manifest (00:00:00)
111 | Task 102 | 15:37:28 | Update stemcell: Checking if this stemcell already exists (00:00:00)
112 | Task 102 | 15:37:28 | Update stemcell: Uploading stemcell bosh-aws-xen-hvm-ubuntu-trusty-go_agent/3541.10 to the cloud (00:00:08)
113 | Task 102 | 15:37:36 | Update stemcell: Save stemcell bosh-aws-xen-hvm-ubuntu-trusty-go_agent/3541.10 (ami-3fb42c47 light) (00:00:00)
114 |
115 | Task 102 Started Thu Mar 29 15:37:21 UTC 2018
116 | Task 102 Finished Thu Mar 29 15:37:36 UTC 2018
117 | Task 102 Duration 00:00:15
118 | Task 102 done
119 |
120 | Succeeded
121 |
122 | $ bosh stemcells
123 | Name Version OS CPI CID
124 | bosh-aws-xen-hvm-ubuntu-trusty-go_agent 3541.10 ubuntu-trusty - ami-3fb42c47 light
125 | ~ 3541.9* ubuntu-trusty - ami-3207964a light
126 | ```
127 |
128 | On the next deployment of zookeeper - it will be rebuilt on the newer version of Ubuntu Trusty, 3541.10. This will require a re-compile of the application and then the underlying VMs will be re-created once the new version is compiled.
129 |
130 | ```
131 | $ bosh deploy -d zookeeper manifests/zookeeper.yml
132 |
133 | Using deployment 'zookeeper'
134 |
135 | Release 'zookeeper/0.0.7' already exists.
136 |
137 | stemcells:
138 | + - alias: default
139 | + os: ubuntu-trusty
140 | + version: '3541.10'
141 | - - alias: default
142 | - os: ubuntu-trusty
143 | - version: '3541.9'
144 |
145 | Continue? [yN]: y
146 |
147 | Task 103
148 |
149 | Task 103 | 15:39:08 | Preparing deployment: Preparing deployment (00:00:00)
150 | Task 103 | 15:39:08 | Preparing package compilation: Finding packages to compile (00:00:00)
151 | Task 103 | 15:39:08 | Compiling packages: golang-1.8-linux/3eac55db0483de642b1be389966327e931db3e3f (00:01:41)
152 | Task 103 | 15:40:49 | Compiling packages: zookeeper/43ee655b89f8a05cc472ca997e8c8186457241c1 (00:00:10)
153 | Task 103 | 15:40:59 | Compiling packages: java/c524e46e61b37894935ae28016973e0e8644fcde (00:00:29)
154 | Task 103 | 15:41:28 | Compiling packages: smoke-tests/ec91e258c41471227a759c2749e7295cb65eff5a (00:00:13)
155 | Task 103 | 15:42:17 | Updating instance zookeeper: zookeeper/587013fa-a927-44b8-9ef4-1a2b73eca415 (0) (canary) (00:03:17)
156 | Task 103 | 15:45:34 | Updating instance zookeeper: zookeeper/d5f3430c-5393-44bf-8030-78732846bacd (1) (canary) (00:03:21)
157 |
158 | Task 103 Started Thu Mar 29 15:39:08 UTC 2018
159 | Task 103 Finished Thu Mar 29 15:48:55 UTC 2018
160 | Task 103 Duration 00:09:47
161 | Task 103 done
162 |
163 | Succeeded
164 | ```
165 |
166 | Once this is complete, we can see that the deployment is now running on stemcell version 3541.10.
167 |
168 |
169 | ```
170 | $ bosh deployments
171 | Name Release(s) Stemcell(s) Team(s) Cloud Config
172 | zookeeper zookeeper/0.0.7 bosh-aws-xen-hvm-ubuntu-trusty-go_agent/3541.10 - latest
173 |
174 | ```
175 |
176 |
177 |
178 | ## Finding Stemcells
179 |
180 | You can discover stemcells for your CPI at http://bosh.io/stemcells. At the time of writing, there are stemcells published for the following major operating system distributions:
181 |
182 | * Ubuntu Linux
183 | * CentOS Linux
184 | * Windows
185 |
186 | The Ubuntu stemcells are the most commonly used base images, are the most battle tested in production systems around the world, and seem to the author to have the most security updates pushed out. I would recommend you always use an Ubuntu stemcell unless you have a strong requirement to choose an alternate.
187 |
188 | The BOSH release you are deploying will have a specific requirement for either a Linux or Windows stemcell. If the BOSH release specifically requires CentOS Linux, then it will indicate this in its documentation and sample deployment manifests.
189 |
190 | ## Light Stemcells
191 |
192 | On public cloud infrastructures - AWS, GCP, Azure - the BOSH Core Team publish shared machine images that are referenced by the stemcell file.
193 |
194 | To quickly confirm what I mean, let's download a stemcell for AWS and look inside it.
195 |
196 | ```
197 | curl -o stemcell-aws.tgz https://s3.amazonaws.com/bosh-aws-light-stemcells/light-bosh-stemcell-3445.11-aws-xen-hvm-ubuntu-trusty-go_agent.tgz
198 | ```
199 |
200 | The `stemcell.MF` file within the archive is a YAML file referencing each Amazon Machine Image (AMI) for each region:
201 |
202 | ```
203 | tar -axf stemcell-aws.tgz stemcell.MF -O
204 | ```
205 |
206 | The output will look like:
207 |
208 | ```yaml
209 | ---
210 | name: bosh-aws-xen-hvm-ubuntu-trusty-go_agent
211 | version: '3445.11'
212 | bosh_protocol: '1'
213 | sha1: da39a3ee5e6b4b0d3255bfef95601890afd80709
214 | operating_system: ubuntu-trusty
215 | cloud_properties:
216 | ami:
217 | us-gov-west-1: ami-4a0d8e2b
218 | ap-northeast-1: ami-17d61871
219 | ap-northeast-2: ami-8f409be1
220 | ap-south-1: ami-7076301f
221 | ap-southeast-1: ami-5bd9ae38
222 | ap-southeast-2: ami-eff81e8d
223 | ca-central-1: ami-31a41d55
224 | eu-central-1: ami-de19afb1
225 | eu-west-1: ami-c0cf03b9
226 | eu-west-2: ami-eddbc889
227 | sa-east-1: ami-f89ce194
228 | us-east-1: ami-9a43afe0
229 | us-east-2: ami-ffab899a
230 | us-west-1: ami-5493a534
231 | us-west-2: ami-c03ec3b8
232 | cn-north-1: ami-296cbc44
233 | ```
234 |
235 | We can confirm each AMI is a pre-created public AMI. For the `us-east-1` AMI:
236 |
237 | [](https://console.aws.amazon.com/ec2/v2/home?region=us-east-1#Images:visibility=public-images;search=ami-9a43afe0;sort=name)
238 |
239 | For AWS alone, the BOSH Core Team are creating 16 different AMIs in 16 different AWS regions for each AWS light stemcell.
240 |
241 | As a BOSH user, you do not need to correctly select the right `ami-1234567` image. The BOSH director knows which region you are using and will use the appropriate public machine image from the list above.
242 |
243 | ## On-Premise Stemcells
244 |
245 | If you are using an on-premise cloud infrastructure such as vSphere or OpenStack then your stemcells cannot reference pre-built machine images. Instead, the BOSH director will have the task of creating machine images within your cloud infrastructure that it can use for provisioning cloud servers.
246 |
247 | These stemcells will be substantially larger than "light" stemcells as they contain the entire machine image. On-premise stemcells will be 300+ MB in size, whereas "light" stemcells are tiny 20KB files (discussed in preceding section).
248 |
249 | [](http://bosh.io/stemcells)
250 |
251 | Your cloud infrastructure BOSH CPI has the responsibility of converting a stemcell into a machine image.
252 |
253 | For example, the OpenStack CPI will [interact with OpenStack Glance](https://github.com/cloudfoundry-incubator/bosh-openstack-cpi-release/blob/master/docs/openstack-api-calls.md#all-calls-for-api-endpoint-image-glance) to convert a stemcell into an OpenStack Machine Image.
254 |
255 | ## Agent
256 |
257 | One of the primary reasons for BOSH stemcells, rather than allowing you to bring your own base machine images, is that they have the BOSH agent preinstalled.
258 |
--------------------------------------------------------------------------------
/docs/stylesheets/extra.css:
--------------------------------------------------------------------------------
1 | div.x3 {
2 | width: 43%;
3 | color: silver;
4 | }
5 |
6 | div.x3 img{
7 | width: 50%;
8 | margin-right: 50%;
9 | }
10 |
11 | div.x3 a:link {
12 | color: white;
13 | }
14 |
15 | div.x3 a:visited {
16 | color: white;
17 | }
18 |
19 | div.x3 a:active {
20 | color: white;
21 | }
22 |
--------------------------------------------------------------------------------
/docs/targeting-bosh-envs.md:
--------------------------------------------------------------------------------
1 | # Targeting BOSH Environments and Deployments
2 |
3 | Many `bosh` CLI commands require you to explicitly reference which BOSH deployment name, and the details about the BOSH environment (specifically the BOSH director API).
4 |
5 | You can provide the full array of information using `bosh` CLI flags:
6 |
7 | ```
8 | bosh instances \
9 | --deployment zookeeper \
10 | --environment https://192.168.50.6:25555 \
11 | --ca-cert path/to/root-ca \
12 | --client admin \
13 | --client-secret password
14 | ```
15 |
16 | You can avoid typing secrets into your shell by dynamically extracting them from files:
17 |
18 | ```
19 | bosh instances \
20 | --deployment zookeeper \
21 | --environment https://192.168.50.6:25555 \
22 | --ca-cert "$(bosh int path/to/bosh/creds.yml --path /director_ssl/ca)" \
23 | --client admin \
24 | --client-secret "$(bosh int path/to/bosh/creds.yml --path /admin_password)"
25 | ```
26 |
27 | Alternately, each of the flags above can be declared using an environment variable.
28 |
29 | ``` hl_lines="7"
30 | export BOSH_DEPLOYMENT=zookeeper
31 | export BOSH_ENVIRONMENT=https://192.168.50.6:25555
32 | export BOSH_CA_CERT="$(bosh int path/to/bosh/creds.yml --path /director_ssl/ca)"
33 | export BOSH_CLIENT=admin
34 | export BOSH_CLIENT_SECRET="$(bosh int path/to/bosh/creds.yml --path /admin_password)"
35 |
36 | bosh instances
37 | ```
38 |
39 | ## Alias a BOSH Environment
40 |
41 | It will quickly become tiresome to type all this information. No one does that. Continuous integration scripts might do this, but not you nor I. We like short cuts.
42 |
43 | The `bosh` CLI allows us to alias each BOSH environment URL and root CA with a meaningful name. For example, in the tutorial for setting up a local VirtualBox BOSH environment we used the alias `vbox`:
44 |
45 | ``` hl_lines="1"
46 | bosh alias-env vbox \
47 | --environment https://192.168.50.6:25555 \
48 | --ca-cert "$(bosh int path/to/vbox/creds.yml --path /director_ssl/ca)"
49 | ```
50 |
51 | ## Login to a BOSH Environment
52 |
53 | Once you have aliased an environment you can also cache the authentication credentials for that environment.
54 |
55 | That is the most elaborate way I could describe "log in":
56 |
57 | ```
58 | export BOSH_ENVIRONMENT=vbox
59 | bosh login
60 | ```
61 |
62 | Or if you want to use `--environment` flag:
63 |
64 | ```
65 | bosh --environment vbox login
66 | ```
67 |
68 | ## Simplified Targeting
69 |
70 | After aliasing and logging in to an environment, we can now simplify all our commands.
71 |
72 | We can now express our `bosh instances` command from above with flags:
73 |
74 | ```
75 | bosh instances \
76 | --deployment zookeeper \
77 | --environment vbox
78 | ```
79 |
80 | Or with shortened flags:
81 |
82 | ```
83 | bosh instances \
84 | -d zookeeper \
85 | -e vbox
86 | ```
87 |
88 | We can also place the flags before the subcommand:
89 |
90 | ```
91 | bosh -e vbox -d zookeeper instances
92 | ```
93 |
94 | Or, we can use environment variables:
95 |
96 | ```
97 | export BOSH_ENVIRONMENT=vbox
98 | export BOSH_DEPLOYMENT=zookeeper
99 |
100 | bosh instances
101 | ```
102 |
103 | {==
104 |
105 | I personally prefer using the `BOSH_ENVIRONMENT` and `BOSH_DEPLOYMENT` environment variables to target BOSH environments and their deployments.
106 |
107 | ==}
108 |
109 | Especially for you reading the Ultimate Guide to BOSH, I felt that adding `-e environment-alias -d deployment-name` to every command in every example was repetitive, verbose, and repetitive.
110 |
111 | ## Available Aliased Environments
112 |
113 | If you've forgotten what aliases you've given your environments you can list them all:
114 |
115 | ```
116 | bosh envs
117 | ```
118 |
119 | ## Forgetting the Environment Alias
120 |
121 | Sometimes you will forget to provide the environment alias. This is not a career-altering mistake.
122 |
123 | ``` hl_lines="2"
124 | > bosh deploy manifests/zookeeper.yml
125 | Expected non-empty Director URL
126 |
127 | Exit code 1
128 | ```
129 |
130 | Run the command again with the `-e` flag or set the `BOSH_ENVIRONMENT` environment variable.
131 |
132 | ## Forgetting the Deployment Name
133 |
134 | Sometimes you will forget to provide the deployment name. You probably also forgot the environment alias.
135 |
136 | ``` hl_lines="4"
137 | > bosh deploy zookeeper-release/manifests/zookeeper.yml
138 | Using environment '10.0.0.4' as client 'admin'
139 |
140 | Expected non-empty deployment name
141 |
142 | Exit code 1
143 | ```
144 |
145 | Run the command again with the `-d` flag or set the `BOSH_DEPLOYMENT` environment variable.
146 |
--------------------------------------------------------------------------------
/docs/todo.md:
--------------------------------------------------------------------------------
1 | # One-Off Tasks with Errands
2 |
3 | TODO
4 |
5 | # Availability Zones
6 |
7 | TODO
8 |
9 | # Director authentication and authorisation
10 |
11 | TODO
12 |
13 | # New Person Ideas
14 |
15 | FinKit - Gareth Evans, Anouska
16 |
17 | started with bosh.io - loads of information, but hard to know what to look for
18 |
19 | Give users access to GCP, not to a BOSH on GCP.
20 |
21 |
22 | Rona - `bbl` is the starting point; `bosh-deployment` is another (kubo-deployment is another).
23 | Gives new users a BOSH director with running deployments rather than blank cloud or blank director.
24 |
25 |
26 | As a chef person, what is BOSH?
27 |
28 |
29 | When new ppl "get it"
30 |
31 | * tried to kill machines but BOSH resurrector recreated them
32 | * stemcell upgrade; and be able to explain exactly how the patching process works
33 | * resizing VMs and the persistent data is still there
34 | * resizing persistent disks
35 |
36 |
37 | What is BOSH?!?
38 |
39 | BOSH is to Cloud Foundry, as Cloud Foundry is to your app.
40 |
41 | What is BOSH architecture?
42 |
--------------------------------------------------------------------------------
/docs/tutorials/bosh-lite-virtualbox.md:
--------------------------------------------------------------------------------
1 | # Complete Deployment Example
2 |
3 | In this section we will start with nothing except your local laptop or desktop computer, create a BOSH environment using VirtualBox, and deploy the `zookeeper` cluster we frequently refer to in the Ultimate Guide to BOSH.
4 |
5 | ## Dependencies
6 |
7 | You will need the `bosh` CLI.
8 |
9 | For our VirtualBox environment you will also need to download and install Oracle VirtualBox.
10 |
11 | Install the `git` CLI to allow us to clone the `bosh-deployment` and `zookeeper-release` project repositories.
12 |
13 | ## Create workspace
14 |
15 | ```bash
16 | mkdir -p ~/workspace
17 | cd ~/workspace
18 | ```
19 |
20 | ## Create BOSH environment
21 |
22 | We have not yet discussed how to create a BOSH environment for your cloud infrastructure. We will absolutely 100% discuss this topic for your cloud infrastructure. Later. For now, we will copy and paste some instructions that will setup a BOSH environment to use VirtualBox.
23 |
24 | Create a workspace folder and clone the `bosh-deployment` repository. It contains nearly everything you need to create a BOSH environment for your cloud infrastructure.
25 |
26 | ```
27 | mkdir -p bosh-env-vbox
28 | cd bosh-env-vbox
29 | git clone https://github.com/cloudfoundry/bosh-deployment
30 | ```
31 |
32 | The following fully-formed command will download the remaining dependencies (for example the VirtualBox CPI, the `bosh` BOSH release, etc), provision a new VirtualBox VM, and use it to run the BOSH director and other subsystems of a BOSH environment:
33 |
34 | ```
35 | bosh create-env bosh-deployment/bosh.yml \
36 | --state vbox/state.json \
37 | -o bosh-deployment/virtualbox/cpi.yml \
38 | -o bosh-deployment/virtualbox/outbound-network.yml \
39 | -o bosh-deployment/bosh-lite.yml \
40 | -o bosh-deployment/bosh-lite-runc.yml \
41 | -o bosh-deployment/jumpbox-user.yml \
42 | -o bosh-deployment/uaa.yml \
43 | -o bosh-deployment/credhub.yml \
44 | --vars-store vbox/creds.yml \
45 | -v director_name="Bosh-Lite-Director" \
46 | -v internal_ip=192.168.50.6 \
47 | -v internal_gw=192.168.50.1 \
48 | -v internal_cidr=192.168.50.0/24 \
49 | -v outbound_network_name=NatNetwork
50 | ```
51 |
52 | Visit the VirtualBox application to confirm a new VM has been created:
53 |
54 | 
55 |
56 | TODO write nice text
57 |
58 | ```
59 | bosh -e 192.168.50.6 alias-env vbox --ca-cert <(bosh int vbox/creds.yml --path /director_ssl/ca)
60 | ```
61 |
62 | ```
63 | bosh int vbox/creds.yml --path /admin_password
64 | ```
65 |
66 | ```
67 | export BOSH_ENVIRONMENT=vbox
68 | bosh login
69 | ```
70 |
71 | ```
72 | bosh update-cloud-config bosh-deployment/warden/cloud-config.yml
73 | ```
74 |
75 | ```
76 | wget --content-disposition https://bosh.io/d/stemcells/bosh-warden-boshlite-ubuntu-xenial-go_agent
77 | bosh upload-stemcell bosh-stemcell-*-warden-boshlite-ubuntu-xenial-go_agent.tgz
78 | ```
79 |
80 | ## SSH into BOSH Environment
81 |
82 | ```
83 | mkdir ssh
84 | chmod 700 ssh
85 | bosh int vbox/creds.yml --path /jumpbox_ssh/private_key > ssh/vbox.pem
86 | chmod 600 ssh/vbox.pem
87 | ssh-add ssh/vbox.pem
88 | ```
89 |
90 | Now you can SSH into your BOSH environment using `jumpbox` user:
91 |
92 | ```
93 | ssh jumpbox@192.168.50.6
94 | ```
95 |
96 | ## Deploy ZooKeeper
97 |
98 | Return to the `workspace` parent directory:
99 |
100 | ```
101 | cd ~/workspace
102 | ```
103 |
104 | Create a workspace folder for our `zookeeper` deployment and its dependencies:
105 |
106 | ```
107 | mkdir -p zookeeper-demo
108 | cd zookeeper-demo
109 | ```
110 |
111 | Using environment variables we now target our BOSH environment and the BOSH deployment by name:
112 |
113 | ```
114 | export BOSH_ENVIRONMENT=vbox
115 | export BOSH_DEPLOYMENT=zookeeper
116 | ```
117 |
118 | Alternately to using these two environment variables, in the subsequent `bosh` commands you could use `bosh -e vbox -d zookeeper`. You might see these two flag options used in other documentation or examples as you start using BOSH outside of this Ultimate Guide to BOSH. I prefer using the environment variables. Author's privilege.
119 |
120 | Clone the `zookeeper-release` repository, which contains the base deployment manifest:
121 |
122 | ```
123 | git clone https://github.com/cppforlife/zookeeper-release
124 | cat zookeeper-release/manifests/zookeeper.yml
125 | ```
126 |
127 | This base deployment manifest is lovely. It has no [Variables](/deployment-updates/#deployment-manifest-variables); it "Just Works".
128 |
129 | We can confirm that our `BOSH_DEPLOYMENT` name is correct:
130 |
131 | ```
132 | bosh int zookeeper-release/manifests/zookeeper.yml --path /name
133 | ```
134 |
135 | Alternately, we could use this command to set the `BOSH_DEPLOYMENT` variable:
136 |
137 | ```
138 | export BOSH_DEPLOYMENT=$(bosh int zookeeper-release/manifests/zookeeper.yml --path /name)
139 | ```
140 |
141 | We can now use our VirtualBox BOSH environment to deploy our `zookeeper` cluster:
142 |
143 | ```
144 | bosh deploy zookeeper-release/manifests/zookeeper.yml
145 | ```
146 |
147 | This `bosh deploy` command will first begin to download the `zookeeper` BOSH release because the corresponding BOSH release version has not been uploaded yet.
148 |
149 | Next, the CLI will display the proposed changes to the deployment. Since we are creating a new deployment, everything is new:
150 |
151 | ```
152 | + stemcells:
153 | + - alias: default
154 | + os: ubuntu-xenial
155 | + version: '621.78'
156 |
157 | + releases:
158 | + - name: zookeeper
159 | + url: git+https://github.com/cppforlife/zookeeper-release
160 | + version: 0.0.10
161 |
162 | + update:
163 | + canaries: 2
164 | + canary_watch_time: 5000-60000
165 | + max_in_flight: 1
166 | + update_watch_time: 5000-60000
167 |
168 | + instance_groups:
169 | + - azs:
170 | + - z1
171 | + - z2
172 | + - z3
173 | + instances: 5
174 | + jobs:
175 | + - name: zookeeper
176 | + properties: {}
177 | + release: zookeeper
178 | + - name: status
179 | + properties: {}
180 | + release: zookeeper
181 | + name: zookeeper
182 | + networks:
183 | + - name: default
184 | + persistent_disk: 10240
185 | + stemcell: default
186 | + vm_type: default
187 | + - azs:
188 | + - z1
189 | + instances: 1
190 | + jobs:
191 | + - name: smoke-tests
192 | + properties: {}
193 | + release: zookeeper
194 | + lifecycle: errand
195 | + name: smoke-tests
196 | + networks:
197 | + - name: default
198 | + stemcell: default
199 | + vm_type: default
200 |
201 | + name: zookeeper-demo
202 |
203 | Continue? [yN]:
204 | ```
205 |
206 | Press `y` to continue.
207 |
208 | The BOSH environment will now be provided with the deployment manifest, and will in turn perform all the activities necessary to create our new deployment.
209 |
210 | Initially it will need to compile the packages within the `zookeeper` BOSH release, since it has not compiled them already. It will not perform this task again on subsequent deployments.
211 |
212 | ```
213 | Task 4 | 08:40:43 | Preparing deployment: Preparing deployment (00:00:00)
214 | Task 4 | 08:40:43 | Preparing package compilation: Finding packages to compile (00:00:00)
215 | Task 4 | 08:40:43 | Compiling packages: golang-1.8-linux/3eac55db0483de642b1be389966327e931db3e3f
216 | Task 4 | 08:40:43 | Compiling packages: zookeeper/43ee655b89f8a05cc472ca997e8c8186457241c1
217 | Task 4 | 08:40:43 | Compiling packages: java/c524e46e61b37894935ae28016973e0e8644fcde
218 | Task 4 | 08:41:36 | Compiling packages: zookeeper/43ee655b89f8a05cc472ca997e8c8186457241c1 (00:00:53)
219 | Task 4 | 08:42:01 | Compiling packages: java/c524e46e61b37894935ae28016973e0e8644fcde (00:01:18)
220 | Task 4 | 08:42:07 | Compiling packages: golang-1.8-linux/3eac55db0483de642b1be389966327e931db3e3f (00:01:24)
221 | Task 4 | 08:42:08 | Compiling packages: smoke-tests/ec91e258c41471227a759c2749e7295cb65eff5a (00:00:08)
222 | ```
223 |
224 | Next, the BOSH environment will provision "missing vms" within its cloud environment.
225 |
226 | ```
227 | Task 4 | 08:42:17 | Creating missing vms: zookeeper/262baa62-d027-4dbc-b3be-b5bacd86cd20 (3)
228 | Task 4 | 08:42:17 | Creating missing vms: zookeeper/4c77e4e4-5452-4af0-afab-05f4f75bdaf3 (2)
229 | Task 4 | 08:42:17 | Creating missing vms: zookeeper/05027f6c-c13f-493f-9f32-d45c46df34c0 (4)
230 | Task 4 | 08:42:17 | Creating missing vms: zookeeper/c80f4fd5-3d31-4c1a-a82c-68a166fad0a0 (0)
231 | Task 4 | 08:42:17 | Creating missing vms: zookeeper/247623ea-8427-4f4a-939c-691faa7ef31f (1)
232 | Task 4 | 08:43:10 | Creating missing vms: zookeeper/262baa62-d027-4dbc-b3be-b5bacd86cd20 (3) (00:00:53)
233 | Task 4 | 08:43:10 | Creating missing vms: zookeeper/05027f6c-c13f-493f-9f32-d45c46df34c0 (4) (00:00:53)
234 | Task 4 | 08:43:11 | Creating missing vms: zookeeper/c80f4fd5-3d31-4c1a-a82c-68a166fad0a0 (0) (00:00:54)
235 | Task 4 | 08:43:11 | Creating missing vms: zookeeper/4c77e4e4-5452-4af0-afab-05f4f75bdaf3 (2) (00:00:54)
236 | Task 4 | 08:43:11 | Creating missing vms: zookeeper/247623ea-8427-4f4a-939c-691faa7ef31f (1) (00:00:54)
237 | ```
238 |
239 | Finally, the BOSH environment will update these five instances in small [Update Batches](/deployment-updates/#update-batches). Two initial canaries, and then one subsequent instance at a time:
240 |
241 | ```
242 | Task 4 | 08:43:11 | Updating instance zookeeper: zookeeper/c80f4fd5-3d31-4c1a-a82c-68a166fad0a0 (0) (canary) (00:00:40)
243 | Task 4 | 08:43:51 | Updating instance zookeeper: zookeeper/262baa62-d027-4dbc-b3be-b5bacd86cd20 (3) (canary) (00:00:39)
244 | Task 4 | 08:44:30 | Updating instance zookeeper: zookeeper/05027f6c-c13f-493f-9f32-d45c46df34c0 (4) (00:00:39)
245 | Task 4 | 08:45:09 | Updating instance zookeeper: zookeeper/247623ea-8427-4f4a-939c-691faa7ef31f (1) (00:00:38)
246 | Task 4 | 08:45:47 | Updating instance zookeeper: zookeeper/4c77e4e4-5452-4af0-afab-05f4f75bdaf3 (2) (00:00:36)
247 | ```
248 |
249 | Finally, the CLI will summarise the time taken to complete the `bosh deploy` task:
250 |
251 | ```
252 | Task 4 Started Tue Oct 10 08:40:43 UTC 2017
253 | Task 4 Finished Tue Oct 10 08:46:23 UTC 2017
254 | Task 4 Duration 00:05:40
255 | Task 4 done
256 | ```
257 |
258 | We can now see our five instances running:
259 |
260 | ```
261 | > bosh instances
262 | ```
263 |
264 | If we do not have `BOSH_ENVIRONMENT` and `BOSH_DEPLOYMENT` environment variables set, then we need to pass the `-e` and `-d` flags:
265 |
266 | ```
267 | bosh -e vbox -d zookeeper instances
268 | ```
269 |
270 | The output will be similar to:
271 |
272 | ```
273 | Using environment '192.168.50.6' as client 'admin'
274 |
275 | Task 5. Done
276 |
277 | Deployment 'zookeeper-demo'
278 |
279 | Instance Process State AZ IPs
280 | smoke-tests/d688f60e-d34c-4e95-9a2c-44d246ad08b7 - z1 -
281 | zookeeper/05027f6c-c13f-493f-9f32-d45c46df34c0 running z2 10.244.0.4
282 | zookeeper/247623ea-8427-4f4a-939c-691faa7ef31f running z2 10.244.0.5
283 | zookeeper/262baa62-d027-4dbc-b3be-b5bacd86cd20 running z1 10.244.0.3
284 | zookeeper/4c77e4e4-5452-4af0-afab-05f4f75bdaf3 running z3 10.244.0.6
285 | zookeeper/c80f4fd5-3d31-4c1a-a82c-68a166fad0a0 running z1 10.244.0.2
286 |
287 | 6 instances
288 |
289 | Succeeded
290 | ```
291 |
292 | ## Errands
293 |
294 | ```
295 | > bosh errands
296 | ```
297 |
298 | Our `zookeeper` deployment has two one-off tasks, called [Errands](https://bosh.io/docs/errands.html) that we can run upon our deployment:
299 |
300 | ```
301 | Name
302 | smoke-tests
303 | status
304 | ```
305 |
306 | The purpose and behaviour of these two errands will be documented by the `zookeeper-release` project. From their errand names and the output of `bosh instances` above, we can infer:
307 |
308 | * `bosh run-errand smoke-tests` will use a dedicated new instance to perform some sort of tests, probably upon the running cluster
309 | * `bosh run-errand status` will check the local status of each `zookeeper` instance. The `status` errand does not have a dedicated instance, so we know it will be invoked within the running instances of the deployment.
310 |
311 | The two errand names do not imply that they are destructive or harmful. Let's run them. Put on your cowboy hat.
312 |
313 | ```
314 | > bosh run-errand smoke-tests
315 | ```
316 |
317 | The output confirms that a dedicated instance is provisioned to run `smoke-tests`:
318 |
319 | TODO: hilite=5,6
320 |
321 | ```
322 | Task 6 | 09:17:12 | Preparing deployment: Preparing deployment
323 | Task 6 | 09:17:12 | Warning: Ambiguous request: the requested errand name 'smoke-tests' matches both a job name and an errand instance group name. Executing errand on all relevant instances with job 'smoke-tests'.
324 | Task 6 | 09:17:12 | Preparing package compilation: Finding packages to compile (00:00:00)
325 | Task 6 | 09:17:12 | Preparing deployment: Preparing deployment (00:00:00)
326 | Task 6 | 09:17:12 | Creating missing vms: smoke-tests/d688f60e-d34c-4e95-9a2c-44d246ad08b7 (0) (00:00:09)
327 | Task 6 | 09:17:21 | Updating instance smoke-tests: smoke-tests/d688f60e-d34c-4e95-9a2c-44d246ad08b7 (0) (canary) (00:00:22)
328 | Task 6 | 09:17:43 | Running errand: smoke-tests/d688f60e-d34c-4e95-9a2c-44d246ad08b7 (0) (00:00:17)
329 | Task 6 | 09:18:00 | Fetching logs for smoke-tests/d688f60e-d34c-4e95-9a2c-44d246ad08b7 (0): Finding and packing log files (00:00:01)
330 | ```
331 |
332 | Finally, the errand outputs all stdout and stderr it collected:
333 |
334 | ```
335 | Instance smoke-tests/d688f60e-d34c-4e95-9a2c-44d246ad08b7
336 | Exit Code 0
337 | Stdout -----> simple test
338 | Successfully created value
339 | Successfully retrieved created value
340 | Successfully set new value
341 | Successfully deleted value
342 | ...
343 |
344 | Stderr 2017/10/10 09:17:43 Connected to 10.244.0.3:2181
345 | 2017/10/10 09:17:43 Authenticated: id=314976500763983872, timeout=4000
346 | 2017/10/10 09:17:43 Re-submitting `0` credentials after reconnect
347 | 2017/10/10 09:17:43 Recv loop terminated: err=EOF
348 | 2017/10/10 09:17:43 Send loop terminated: err=
349 |
350 | 1 errand(s)
351 |
352 | Succeeded
353 | ```
354 |
355 | Personally I find it confusing that `Stdout` and `Stderr` are displayed in two separate sections. I'd like the output interlaced so as to give an indication when the `Stderr` output occurred relative to `Stdout`. But the errand `Succeeded` so I'll stop complaining.
356 |
357 | One from one. Let's try the `status` errand:
358 |
359 | ```
360 | > bosh run-errand status
361 | ```
362 |
363 | The output confirms that no new dedicated instance is created, rather the errand is invoked upon the existing `zookeeper` instance group's instances:
364 |
365 | ```
366 | Task 7 | 09:21:59 | Preparing deployment: Preparing deployment (00:00:01)
367 | Task 7 | 09:22:00 | Running errand: zookeeper/262baa62-d027-4dbc-b3be-b5bacd86cd20 (3)
368 | Task 7 | 09:22:00 | Running errand: zookeeper/05027f6c-c13f-493f-9f32-d45c46df34c0 (4)
369 | Task 7 | 09:22:00 | Running errand: zookeeper/c80f4fd5-3d31-4c1a-a82c-68a166fad0a0 (0)
370 | Task 7 | 09:22:00 | Running errand: zookeeper/247623ea-8427-4f4a-939c-691faa7ef31f (1)
371 | Task 7 | 09:22:00 | Running errand: zookeeper/4c77e4e4-5452-4af0-afab-05f4f75bdaf3 (2)
372 | Task 7 | 09:22:01 | Running errand: zookeeper/c80f4fd5-3d31-4c1a-a82c-68a166fad0a0 (0) (00:00:01)
373 | Task 7 | 09:22:01 | Running errand: zookeeper/262baa62-d027-4dbc-b3be-b5bacd86cd20 (3) (00:00:01)
374 | Task 7 | 09:22:01 | Fetching logs for zookeeper/c80f4fd5-3d31-4c1a-a82c-68a166fad0a0 (0): Finding and packing log files
375 | Task 7 | 09:22:01 | Fetching logs for zookeeper/262baa62-d027-4dbc-b3be-b5bacd86cd20 (3): Finding and packing log files
376 | ...
377 | ```
378 |
379 | The `status` errand output is specific to an Apache ZooKeeper status helper command:
380 |
381 | ```
382 | Instance zookeeper/05027f6c-c13f-493f-9f32-d45c46df34c0
383 | Exit Code 0
384 | Stdout Mode: leader
385 |
386 | Stderr ZooKeeper JMX enabled by default
387 | Using config: /var/vcap/jobs/zookeeper/config/zoo.cfg
388 |
389 |
390 | Instance zookeeper/247623ea-8427-4f4a-939c-691faa7ef31f
391 | Exit Code 0
392 | Stdout Mode: follower
393 |
394 | Stderr ZooKeeper JMX enabled by default
395 | Using config: /var/vcap/jobs/zookeeper/config/zoo.cfg
396 | ...
397 | ```
398 |
399 |
400 | ## Delete deployment
401 |
402 | The `bosh delete-deployment` command will destroy all running instances, and will [orphan its persistent disks](/disks/#orphaned-disks).
403 |
404 | ```
405 | bosh delete-deployment
406 | ```
407 |
408 | The orphaned disks are retained in case you notice the cowboy hat on your head and realise you've accidentally deleted your organisation's production cluster of ZooKeeper.
409 |
410 | ```
411 | bosh disks --orphaned
412 | ```
413 |
414 | ## Clean up
415 |
416 | But today we are intent on cleaning up and shutting down our BOSH environment.
417 |
418 | The `bosh clean-up` command will perform a decent job of removing any very old releases and stemcells.
419 |
420 | Today we will use `bosh clean-up --all` to destroy every BOSH release, stemcell, and orphaned disk that is not being used by a deployment. Since we have no deployments running anymore, this clean up command will purge everything.
421 |
422 | ```
423 | > bosh clean-up --all
424 | ```
425 |
426 | The output for this section will show release packages, release jobs, stemcells, and orphaned disks being deleted:
427 |
428 | ```
429 | Task 9 | 09:33:53 | Deleting releases: zookeeper/0.0.7
430 | Task 9 | 09:33:53 | Deleting packages: golang-1.8-linux/3eac55db0483de642b1be389966327e931db3e3f (00:00:00)
431 | Task 9 | 09:33:53 | Deleting packages: java/c524e46e61b37894935ae28016973e0e8644fcde (00:00:01)
432 | Task 9 | 09:33:54 | Deleting packages: smoke-tests/ec91e258c41471227a759c2749e7295cb65eff5a (00:00:00)
433 | Task 9 | 09:33:54 | Deleting packages: zookeeper/43ee655b89f8a05cc472ca997e8c8186457241c1 (00:00:00)
434 | Task 9 | 09:33:54 | Deleting jobs: smoke-tests/840b14bc609483bb03cf87a938bc69e76a6e2d88 (00:00:00)
435 | Task 9 | 09:33:54 | Deleting jobs: status/1a6e60e211521487e4d03c8f7dc652b27a9ee368 (00:00:00)
436 | Task 9 | 09:33:54 | Deleting jobs: zookeeper/17fe24515b0740d72b3ecdfe002bfaa6ae1771ca (00:00:00)
437 | Task 9 | 09:33:54 | Deleting releases: zookeeper/0.0.7 (00:00:01)
438 | Task 9 | 09:33:54 | Deleting stemcells: bosh-warden-boshlite-ubuntu-trusty-go_agent/3468 (00:00:03)
439 | Task 9 | 09:33:57 | Deleting orphaned disks: 3f41e9a9-fa8b-43b8-5d52-2e59fa2dc7f4
440 | Task 9 | 09:33:57 | Deleting orphaned disks: a7b695fe-032e-476b-4d88-ebbf9982ad38
441 | Task 9 | 09:33:57 | Deleting orphaned disks: 1f73c1ea-0798-4c03-6569-ec12772f1922
442 | Task 9 | 09:33:57 | Deleting orphaned disks: 01a49f13-0c88-4169-623c-31ddfa561b8c
443 | Task 9 | 09:33:57 | Deleting orphaned disks: a9d0382a-ef40-4da9-5883-268934afd827
444 | Task 9 | 09:33:57 | Deleting orphaned disks: a7b695fe-032e-476b-4d88-ebbf9982ad38 (00:00:00)
445 | Task 9 | 09:33:57 | Deleting orphaned disks: 1f73c1ea-0798-4c03-6569-ec12772f1922 (00:00:00)
446 | Task 9 | 09:33:57 | Deleting orphaned disks: 3f41e9a9-fa8b-43b8-5d52-2e59fa2dc7f4 (00:00:00)
447 | Task 9 | 09:33:57 | Deleting orphaned disks: a9d0382a-ef40-4da9-5883-268934afd827 (00:00:00)
448 | Task 9 | 09:33:57 | Deleting orphaned disks: 01a49f13-0c88-4169-623c-31ddfa561b8c (00:00:00)
449 | Task 9 | 09:33:57 | Deleting dns blobs: DNS blobs (00:00:00)
450 | ```
451 |
452 | ## Delete BOSH environment
453 |
454 | Now that we have cleaned up our BOSH environment we can delete our BOSH environment. For this VirtualBox tutorial this will mean deleting the VirtualBox VM.
455 |
456 | First, return to our `bosh-env-vbox` workspace directory:
457 |
458 | ```
459 | cd ~/workspace/bosh-env-vbox
460 | ```
461 |
462 | We need to run the same `bosh create-env` command as above, but using the `delete-env` subcommand instead. That is, the `bosh delete-env` command needs the same arguments used for the original `bosh create-env`.
463 |
464 | ```
465 | bosh delete-env bosh-deployment/bosh.yml \
466 | --state vbox/state.json \
467 | -o bosh-deployment/virtualbox/cpi.yml \
468 | -o bosh-deployment/virtualbox/outbound-network.yml \
469 | -o bosh-deployment/bosh-lite.yml \
470 | -o bosh-deployment/bosh-lite-runc.yml \
471 | -o bosh-deployment/jumpbox-user.yml \
472 | -o bosh-deployment/uaa.yml \
473 | -o bosh-deployment/credhub.yml \
474 | --vars-store vbox/creds.yml \
475 | -v director_name="Bosh-Lite-Director" \
476 | -v internal_ip=192.168.50.6 \
477 | -v internal_gw=192.168.50.1 \
478 | -v internal_cidr=192.168.50.0/24 \
479 | -v outbound_network_name=NatNetwork
480 | ```
481 |
482 | Visit the VirtualBox application to confirm the VM has been deleted:
483 |
484 | 
485 |
486 |
--------------------------------------------------------------------------------
/docs/why-bosh.md:
--------------------------------------------------------------------------------
1 | # Why BOSH?
2 |
3 | First, let's answer the question:
4 |
5 | ## What is a Running Software System?
6 |
7 | 
8 |
9 | Your bespoke or user-facing application is either a compiled application (Golang) or source code that runs within an interpreter (Ruby or Python) or is compiled and requires an interpreter (JVM languages).
10 |
11 | Your bespoke application will be composed of bespoke code plus third party software libraries (RubyGems for Ruby, NPM for Node, Wheels for Python, etc).
12 |
13 | Your application will need to be configured (combination of local configuration files, environment variables, service discovery system) to run and connect to any dependent systems.
14 |
15 | Your application will require local dependencies to be already installed - its interpreter, linked libraries, executable applications, etc.
16 |
17 | Your application and its dependencies all require an operating system. And formatted disks. And networking to be configured.
18 |
19 | All this runs in a virtual server/virtual machines (VMs) either in someone else's data centre called "the cloud" (AWS, GCP, Microsoft Azure) or someone else's data center called, "on premise" (but it's really normally not in your building, is it?) running virtualisation software (vSphere, OpenStack).
20 |
21 | Your applications and databases running on virtual machines will require disks: local or ephemeral disks that might not survive VM downtime or replacement; and persistent networked disks that are independent of each VM and will be available again if you need (or are forced) to replace your VMs.
22 |
23 | All of this runs upon physical machines connected to actual storage systems and interconnected by real-world networking.
24 |
25 | Servers need to be powered, so you'll need stable affordable electricity. Servers get hot, so you'll need cooling. Servers can be stolen or physically hacked, so you'll need security guards with appropriate lapel badges.
26 |
27 | It's incredible that it all works. Click on https://google.com to check that it all works.
28 |
29 | Note: The Ultimate Guide to BOSH will include unsolicited sarcasm and humour. With luck, you'll enjoy both the Ultimate Guide to BOSH and the humour.
30 |
31 | ## Choose Your Own Deployment Level
32 |
33 | You might define "deploying my system" at a different level to other people:
34 |
35 | * using an application platform, such as Cloud Foundry or Heroku
36 | * using a container orchestration system provided by someone else, such as Kubernetes, Docker, Amazon ECS
37 | * using virtual machines provided by someone else, such as AWS, GCP, vSphere
38 | * using bare metal machines provided by someone else
39 | * racking bare metal servers or putting Raspberry Pis into the field
40 |
41 | From the perspective of your organization and their goals of efficiently using your time and energy,
42 | hopefully you can start as high up this stack as possible. For example, there is simply nothing faster, more time efficient, and UI consistent than `cf push`-ing an application to any Cloud Foundry. Every system you deploy should have to first justify why it cannot be deployed to Cloud Foundry, Heroku, or Google App Engine.
43 |
44 | If you do need to "go down the stack" and take responsibility for more, then you will need more help. Either your organization will need to expect less from you and your team, or you'll need more tooling, automation, and education.
45 |
46 | ## Assumptions
47 |
48 | The Ultimate Guide to BOSH assumes you need the latter: you need tooling, automation, and education.
49 |
50 | It also assumes that you have direct access to your virtualisation/cloud infrastructure - you have suitable AWS credentials, a Google Compute account, or vSphere admin access.
51 |
52 | The Ultimate Guide to BOSH assumes you are prepared to learn a new tool, its features, and its quirks.
53 |
54 | ## Continuous Integration and Continuous Delivery
55 |
56 | BOSH slots in very nicely into any continuous deployment systems you might already be using. The `bosh` command-line tool is a perfect abstraction for, "Please make this happen," that will make it pleasurable to move BOSH deployments into your CI/CD systems.
57 |
--------------------------------------------------------------------------------
/manifest.yml:
--------------------------------------------------------------------------------
1 | applications:
2 | - name: ultimate-guide-to-bosh
3 | buildpack: staticfile_buildpack
4 | disk_quota: 1G
5 | instances: 1
6 | memory: 64M
7 | routes:
8 | - route: ultimate-guide-to-bosh.cfapps.io
9 | - route: ultimateguidetobosh.com
10 | - route: www.ultimateguidetobosh.com
11 | stack: cflinuxfs3
12 |
--------------------------------------------------------------------------------
/mkdocs.yml:
--------------------------------------------------------------------------------
1 | site_name: Ultimate Guide to BOSH
2 | nav:
3 | - Guide:
4 | - Welcome: index.md
5 | - Introduction: introduction.md
6 | - Why BOSH?: why-bosh.md
7 | - Deployments: deployments.md
8 | - Instances: instances.md
9 | - Deployment Manifests: deployment-manifests-part-1.md
10 | - Networking: networking.md
11 | - Disks: disks.md
12 | - Cloud Config Updates: cloud-config-updates.md
13 | - Deployment Updates: deployment-updates.md
14 | - Releases: releases.md
15 | - Properties: properties.md
16 | - Stemcells: stemcells.md
17 | - Complete Deployment Manifest: complete-deployment-manifest.md
18 | - Service Discovery: service-discovery.md
19 | - Targeting BOSH Environments and Deployments: targeting-bosh-envs.md
20 | - Complete Tutorials:
21 | - bosh-lite on VirtualBox: tutorials/bosh-lite-virtualbox.md
22 | - Spread the Word:
23 | - Meetups: spread-the-word/meetups.md
24 | - Ask For Help:
25 | - Suggestions: ask-for-help/suggestions.md
26 |
27 | theme:
28 | name: 'material'
29 | custom_dir: 'theme-overrides'
30 | palette:
31 | primary: 'orange'
32 | accent: 'amber'
33 | feature:
34 | tabs: true
35 |
36 | logo: 'images/favicon/favicon-64.png'
37 |
38 | font:
39 | text: 'Bitter' # https://fonts.google.com/specimen/Bitter
40 | code: 'Ubuntu Mono'
41 |
42 | markdown_extensions:
43 | - admonition
44 | - codehilite
45 | - pymdownx.betterem:
46 | smart_enable: all
47 | - pymdownx.critic
48 | - pymdownx.magiclink
49 | - pymdownx.mark
50 | - pymdownx.superfences
51 | - toc:
52 | permalink: true
53 |
54 | repo_name: 'help fix the book'
55 | repo_url: 'https://github.com/starkandwayne/ultimate-guide-to-bosh'
56 | edit_uri: edit/master/docs/
57 | extra_css: [print.css, stylesheets/extra.css]
58 | google_analytics: ['G-GGW6405JCV', 'ultimateguidetobosh.com']
59 |
--------------------------------------------------------------------------------
/paketo/pack-build:
--------------------------------------------------------------------------------
1 | # --builder paketobuildpacks/builder:base
2 | # --builder gcr.io/buildpacks/builder
3 | pack build ultimate-guide-bosh \
4 | --builder paketobuildpacks/builder:base \
5 | --buildpack gcr.io/paketo-community/python \
6 | --buildpack gcr.io/paketo-buildpacks/procfile \
7 | --env-file pip.env \
8 | --clear-cache \
9 | --default-process build
10 |
--------------------------------------------------------------------------------
/pip.env:
--------------------------------------------------------------------------------
1 | LC_ALL=C.UTF-8
2 | LANG=C.UTF-8
3 |
--------------------------------------------------------------------------------
/theme-overrides/main.html:
--------------------------------------------------------------------------------
1 | {% extends "base.html" %}
2 |
3 | {% block extrahead %}
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 | {% if page and page.meta and page.meta.title %}
14 |
15 |
16 | {% elif page and page.title and not page.is_homepage %}
17 |
18 | {% else %}
19 | {% endif %}
20 | {% if page and page.meta and page.meta.description %}
21 | {% endif %}
22 | {% if page and page.meta and page.meta.image_path %}
23 | {% if page.meta.image_path %}
24 | {% endif %}
25 | {% if page.meta.image_alt %}
26 | {% endif %}
27 | {% if page.meta.image_type %}
28 | {% endif %}
29 | {% if page.meta.image_width %}
30 | {% endif %}
31 | {% if page.meta.image_height %}
32 | {% endif %}
33 | {% endif %}
34 | {% endblock %}
35 |
36 | {% block scripts %}
37 |
38 | {% endblock %}
39 |
--------------------------------------------------------------------------------
/theme-overrides/partials/footer.html:
--------------------------------------------------------------------------------
1 |
19 |
20 | {% import "partials/language.html" as lang with context %}
21 |
22 |
23 |
--------------------------------------------------------------------------------