├── .gitignore ├── .travis.yml ├── LICENSE ├── MAINTAINERS.md ├── Makefile ├── README.md ├── deploy ├── gk-deploy ├── gk-deploy-minikube ├── heketi.json.template ├── kube-templates │ ├── deploy-heketi-deployment.yaml │ ├── gluster-s3-pvcs.yaml │ ├── gluster-s3-storageclass.yaml │ ├── gluster-s3-template.yaml │ ├── glusterfs-daemonset.yaml │ ├── heketi-deployment.yaml │ └── heketi-service-account.yaml ├── ocp-templates │ ├── deploy-heketi-template.yaml │ ├── gluster-s3-pvcs.yaml │ ├── gluster-s3-storageclass.yaml │ ├── gluster-s3-template.yaml │ ├── glusterfs-template.yaml │ ├── heketi-service-account.yaml │ └── heketi-template.yaml ├── topology-minikube.json └── topology.json.sample ├── docs ├── design │ ├── README.md │ ├── gluster-block-provisioning.md │ └── tls-security.md ├── examples │ ├── containerized_heketi_dedicated_gluster │ │ ├── README.md │ │ ├── heketi-deployment.yaml │ │ └── heketi.json │ ├── dynamic_provisioning_external_gluster │ │ └── README.md │ ├── gluster-s3-storage-template │ │ └── README.md │ ├── hello_world │ │ ├── README.md │ │ ├── gluster-pvc.yaml │ │ ├── gluster-storage-class.yaml │ │ └── nginx-pod.yaml │ ├── sample-gluster-endpoints.yaml │ └── sample-gluster-service.yaml ├── presentations │ ├── 2017-03-vault │ │ ├── Gluster-in-Kubernetes.pdf │ │ ├── demo-deploy.json │ │ ├── demo-dynamic-provisioning.json │ │ └── demo-test-heketi.json │ ├── 2017.02.05_-_gluster-kubernetes_fosdem.pdf │ ├── 2017.02.05_-_gluster-kubernetes_fosdem.png │ └── README.md ├── release-maintenance.md └── setup-guide.md ├── tests ├── Makefile ├── README.md ├── complex │ ├── Makefile │ ├── README.md │ ├── lib.sh │ ├── run-all.sh │ ├── run-basic.sh │ ├── run-object.sh │ ├── run.sh │ ├── test-dynamic-provisioning.sh │ ├── test-gk-deploy-object.sh │ ├── test-gk-deploy.sh │ ├── test-inside-dynamic-provisioning.sh │ ├── test-inside-gk-deploy.sh │ ├── test-inside-object-store-setup.sh │ ├── test-inside-object-store.sh │ ├── test-object-store.sh │ ├── test-setup.sh │ └── test-teardown.sh └── simple │ ├── Makefile │ ├── README.md │ ├── common │ ├── shell_tests.sh │ └── subunit.sh │ ├── gk-deploy │ ├── Makefile │ ├── run.sh │ ├── stubs │ │ ├── cli.sh │ │ ├── kubectl │ │ └── oc │ └── test_gk_deploy_basic.sh │ ├── run.sh │ ├── shell │ ├── Makefile │ ├── run.sh │ ├── test_realpath.sh │ └── test_syntax.sh │ └── yaml │ ├── Makefile │ ├── glusterfs-daemonset-wrong.yaml │ ├── run.sh │ └── test_syntax.sh └── vagrant ├── README.md ├── Vagrantfile ├── ansible-step ├── demo ├── README.md ├── demo-deploy.sh ├── demo-dynamic-provisioning.sh ├── demo-inside-deploy.sh ├── demo-inside-dynamic-provisioning.sh ├── demo-inside-prepare.sh ├── demo-inside-status.sh ├── demo-inside-test-heketi.sh ├── demo-inside-wrapper.sh ├── demo-prepare.sh ├── demo-status.sh ├── demo-test-heketi.sh └── util.sh ├── docker-cache.sh ├── docker-registry-run.sh ├── gcr-proxy-state.sh ├── gcr_proxy.yml ├── global_vars.yml ├── roles ├── common │ ├── files │ │ ├── 10-kubeadm-post-1.8.conf │ │ ├── 10-kubeadm.conf │ │ ├── dm_snapshot.conf │ │ ├── k8s.conf │ │ └── rc.local │ ├── tasks │ │ └── main.yml │ └── templates │ │ └── docker.j2 ├── master │ ├── defaults │ │ └── main.yml │ ├── tasks │ │ ├── gcr_proxy.yml │ │ ├── main.yml │ │ └── yum_cache.yml │ └── templates │ │ └── nginx.conf.j2 └── nodes │ └── tasks │ ├── gcr_proxy.yml │ └── main.yml ├── rollback.sh ├── site.yml └── up.sh /.gitignore: -------------------------------------------------------------------------------- 1 | *~ 2 | *.sw? 
3 | *.retry 4 | */.vagrant/** 5 | topology.json 6 | heketi-storage.json 7 | ssh-config 8 | heketi.json 9 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | sudo: true 2 | 3 | addons: 4 | apt: 5 | sources: 6 | - sourceline: 'deb http://archive.ubuntu.com/ubuntu trusty-backports main restricted universe multiverse' 7 | packages: 8 | - shellcheck 9 | 10 | before_install: 11 | - sudo pip install yamllint 12 | 13 | script: 14 | - make test 15 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. -------------------------------------------------------------------------------- /MAINTAINERS.md: -------------------------------------------------------------------------------- 1 | Project Lead: José A. Rivera (@jarrpa) 2 | 3 | Maintainers: 4 | 5 | * Michael Adam (@obnoxxx) 6 | * José A. Rivera (@jarrpa) 7 | 8 | The Project Lead helps drive the development of the project and coordinates with 9 | other projects. Maintainers have administrative rights to the repository and can 10 | merge PRs. 
11 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | test: 2 | $(MAKE) -C tests test 3 | 4 | complex-tests: 5 | $(MAKE) -C tests/complex test 6 | 7 | .PHONY: test complex-tests 8 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # gluster-kubernetes 2 | 3 | [![Build Status](https://travis-ci.org/gluster/gluster-kubernetes.svg?branch=master)](https://travis-ci.org/gluster/gluster-kubernetes) 4 | 5 | ## GlusterFS Native Storage Service for Kubernetes 6 | 7 | **gluster-kubernetes** is a project to provide Kubernetes administrators a 8 | mechanism to easily deploy GlusterFS as a native storage service onto an 9 | existing Kubernetes cluster. Here, GlusterFS is managed and orchestrated like 10 | any other app in Kubernetes. This is a convenient way to unlock the power of 11 | dynamically provisioned, persistent GlusterFS volumes in Kubernetes. 12 | 13 | ### Component Projects 14 | 15 | * **[Kubernetes](http://kubernetes.io/)**, the container management system. 16 | * **[GlusterFS](https://www.gluster.org/)**, the scale-out storage system. 17 | * **[heketi](https://github.com/heketi/heketi)**, the RESTful volume management 18 | interface for GlusterFS. 19 | 20 | ### Presentations 21 | 22 | You can find slides and videos of community presentations [here](docs/presentations). 23 | 24 | **>>> [Video demo of the technology!](https://drive.google.com/file/d/0B667S2caJiy7QVpzVVFNQVdyaVE/view?usp=sharing) <<<** 25 | 26 | ### Documentation 27 | 28 | * [Quickstart](#quickstart) 29 | * [Setup Guide](./docs/setup-guide.md) 30 | * [Hello World with GlusterFS Dynamic Provisioning](./docs/examples/hello_world/README.md) 31 | * [Contact](#contact) 32 | * [Release and Maintenance Policies](./docs/release-maintenance.md) 33 | 34 | ### Quickstart 35 | 36 | If you already have a Kubernetes cluster you wish to use, make sure it meets 37 | the prerequisites outlined in our [setup guide](./docs/setup-guide.md). 38 | 39 | This project includes a vagrant setup in the `vagrant/` directory to spin up a 40 | Kubernetes cluster in VMs. To run the vagrant setup, you'll need to have the 41 | following pre-requisites on your machine: 42 | 43 | * 4GB of memory 44 | * 32GB of storage minimum, 112GB recommended 45 | * ansible 46 | * vagrant 47 | * libvirt or VirtualBox 48 | 49 | To spin up the cluster, simply run `./up.sh` in the `vagrant/` directory. 50 | 51 | **NOTE**: If you plan to run ./up.sh more than once the vagrant setup supports 52 | caching packages and container images. Please read the 53 | [vagrant directory README](./vagrant/README.md) 54 | for more information on how to configure and use the caching support. 55 | 56 | Next, copy the `deploy/` directory to the master node of the cluster. 57 | 58 | You will have to provide your own topology file. A sample topology file is 59 | included in the `deploy/` directory (default location that gk-deploy expects) 60 | which can be used as the topology for the vagrant libvirt setup. When 61 | creating your own topology file: 62 | 63 | * Make sure the topology file only lists block devices intended for heketi's 64 | use. heketi needs access to whole block devices (e.g. /dev/sdb, /dev/vdb) 65 | which it will partition and format. 66 | 67 | * The `hostnames` array is a bit misleading. 
`manage` should be a list of 68 | hostnames for the node, but `storage` should be a list of IP addresses on 69 | the node for backend storage communications. 70 | 71 | If you used the provided vagrant libvirt setup, you can run: 72 | 73 | ```bash 74 | $ vagrant ssh-config > ssh-config 75 | $ scp -rF ssh-config ../deploy master: 76 | $ vagrant ssh master 77 | [vagrant@master]$ cd deploy 78 | [vagrant@master]$ mv topology.json.sample topology.json 79 | ``` 80 | 81 | The following commands are meant to be run with administrative privileges 82 | (e.g. `sudo su` beforehand). 83 | 84 | At this point, verify the Kubernetes installation by making sure all nodes are 85 | Ready: 86 | 87 | ```bash 88 | $ kubectl get nodes 89 | NAME STATUS AGE 90 | master Ready 22h 91 | node0 Ready 22h 92 | node1 Ready 22h 93 | node2 Ready 22h 94 | ``` 95 | 96 | **NOTE**: To see the version of Kubernetes (which will change based on 97 | latest official releases) simply do `kubectl version`. This will help in 98 | troubleshooting. 99 | 100 | Next, to deploy heketi and GlusterFS, run the following: 101 | 102 | ```bash 103 | $ ./gk-deploy -g 104 | ``` 105 | 106 | If you already have a pre-existing GlusterFS cluster, you do not need the 107 | `-g` option. 108 | 109 | After this completes, GlusterFS and heketi should now be installed and ready 110 | to go. You can set the `HEKETI_CLI_SERVER` environment variable as follows so 111 | that it can be read directly by `heketi-cli` or sent to something like `curl`: 112 | 113 | ```bash 114 | $ export HEKETI_CLI_SERVER=$(kubectl get svc/heketi --template 'http://{{.spec.clusterIP}}:{{(index .spec.ports 0).port}}') 115 | 116 | $ echo $HEKETI_CLI_SERVER 117 | http://10.42.0.0:8080 118 | 119 | $ curl $HEKETI_CLI_SERVER/hello 120 | Hello from Heketi 121 | ``` 122 | 123 | Your Kubernetes cluster should look something like this: 124 | 125 | ```bash 126 | $ kubectl get nodes,pods 127 | NAME STATUS AGE 128 | master Ready 22h 129 | node0 Ready 22h 130 | node1 Ready 22h 131 | node2 Ready 22h 132 | NAME READY STATUS RESTARTS AGE 133 | glusterfs-node0-2509304327-vpce1 1/1 Running 0 1d 134 | glusterfs-node1-3290690057-hhq92 1/1 Running 0 1d 135 | glusterfs-node2-4072075787-okzjv 1/1 Running 0 1d 136 | heketi-3017632314-yyngh 1/1 Running 0 1d 137 | ``` 138 | 139 | You should now also be able to use `heketi-cli` or any other client of the 140 | heketi REST API (like the GlusterFS volume plugin) to create/manage volumes and 141 | then mount those volumes to verify they're working. To see an example of how 142 | to use this with a Kubernetes application, see the following: 143 | 144 | [Hello World application using GlusterFS Dynamic Provisioning](./docs/examples/hello_world/README.md) 145 | 146 | ### Contact 147 | 148 | The gluster-kubernetes developers hang out in #sig-storage on the Kubernetes Slack and 149 | on IRC channels in #gluster and #heketi at freenode network. 150 | 151 | And, of course, you are always welcomed to reach us via Issues and Pull Requests on GitHub. 152 | -------------------------------------------------------------------------------- /deploy/gk-deploy-minikube: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright (c) 2018 Red Hat, Inc. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 
6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 13 | # implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | SCRIPT_DIR="$(cd "$(dirname "${0}")" && pwd)" 18 | TOPOLOGY='topology-minikube.json' 19 | CLI='kubectl' 20 | 21 | set -ex 22 | 23 | minikube ssh "\ 24 | sudo truncate -s 10G /mnt/vda1/heketi-bricks.img \ 25 | && sudo losetup /dev/loop1 /mnt/vda1/heketi-bricks.img \ 26 | && sudo pvcreate /dev/loop1 \ 27 | " 28 | 29 | ${SCRIPT_DIR}/gk-deploy --yes --single-node --cli ${CLI} --deploy-gluster ${SCRIPT_DIR}/${TOPOLOGY} 30 | 31 | HEKETI_URL=$(${CLI} get --no-headers endpoints/heketi | awk '{print $2}') 32 | 33 | if minikube addons list | grep -q "default-storageclass: enabled" ; then 34 | minikube addons disable default-storageclass 35 | ${CLI} patch storageclass standard -p '{"metadata":{"annotations":{"storageclass.beta.kubernetes.io/is-default-class":"false"}}}' 36 | fi 37 | 38 | ${CLI} create -f - <. 125 | 126 | The provisioner is mainly a simple translation engine that turns PVC requests 127 | into requests for heketi via heketi's RESTful API, and wraps the resulting 128 | volume information into a PV. 129 | 130 | The provisioner is configured via a StorageClass which can look like this: 131 | 132 | ``` 133 | kind: StorageClass 134 | apiVersion: storage.k8s.io/v1 135 | metadata: 136 | name: glusterblock 137 | provisioner: gluster.org/glusterblock 138 | parameters: 139 | resturl: "http://127.0.0.1:8081" 140 | restuser: "admin" 141 | secretnamespace: "default" 142 | secretname: "heketi-secret" 143 | hacount: "3" 144 | clusterid: "630372ccdc720a92c681fb928f27b53f" 145 | ``` 146 | 147 | The available parameters are: 148 | 149 | * ```resturl```: how to reach heketi 150 | * ```restuser```, ```secretnamespace```, ```secretname```: 151 | authentication information 152 | * ```hacount``` (optional): How many paths to the target server to configure. 153 | * ```clusterid``` (optional): List of one or more clusters to consider for 154 | finding space for the requested volume. 155 | 156 | See for 157 | additional and up-to-date details. 158 | 159 | ## Details About Heketi's New ```blockvolume``` Functionality 160 | 161 | For the purposes of gluster block volumes, the same heketi instance is used 162 | as for the regular glusterfs file volumes. Heketi has a new API though for 163 | treating blockvolume requests. Just as with the glusterfs file volume 164 | provisioning, the logic for finding suitable clusters and file volumes 165 | for hosting the loopback files is part of heketi. 166 | 167 | The API is a variation of the ```volume``` API and looks like this. 168 | 169 | The supported functions are: 170 | 171 | * ```BlockVolumeCreate``` 172 | * ```BlockVolumeInfo``` 173 | * ```BlockVolumeDelete``` 174 | * ```BlockVolumeList``` 175 | 176 | In the future, ```BlockVolumeExpand``` might get added. 177 | 178 | ### Note About The State Of The Implementation 179 | 180 | As of 2017-06-23, a PR with the implementation is available 181 | at . 
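
To make the API shape concrete before looking at the request structures, here is a minimal sketch of exercising the blockvolume API with ```curl```. It assumes an unauthenticated heketi reachable via ```$HEKETI_CLI_SERVER``` and that the routes mirror the file-volume API under ```/blockvolumes```; request queuing and asynchronous response handling are omitted, so treat this as illustrative rather than authoritative.

```
# Create a 10 GiB block volume with 3-way multipathing
curl -X POST -H "Content-Type: application/json" \
     -d '{"size": 10, "name": "blockvol1", "hacount": 3}' \
     "$HEKETI_CLI_SERVER/blockvolumes"

# List the IDs of existing block volumes
curl "$HEKETI_CLI_SERVER/blockvolumes"

# Inspect and delete a block volume by ID
curl "$HEKETI_CLI_SERVER/blockvolumes/<id>"
curl -X DELETE "$HEKETI_CLI_SERVER/blockvolumes/<id>"
```

The create request body mirrors the ```BlockVolumeCreateRequest``` structure described below.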
182 | 183 | 184 | ### Details about the requests 185 | 186 | #### BlockVolumeCreateRequest 187 | 188 | The block volume create request takes the size and a name 189 | and can optionally take a list of clusters and an hacount. 190 | 191 | ``` 192 | type BlockVolumeCreateRequest struct { 193 | Size int `json:"size"` 194 | Clusters []string `json:"clusters,omitempty"` 195 | Name string `json:"name"` 196 | Hacount int `json:"hacount,omitempty"` 197 | Auth bool `json:"auth,omitempty" 198 | } 199 | ``` 200 | 201 | #### BlockVolume 202 | 203 | This is the basic info about a block volume. 204 | 205 | ``` 206 | BlockVolume struct { 207 | Hosts []string `json:"hosts"` 208 | Iqn string `json:"iqn"` 209 | Lun int `json:"lun"` 210 | Username string `json:"username"` 211 | Password string `json:"password"` 212 | } 213 | 214 | ``` 215 | 216 | #### BlockVolumeInfo 217 | 218 | This is returned for the blockvolume info request and 219 | upon successful creation. 220 | 221 | ``` 222 | type BlockVolumeInfo struct { 223 | Size int `json:"size"` 224 | Clusters []string `json:"clusters,omitempty"` 225 | Name string `json:"name"` 226 | Hacount int `json:"hacount,omitempty"` 227 | Id string `json:"id"` 228 | Size int `json:"size"` 229 | BlockVolume struct { 230 | Hosts []string `json:"hosts"` 231 | Hacount int `json:"hacount"` 232 | Iqn string `json:"iqn"` 233 | Lun int `json:"lun"` 234 | } `json:"blockvolume"` 235 | } 236 | 237 | ``` 238 | 239 | 240 | #### BlockVolumeListResponse 241 | 242 | The block volume list request just gets a list 243 | of block volume IDs as response. 244 | 245 | 246 | ``` 247 | type BlockVolumeListResponse struct { 248 | BlockVolumes []string `json:"blockvolumes"` 249 | } 250 | 251 | ``` 252 | 253 | ### Details About Heketi's Internal Logic 254 | 255 | #### Block-hosting volumes 256 | 257 | The loopback files for block volumes need to be stored in 258 | gluster file volumes. Volumes used for gluster-block volumes 259 | should not be used for other purposes. For want of a better 260 | term, we call these volumes that can host block-volume 261 | loopback files **block-hosting file-volumes** or (for brevity) 262 | **block-hosting volumes** in this document. 263 | 264 | #### Labeling block-hosting volumes 265 | 266 | In order to satisfy a blockvolume create request, Heketi 267 | needs to find and appropriate block-hosting volume in 268 | the available clusters. Hence heketi should internally 269 | flag these volumes with a label (`block`). 270 | 271 | #### Type of block-hosting volumes 272 | 273 | The block-hosting volumes should be regular 274 | 3-way replica volumes (possibly distributed). 275 | One important aspect is that for performance 276 | reasons, sharding should be enabled on these volumes. 277 | 278 | #### Block-hosting volume creation automatism 279 | 280 | When heketi, upon receiving a blockvolume create request, 281 | does not find a block-hosting volume with sufficient 282 | space in any of the considered clusters, it would look for 283 | sufficient unused space in the considered clusters and create 284 | a new gluster file volume, or expand an existing volume 285 | labeled `block`. 286 | 287 | The sizes to be used for auto-creation of block-hosting 288 | volumes will be subject to certain parameters that can 289 | be configured and will have reasonable defaults: 290 | 291 | * `auto_create_block_hosting_volume`: Enable auto-creation of 292 | block-hosting volumes? 293 | Defaults to **false**. 
294 | * `block_hosting_volume_size`: The size for a new block-hosting 295 | volume to be created on a cluster will be the minimum of the value 296 | of this setting and maximum size of a volume that could be created. 297 | This size will also be used when expanding volumes: The amount 298 | added to the existing volume will be the minimum of this value 299 | and the maximum size that could be added. 300 | Defaults to **1TB**. 301 | 302 | #### Internal heketi db format for block volumes 303 | 304 | Heketi stores information about the block volumes 305 | in it's internal DB. The information stored is 306 | 307 | * id: id of this block volume 308 | * name: name given to the volume 309 | * volume: the id of the block-hosting volume where the loopback file resides 310 | * hosts: the target ips for this volume 311 | 312 | #### Cluster selection 313 | 314 | By default, heketi would consider all available clusters 315 | when looking for space to create a new block-volume file. 316 | 317 | With the clusters request parameter, this search can be 318 | narrowed down to an explicit list of one or more clusters. 319 | With the help of the ```clusterid``` storage class option, 320 | this gives the kubernetes administrator a way to e.g. separate 321 | different storage qualities, or to reserve a cluster 322 | exclusively for block-volumes. 323 | 324 | 325 | ### Details On Calling ```gluster-block``` 326 | 327 | Heketi calls out to ```gluster-block``` the same way it 328 | currently calls out to the standard gluster cli for the 329 | normal volume create operations, i.e. it uses a kubexec 330 | mechanism to run the command on one of the gluster nodes. 331 | (In a non-kubernetes install, it uses ssh.) 332 | 333 | 334 | ## Details About ```gluster-block``` 335 | 336 | ```gluster-block``` is the gluster-level tool to make creation 337 | and consumption of block volumes very easy. It consists of 338 | a server component ```gluster-blockd``` that runs on the gluster 339 | storage nodes and a command line client utility ```gluster-block``` 340 | that talks to the ```gluster-blockd``` with local RPC mechanism 341 | and can be invoked on any of the gluster storage nodes. 342 | 343 | gluster-block takes care of creating loopback files on the 344 | specified gluster volume. These volumes are then exported 345 | as iSCSI targets with the help of the tcmu-runner mechanism, 346 | using the gluster backend with libgfapi. This has the big 347 | advantage that it is talking to the gluster volume directly 348 | in user space without the need of a glusterfs fuse mount, 349 | skipping the kernel/userspace context switches and the 350 | user-visible mount altogether. 351 | 352 | The supported operations are: 353 | 354 | * create 355 | * list 356 | * info 357 | * delete 358 | * modify 359 | 360 | Details about the gluster-block architecture can be found 361 | in the gluster-block git repository 362 | and the original design notes 363 | . 364 | 365 | 366 | ## Details About Using The In-Tree iSCSI Mount Plugin 367 | 368 | The kubernetes in-tree iSCSI mount plugin is used by the PVs created 369 | by the new glusterblock external provisioner. 
The tasks that are performed 370 | when a user brings up an application pod using a corresponding glusterblock 371 | PVC are: 372 | 373 | * create /dev/sdX on the host (iscsi login / initiator) 374 | * format the device if it is has not been formatted 375 | * mount the file system on the host 376 | * bind-mount the host mounted directory into the application pod 377 | 378 | Multi-pathing support has been added to the iSCSI plugin so that 379 | the gluster-block volumes are made highly availble with the ```hacount``` 380 | feature. 381 | 382 | The iSCSI plugin has been changed in such a way that it does not allow 383 | more than one concurrent R/W mount of an iscsi block device. 384 | -------------------------------------------------------------------------------- /docs/design/tls-security.md: -------------------------------------------------------------------------------- 1 | # Enabling security in Gluster/Kubernetes 2 | This design proposal describes a plan to enable Gluster’s TLS security for both 3 | the management as well as the data path when Gluster is used as persistent 4 | storage in Kubernetes. 5 | 6 | ## Motivation 7 | Currently, the default installation of Gluster for storage in Kubernetes (via 8 | gluster-kubernetes) does not enable Gluster’s TLS security for either the 9 | management nor for the data path operations. Without TLS security enabled, 10 | Gluster will serve volumes and requests to any client that has permission 11 | according to Gluster’s `auth.allow` and `auth.deny` volume options. This allows 12 | clients to be restricted based on IP address, and UID/GID or ACLs are the 13 | mechanisms for controlling authorization of users on those clients. 14 | 15 | In a containerized environment, the network traffic sent by a container (pod) 16 | appears to originate from the hosting node, making it indistinguishable from 17 | traffic sent by the node’s infrastructure components. The result is that with 18 | just IP restriction, it is not possible to permit a node to access/mount a 19 | Gluster volume (to provide storage to a container), while preventing a 20 | container from directly accessing that same Gluster server in a malicious way. 21 | 22 | With TLS security enabled, clients must present a valid certificate to the 23 | Gluster server in order to mount and access volumes. The certificate data is 24 | stored at a location on the client nodes that does not (by default) get mapped 25 | into the container’s file system tree, preventing processes within a container 26 | from being able to directly access the Gluster server. Intended access (as 27 | persistent volume storage) is still permitted because the relevant process 28 | handling the mount and data access (e.g., fuse) is run directly on the host. In 29 | the case of containerized mounts, it would be possible to bind mount the 30 | certificate data into the container holding the mount utilities. 31 | 32 | ## Approach 33 | In order to enable TLS, a number of conditions must be met: 34 | * TLS keys and certificates must be generated and distributed to each client 35 | and server 36 | * The file: `/var/lib/glusterd/secure-access` must be present on each client 37 | and server machine. 38 | 39 | Additionally, to enable TLS for the data path, `client.ssl` and `server.ssl` 40 | options must be enabled for each volume. 41 | 42 | When using TLS keys in Gluster, the TLS keys for each machine can be 43 | self-signed keys, or a common certificate authority (CA) can be used. 
In the 44 | case of self-signed keys, each client’s certificate would need to be 45 | distributed to each server, and each server’s certificate would need to be 46 | distributed to both servers and clients. For large infrastructures, this is 47 | impractical. 48 | 49 | For keys signed by a common CA, only the CA certificate needs to be distributed 50 | to all machines, meaning no changes would be required if additional machines 51 | (clients or servers) are added to the infrastructure, other than generating a 52 | signed certificate for that new machine. 53 | 54 | ### Client (node) configuration 55 | For a Kubernetes node to be able to access a Gluster server (with TLS), it 56 | needs to have a key and certificate file generated 57 | (`/etc/ssl/glusterfs.[key|pem]`), and it needs to have a copy of the common 58 | CA’s certificate (`/etc/ssl/glusterfs.ca`). 59 | 60 | The proposal is to have the common CA (both key and pem) stored in a kubernetes 61 | secret in a namespace that is specific to Gluster (e.g., `glusterfs`). This 62 | would restrict access in a way similar to how the heketi key is managed today. 63 | This common CA key could also be signed by the cluster's top-level CA. This may 64 | provide future advantages for cross cluster operations such as georeplication. 65 | 66 | To distribute the TLS keys to each node, a DaemonSet would be created to run on 67 | each node. This DS would have access to the CA secret as well as the path on 68 | the host corresponding to `/etc/ssl`. Upon startup, it would use the CA key to 69 | generate and sign a key pair for the node and place them, along with a copy of 70 | the CA certificate, in `/etc/ssl/glusterfs.*`. It would then create the 71 | `/var/lib/glusterd/secure-access` file, which would also be exposed to the DS 72 | via a host path mapping. Note, that the secure-access file is specifically 73 | created as the final step to ensure proper sequencing with a containerized 74 | Gluster server (see below). 75 | 76 | In the event that the DS starts and sees an existing set of key files, it 77 | should check whether the `glusterfs.ca` matches the certificate in the secret 78 | and whether the `glusterfs.key` and `glusterfs.pem` files are properly signed 79 | with that CA. If any of those checks fail, the CA certificate should be 80 | re-copied and the node’s key/pem regenerated. These steps would permit keys to 81 | be updated and/or fixed by optionally updating the secret and respawning the 82 | DS. 83 | 84 | ### Containerized Gluster configuration 85 | The DaemonSet described above would also run on nodes that host the Gluster 86 | server containers. The gluster containers already mount the `/etc/ssl` 87 | directory from the host, meaning that once the DS creates them, they would be 88 | visible to the containerized server. Likewise, `/var/lib/glusterd` is also 89 | mapped from the host, giving access to the secure-access marker file. 90 | 91 | Care must be taken to ensure sequencing between the security DS and the Gluster 92 | server startup. The Gluster server must only be started once the keys and the 93 | secure-access file are in place. To accomplish this, we propose the addition of 94 | an optional environment variable, `ENABLE_TLS`, to the Gluster server pod 95 | template. If this variable is set to `1`, glusterd should not be started until 96 | the secure-access file is present. If the variable is `0` or unset, glusterd 97 | should be started immediately as is the behavior today. 
98 | 99 | ### External Gluster configuration 100 | For Gluster servers that run outside of the containerized environment, the 101 | administrator is responsible for generating and installing the key, 102 | pem, ca, and secure-access files. This is no different than in a traditional 103 | deployment of Gluster. 104 | 105 | ### Data path security 106 | The DaemonSet is sufficient to enable management security for Gluster, but data 107 | path security must be enabled on a per-volume basis by setting options on each 108 | volume. In the case of dynamic provisioning, it would be the responsibility of 109 | heketi to ensure that the `client.ssl` and `server.ssl` options are enabled 110 | when a volume is created. Version XXX of heketi added support for generic 111 | volume options passed via the StorageClass that is used for provisioning. As a 112 | result, no modifications are required to heketi, only a small change to the 113 | StorageClass that is used. 114 | 115 | ### Bootstrapping the CA secret 116 | The above approach assumes the presence of a secret holding the CA key and 117 | certificate. As a part of deploying Gluster, this secret must be created (i.e., 118 | the keys must be generated). This could be handled by a Job that is spawned at 119 | the time Gluster and heketi are deployed. By containerizing the secret 120 | creation, the key generation tools (openssl) are guaranteed to exist and be of 121 | a suitable version. Deployment of the DS and Gluster pods would necessarily 122 | wait for the completion of this secret creation Job. 123 | 124 | ## Alternatives 125 | This section lists other approaches that were considered but not chosen. 126 | 127 | ### Direct install of keys on nodes 128 | Instead of using a DS on each node, it is possible to install the key, pem, ca, 129 | and secure-access files directly on the nodes, either manually or via 130 | automation such as Ansible. While this would work, it requires access to the 131 | individual hosts separate from that provided by the Kubernetes infrastructure. 132 | Additionally, it requires the CA key and pem to be managed outside of the 133 | infrastructure as well. 134 | 135 | ### Self-signed keys 136 | Using self-signed keys requires the certificates from all nodes to be 137 | concatenated and stored in the glusterfs.ca file. This means that when adding a 138 | node to the infrastructure, the .ca file on each node would need to be updated, 139 | making it difficult to ensure consistent, atomic updates that are 140 | non-disruptive to the cluster’s storage. 
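
As a concrete illustration of the bootstrapping Job proposed above (not of the alternatives in this section), the key generation could boil down to roughly the following. The secret name and certificate lifetime are placeholders; the namespace follows the `glusterfs` example used earlier:

```
# Generate a self-signed CA and publish it as a Kubernetes secret
# for the security DaemonSet to consume.
openssl genrsa -out ca.key 4096
openssl req -x509 -new -key ca.key -days 3650 \
    -subj "/CN=gluster-ca" -out ca.pem

kubectl -n glusterfs create secret generic gluster-tls-ca \
    --from-file=ca.key=ca.key --from-file=ca.pem=ca.pem
```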
141 | -------------------------------------------------------------------------------- /docs/examples/containerized_heketi_dedicated_gluster/heketi-deployment.yaml: -------------------------------------------------------------------------------- 1 | kind: Deployment 2 | apiVersion: extensions/v1beta1 3 | metadata: 4 | name: heketi-deployment 5 | labels: 6 | app: heketi 7 | annotations: 8 | description: Defines how to deploy Heketi 9 | spec: 10 | replicas: 1 11 | template: 12 | metadata: 13 | name: heketi 14 | labels: 15 | app: heketi 16 | spec: 17 | hostNetwork: true 18 | containers: 19 | - name: heketi 20 | image: heketi/heketi:dev 21 | imagePullPolicy: IfNotPresent 22 | volumeMounts: 23 | - name: keys 24 | mountPath: /usr/share/keys 25 | - name: config 26 | mountPath: /etc/heketi 27 | - name: db 28 | mountPath: /var/lib/heketi 29 | volumes: 30 | - name: keys 31 | secret: 32 | secretName: ssh-key-secret 33 | - name: config 34 | configMap: 35 | name: heketi-config 36 | - name: db 37 | glusterfs: 38 | endpoints: glusterfs-cluster 39 | path: MyHeketi 40 | -------------------------------------------------------------------------------- /docs/examples/containerized_heketi_dedicated_gluster/heketi.json: -------------------------------------------------------------------------------- 1 | { 2 | "_port_comment": "Heketi Server Port Number", 3 | "port": "8081", 4 | 5 | "_use_auth": "Enable JWT authorization. Please enable for deployment", 6 | "use_auth": false, 7 | 8 | "_jwt": "Private keys for access", 9 | "jwt": { 10 | "_admin": "Admin has access to all APIs", 11 | "admin": { 12 | "key": "My Secret" 13 | }, 14 | "_user": "User only has access to /volumes endpoint", 15 | "user": { 16 | "key": "My Secret" 17 | } 18 | }, 19 | 20 | "_glusterfs_comment": "GlusterFS Configuration", 21 | "glusterfs": { 22 | "_executor_comment": [ 23 | "Execute plugin. Possible choices: mock, ssh", 24 | "mock: This setting is used for testing and development.", 25 | " It will not send commands to any node.", 26 | "ssh: This setting will notify Heketi to ssh to the nodes.", 27 | " It will need the values in sshexec to be configured.", 28 | "kubernetes: Communicate with GlusterFS containers over", 29 | " Kubernetes exec api." 30 | ], 31 | "executor": "ssh", 32 | 33 | "_sshexec_comment": "SSH username and private key file information", 34 | "sshexec": { 35 | "keyfile": "/usr/share/keys/heketi_key", 36 | "user": "root", 37 | "port": "22", 38 | "fstab": "/etc/fstab" 39 | }, 40 | 41 | "_kubeexec_comment": "Kubernetes configuration", 42 | "kubeexec": { 43 | "host" :"https://kubernetes.host:8443", 44 | "cert" : "/path/to/crt.file", 45 | "insecure": false, 46 | "user": "kubernetes username", 47 | "password": "password for kubernetes user", 48 | "namespace": "OpenShift project or Kubernetes namespace", 49 | "fstab": "Optional: Specify fstab file on node. Default is /etc/fstab" 50 | }, 51 | 52 | "_db_comment": "Database file name", 53 | "db": "/var/lib/heketi/heketi.db", 54 | 55 | "_loglevel_comment": [ 56 | "Set log level. Choices are:", 57 | " none, critical, error, warning, info, debug", 58 | "Default is warning" 59 | ], 60 | "loglevel" : "debug" 61 | } 62 | } 63 | 64 | -------------------------------------------------------------------------------- /docs/examples/gluster-s3-storage-template/README.md: -------------------------------------------------------------------------------- 1 | # Gluster S3 object storage as native app. on OpenShift 2 | 3 | ## Prerequisites 4 | 5 | * OpenShift setup is up with master and nodes ready. 
6 | 7 | * cns-deploy tool has been run and heketi service is ready. 8 | 9 | ## Deployment 10 | 11 | ### 1. Provide the backend store 12 | 13 | The gluster-s3 service requires there be at least two GlusterFS volumes 14 | available for its use, one to store the object data and another for the 15 | object meta-data. In this example, we will create a new StorageClass to 16 | dynamically provision these two volumes on our pre-existing GlusterFS cluster. 17 | 18 | #### Create a StorageClass 19 | 20 | In our example, we have set up heketi to require a secret key for the admin user. A StorageClass created to use such a heketi instance needs a Secret that contains the admin key. This Secret is not needed if heketi is not configured to use an admin key. 21 | Replace `NAMESPACE` and `ADMIN_KEY` parameters with your configuration. 22 | * `NAMESPACE` is the project 23 | * `ADMIN_KEY` is used for authorization to access Heketi service. 24 | 25 | ``` 26 | oc create secret generic heketi-${NAMESPACE}-admin-secret --from-literal=key=${ADMIN_KEY} --type=kubernetes.io/glusterfs 27 | ``` 28 | 29 | As an optional step, the Secret can be labelled. This is useful to be able to select the secret as part of a general query like `oc get 30 | --selector=glusterfs` and allows the secret to be removed programatically by the `gk-deploy` tool. 31 | 32 | Replace `NAMESPACE` parameter with your configuration. 33 | 34 | ``` 35 | oc label --overwrite secret heketi-${NAMESPACE}-admin-secret glusterfs=s3-heketi-${NAMESPACE}-admin-secret gluster-s3=heketi-${NAMESPACE}-admin-secret 36 | ``` 37 | 38 | Create a GlusterFS StorageClass as below: 39 | * `HEKETI_URL` is the URL to access GlusterFS cluster. 40 | * `NAMESPACE` is the project. 41 | * `STORAGE_CLASS` is the new StorageClass name provided by admin. 42 | 43 | ``` 44 | sed -e 's/${HEKETI_URL}/heketi-store-project1.cloudapps.mystorage.com/g' -e 's/${STORAGE_CLASS}/gluster-s3-store/g' -e 's/${NAMESPACE}/store-project1/g' deploy/ocp-templates/gluster-s3-storageclass.yaml | oc create -f - 45 | ``` 46 | 47 | Available at 48 | [gluster-s3-storageclass.yaml](../../../deploy/ocp-templates/gluster-s3-storageclass.yaml) 49 | 50 | #### Create backend PVCs 51 | 52 | Now, create PVCs using the StorageClass. 53 | * Replace `STORAGE_CLASS` with the above created one 54 | * Adjust `VOLUME_CAPACITY` as per your needs in GBs. 55 | 56 | ``` 57 | sed -e 's/${VOLUME_CAPACITY}/2Gi/g' -e 's/${STORAGE_CLASS}/gluster-s3-store/g' deploy/ocp-templates/gluster-s3-pvcs.yaml | oc create -f - 58 | persistentvolumeclaim "gluster-s3-claim" created 59 | persistentvolumeclaim "gluster-s3-meta-claim" created 60 | ``` 61 | 62 | Available at 63 | [gluster-s3-pvcs.yaml](../../../deploy/ocp-templates/gluster-s3-pvcs.yaml) 64 | 65 | ### 2. Start gluster-s3 service 66 | 67 | Launch S3 storage service. Set `S3_ACCOUNT` name, `S3_USER` name, `S3_PASSWORD` according to the user wish. `S3_ACCOUNT` is the S3 account which will be created and associated with GlusterFS volume. `S3_USER` is the user created to access the above account and `S3_PASSWORD` is for Authorization of the S3 user. 68 | `PVC` and `META_PVC` are persistentvolumeclaim(s) obtained from above step. 
69 | 70 | ### For example: 71 | 72 | ``` 73 | oc new-app deploy/ocp-templates/gluster-s3-template.yaml \ 74 | --param=S3_ACCOUNT=testvolume --param=S3_USER=adminuser \ 75 | --param=S3_PASSWORD=itsmine --param=PVC=gluster-s3-claim \ 76 | --param=META_PVC=gluster-s3-meta-claim 77 | --> Deploying template "store-project1/gluster-s3" for "deploy/ocp-templates/gluster-s3-template.yaml" to project store-project1 78 | 79 | gluster-s3 80 | --------- 81 | Gluster s3 service template 82 | 83 | 84 | * With parameters: 85 | * S3 Account Name=testvolume 86 | * S3 User=adminuser 87 | * S3 User Password=itsmine 88 | * Primary GlusterFS-backed PVC=gluster-s3-claim 89 | * Metadata GlusterFS-backed PVC=gluster-s3-meta-claim 90 | 91 | --> Creating resources ... 92 | service "gluster-s3-service" created 93 | route "gluster-s3-route" created 94 | deploymentconfig "gluster-s3-dc" created 95 | --> Success 96 | Run 'oc status' to view your app. 97 | ``` 98 | 99 | Available at: 100 | [gluster-s3-template.yaml](../../../deploy/ocp-templates/gluster-s3-template.yaml) 101 | 102 | 103 | ### 3. Verify gluster-s3 resources 104 | 105 | Use the following commands to verify the deployment was succesful. 106 | 107 | ``` 108 | # oc get pods -o wide 109 | NAME READY STATUS RESTARTS AGE IP NODE 110 | glusterfs-1nmdp 1/1 Running 0 4d 10.70.42.234 node3 111 | glusterfs-5k7dk 1/1 Running 0 4d 10.70.42.4 node2 112 | glusterfs-85qds 1/1 Running 0 4d 10.70.42.5 node1 113 | gluster-s3 1/1 Running 0 4m 10.130.0.29 node3 114 | heketi-1-m8817 1/1 Running 0 4d 10.130.0.19 node3 115 | storage-project-router-1-2816m 1/1 Running 0 4d 10.70.42.234 node3 116 | ``` 117 | 118 | ``` 119 | # oc get service gluster-s3-service 120 | NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE 121 | gluster-s3-service 172.30.121.75 8080/TCP 1m 122 | 123 | ``` 124 | 125 | ``` 126 | # oc get route gluster-s3-route 127 | NAME HOST/PORT PATH SERVICES PORT TERMINATION WILDCARD 128 | gluster-s3-route gluster-s3-route-storage-project.cloudapps.mystorage.com ... 1 more gluster-s3-service None 129 | 130 | ``` 131 | 132 | # Testing 133 | 134 | 135 | ### Get url of glusters3object route which exposes the s3 object storage interface 136 | ``` 137 | s3_storage_url=$(oc get routes | grep "gluster.*s3" | awk '{print $2}') 138 | ``` 139 | 140 | We will be using this url for accessing s3 object storage. 141 | 142 | 143 | ### s3curl.pl for testing 144 | Download s3curl from here [s3curl](https://aws.amazon.com/code/128) 145 | 146 | We are going to make use of s3curl.pl for verification. 147 | 148 | s3curl.pl requires the presence of `Digest::HMAC_SHA1` and `Digest::MD5`. 149 | On Red Hat-based OSes, you can install the `perl-Digest-HMAC` package to get this. 150 | 151 | Now, update s3curl.pl perl script with glusters3object url which we retreived above. 
152 | 153 | For example: 154 | 155 | ``` 156 | my @endpoints = ( 'glusters3object-storage-project.cloudapps.mystorage.com'); 157 | ``` 158 | 159 | 160 | ### Verify put of a Bucket 161 | ``` 162 | s3curl.pl --debug --id "testvolume:adminuser" --key "itsmine" --put /dev/null -- -k -v http://$s3_storage_url/bucket1 163 | ``` 164 | 165 | 166 | Sample output: 167 | 168 | ``` 169 | # s3curl.pl --debug --id "testvolume:adminuser" --key "itsmine" --put /dev/null -- -k -v http://glusters3object-storage-project.cloudapps.mystorage.com/bucket1 170 | s3curl: Found the url: host=glusters3object-storage-project.cloudapps.mystorage.com; port=; uri=/bucket1; query=; 171 | s3curl: ordinary endpoint signing case 172 | s3curl: StringToSign='PUT\n\n\nFri, 30 Jun 2017 05:19:41 +0000\n/bucket1' 173 | s3curl: exec curl -H Date: Fri, 30 Jun 2017 05:19:41 +0000 -H Authorization: AWS testvolume:adminuser:5xMXB7uyz51dUcephS6g1dVFwCM= -L -H content-type: -T /dev/null -k -v http://glusters3object-storage-project.cloudapps.mystorage.com/bucket1 174 | * About to connect() to glusters3object-storage-project.cloudapps.mystorage.com port 80 (#0) 175 | * Trying 10.70.42.234... 176 | * Connected to glusters3object-storage-project.cloudapps.mystorage.com (10.70.42.234) port 80 (#0) 177 | > PUT /bucket1 HTTP/1.1 178 | > User-Agent: curl/7.29.0 179 | > Host: glusters3object-storage-project.cloudapps.mystorage.com 180 | > Accept: */* 181 | > Transfer-Encoding: chunked 182 | > Date: Fri, 30 Jun 2017 05:19:41 +0000 183 | > Authorization: AWS testvolume:adminuser:5xMXB7uyz51dUcephS6g1dVFwCM= 184 | > Expect: 100-continue 185 | > 186 | < HTTP/1.1 200 OK 187 | < Content-Type: text/html; charset=UTF-8 188 | < Location: bucket1 189 | < Content-Length: 0 190 | < X-Trans-Id: tx188fd6bb5f41403c8d114-005955df6d 191 | < Date: Fri, 30 Jun 2017 05:19:41 GMT 192 | < Set-Cookie: fad43e2ce02bfea85cd465cc937029f2=0551e8024aa5cd2c9b0791109252676d; path=/; HttpOnly 193 | < Cache-control: private 194 | < 195 | * Connection #0 to host glusters3object-storage-project.cloudapps.mystorage.com left intact 196 | ``` 197 | 198 | ### Verify object put request. 
Create a simple file with some content 199 | ``` 200 | touch my_object.jpg 201 | 202 | 203 | echo \"Hello Gluster from OpenShift - for S3 access demo\" > my_object.jpg 204 | 205 | 206 | s3curl.pl --debug --id "testvolume:adminuser" --key "itsmine" --put my_object.jpg -- -k -v -s http://$s3_storage_url/bucket1/my_object.jpg 207 | ``` 208 | 209 | ### Verify listing objects in the container 210 | ``` 211 | s3curl.pl --debug --id "testvolume:adminuser" --key "itsmine" -- -k -v -s http://$s3_storage_url/bucket1/ 212 | ``` 213 | 214 | ### Verify object get request 215 | ``` 216 | s3curl.pl --debug --id "testvolume:adminuser" --key "itsmine" -- -o test_object.jpg http://$s3_storage_url/bucket1/my_object.jpg 217 | ``` 218 | 219 | ### Verify received object 220 | ``` 221 | cat test_object.jpg 222 | ``` 223 | 224 | ### Verify object delete request 225 | ``` 226 | s3curl.pl --debug --id "testvolume:adminuser" --key "itsmine" --delete -- http://$s3_storage_url/bucket1/my_object.jpg 227 | ``` 228 | 229 | ### Verify listing of objects 230 | ``` 231 | s3curl.pl --debug --id "testvolume:adminuser" --key "itsmine" -- -k -v -s http://$s3_storage_url/bucket1/ 232 | ``` 233 | 234 | ### Verify bucket delete request 235 | ``` 236 | s3curl.pl --debug --id "testvolume:adminuser" --key "itsmine" --delete -- http://$s3_storage_url/bucket1 237 | ``` 238 | 239 | ### To add a new user to the S3 account: 240 | 241 | #### First login to the pod 242 | ``` 243 | oc rsh 244 | ``` 245 | 246 | #### This step prepares the gluster volume where gswauth will save its metadata 247 | ``` 248 | gswauth-prep -A http://:8080/auth -K gswauthkey 249 | ``` 250 | 251 | Where, `ipaddr` is the IP address of the glusters3 pod obtained from 'oc get pods -o wide' 252 | 253 | #### To add user to account 254 | ``` 255 | gswauth-add-user -K gswauthkey -a 256 | ``` 257 | -------------------------------------------------------------------------------- /docs/examples/hello_world/README.md: -------------------------------------------------------------------------------- 1 | # Hello World application using GlusterFS Dynamic Provisioning 2 | 3 | At this point, we have a working Kubernetes cluster deployed, and a working Heketi Server. 4 | Next we will create a simple NGINX HelloWorld application utilizing Kubernetes Dynamic Provisioning and 5 | Heketi. 6 | 7 | This example assumes some familiarity with Kubernetes and the [Kubernetes Persistent Storage](http://kubernetes.io/docs/user-guide/persistent-volumes/) model. 8 | 9 | 10 | ### Verify our environment and gather some information to be used in later steps. 11 | 12 | Identify the Heketi REST URL and Server IP Address: 13 | 14 | ``` 15 | $ echo $HEKETI_CLI_SERVER 16 | http://10.42.0.0:8080 17 | ``` 18 | 19 | By default, `user_authorization` is disabled. If it were enabled, you might 20 | also need to find the rest user and rest user secret key (not applicable for 21 | this example as any values will work). It is also possible to configure a 22 | `secret` and pass the credentials to the Gluster dynamic provisioner via 23 | StorageClass parameters. 24 | 25 | #### Dynamic provisioner in Kubernetes 1.4 #### 26 | 27 | ***NOTE***: Endpoints define the GlusterFS cluster, for version 1.4.X this is 28 | a required parameter for the StorageClass. For versions later than 1.4.X skip 29 | this step. 
30 | 31 | Identify the Gluster Storage Endpoint to be passed in as a parameter to 32 | the StorageClass (heketi-storage-endpoints): 33 | 34 | ``` 35 | kubectl get endpoints 36 | NAME ENDPOINTS AGE 37 | heketi 10.42.0.0:8080 22h 38 | heketi-storage-endpoints 192.168.10.100:1,192.168.10.101:1,192.168.10.102:1 22h 39 | kubernetes 192.168.10.90:6443 23h 40 | ``` 41 | 42 | #### Dynamic provisioner in Kubernetes >= 1.5 #### 43 | 44 | Starting with Kubernetes 1.5 a manual Endpoint is no longer necessary for the 45 | GlusterFS dynamic provisioner. In Kubernetes 1.6 and later manually specifying 46 | an endpoint will cause the provisioning to fail. When the dynamic provisioner 47 | creates a volume it will also automatically create the Endpoint. 48 | 49 | There are other StorageClass parameters (e.g. cluster, GID) which were added 50 | to the Gluster dynamic provisioner in Kubernetes. Please refer to 51 | [GlusterFS Dynamic Provisioning](https://github.com/kubernetes/kubernetes/blob/master/examples/experimental/persistent-volume-provisioning/README.md) 52 | for more details on these parameters. 53 | 54 | ### Create a _StorageClass_ for our GlusterFS Dynamic Provisioner 55 | 56 | [Kuberentes Storage Classes](http://kubernetes.io/docs/user-guide/persistent-volumes/#storageclasses) are used to 57 | manage and enable Persistent Storage in Kubernetes. Below is an example of a _Storage Class_ that will request 58 | 5GB of on-demand storage to be used with our _HelloWorld_ application. 59 | 60 | 61 | ##### For Kubernetes 1.4: 62 | ``` 63 | apiVersion: storage.k8s.io/v1beta1 64 | kind: StorageClass 65 | metadata: 66 | name: gluster-heketi <1> 67 | provisioner: kubernetes.io/glusterfs <2> 68 | parameters: 69 | endpoint: "heketi-storage-endpoints" <3> 70 | resturl: "http://10.42.0.0:8080" <4> 71 | restuser: "joe" <5> 72 | restuserkey: "My Secret Life" <6> 73 | ``` 74 | 75 | ##### For Kubernetes 1.5 and later: 76 | ``` 77 | apiVersion: storage.k8s.io/v1beta1 78 | kind: StorageClass 79 | metadata: 80 | name: gluster-heketi <1> 81 | provisioner: kubernetes.io/glusterfs <2> 82 | parameters: 83 | resturl: "http://10.42.0.0:8080" <4> 84 | restuser: "joe" <5> 85 | restuserkey: "My Secret Life" <6> 86 | ``` 87 | <1> Name of the Storage Class 88 | 89 | <2> Provisioner 90 | 91 | <3> GlusterFS defined EndPoint taken from Step 1 above (kubectl get endpoints). For Kubernetes >= 1.6, this parameter should be removed as Kubernetes will reject this YAML definition. 92 | 93 | <4> Heketi REST Url, taken from Step 1 above (echo $HEKETI_CLI_SERVER), may also be set to the Kubernetes service DNS name for the Heketi service. 94 | 95 | <5> Restuser, can be anything since authorization is turned off 96 | 97 | <6> Restuserkey, like Restuser, can be anything 98 | 99 | Create the Storage Class YAML file. Save it. Then submit it to Kubernetes 100 | 101 | ``` 102 | kubectl create -f gluster-storage-class.yaml 103 | storageclass "gluster-heketi" created 104 | ``` 105 | 106 | View the Storage Class: 107 | 108 | ``` 109 | kubectl get storageclass 110 | NAME TYPE 111 | gluster-heketi kubernetes.io/glusterfs 112 | ``` 113 | 114 | 115 | ### Create a PersistentVolumeClaim (PVC) to request storage for our HelloWorld application. 116 | 117 | Next, we will create a PVC that will request 5GB of storage, at which time, the Kubernetes Dynamic Provisioning Framework and Heketi 118 | will automatically provision a new GlusterFS volume and generate the Kubernetes PersistentVolume (PV) object. 
119 | 120 | ``` 121 | apiVersion: v1 122 | kind: PersistentVolumeClaim 123 | metadata: 124 | name: gluster1 125 | annotations: 126 | volume.beta.kubernetes.io/storage-class: gluster-heketi <1> 127 | spec: 128 | accessModes: 129 | - ReadWriteOnce 130 | resources: 131 | requests: 132 | storage: 5Gi <2> 133 | ``` 134 | <1> The Kubernetes Storage Class annotation and the name of the Storage Class 135 | 136 | <2> The amount of storage requested 137 | 138 | 139 | Create the PVC YAML file. Save it. Then submit it to Kubernetes 140 | 141 | ``` 142 | kubectl create -f gluster-pvc.yaml 143 | persistentvolumeclaim "gluster1" created 144 | ``` 145 | 146 | View the PVC: 147 | 148 | ``` 149 | kubectl get pvc 150 | NAME STATUS VOLUME CAPACITY ACCESSMODES AGE 151 | gluster1 Bound pvc-7d37c7bd-bb5b-11e6-b81e-525400d87180 5Gi RWO 14h 152 | 153 | ``` 154 | 155 | Notice, that the PVC is bound to a dynamically created volume. We can also view 156 | the Volume (PV): 157 | 158 | ``` 159 | kubectl get pv 160 | NAME CAPACITY ACCESSMODES RECLAIMPOLICY STATUS CLAIM REASON AGE 161 | pvc-7d37c7bd-bb5b-11e6-b81e-525400d87180 5Gi RWO Delete Bound default/gluster1 14h 162 | 163 | ``` 164 | 165 | ### Create a NGINX pod that uses the PVC 166 | 167 | At this point we have a dynamically created GlusterFS volume, bound to a PersistentVolumeClaim, we can now utilize this claim 168 | in a pod. We will create a simple NGINX pod. 169 | 170 | ``` 171 | apiVersion: v1 172 | kind: Pod 173 | metadata: 174 | name: nginx-pod1 175 | labels: 176 | name: nginx-pod1 177 | spec: 178 | containers: 179 | - name: nginx-pod1 180 | image: gcr.io/google_containers/nginx-slim:0.8 181 | ports: 182 | - name: web 183 | containerPort: 80 184 | volumeMounts: 185 | - name: gluster-vol1 186 | mountPath: /usr/share/nginx/html 187 | volumes: 188 | - name: gluster-vol1 189 | persistentVolumeClaim: 190 | claimName: gluster1 <1> 191 | ``` 192 | <1> The name of the PVC created in step 3 193 | 194 | 195 | 196 | Create the Pod YAML file. Save it. Then submit it to Kubernetes 197 | 198 | ``` 199 | kubectl create -f nginx-pod.yaml 200 | pod "nginx-pod1" created 201 | ``` 202 | 203 | View the Pod (Give it a few minutes, it might need to download the image if it doesn't already exist): 204 | 205 | ``` 206 | kubectl get pods -o wide 207 | NAME READY STATUS RESTARTS AGE IP NODE 208 | nginx-pod1 1/1 Running 0 9m 10.38.0.0 node1 209 | glusterfs-node0-2509304327-vpce1 1/1 Running 0 1d 192.168.10.100 node0 210 | glusterfs-node1-3290690057-hhq92 1/1 Running 0 1d 192.168.10.101 node1 211 | glusterfs-node2-4072075787-okzjv 1/1 Running 0 1d 192.168.10.102 node2 212 | heketi-3017632314-yyngh 1/1 Running 0 1d 10.42.0.0 node0 213 | 214 | ``` 215 | 216 | Now we will exec into the container and create an index.html file 217 | 218 | ``` 219 | kubectl exec -ti nginx-pod1 /bin/sh 220 | $ cd /usr/share/nginx/html 221 | $ echo 'Hello World from GlusterFS!!!' > index.html 222 | $ ls 223 | index.html 224 | $ exit 225 | ``` 226 | 227 | Now we can curl the URL of our pod: 228 | 229 | ``` 230 | curl http://10.38.0.0 231 | Hello World from GlusterFS!!! 232 | ``` 233 | 234 | Lastly, let's check our gluster pod, to see the index.html file we wrote. 
Choose any of the gluster pods 235 | 236 | ``` 237 | kubectl exec -ti glusterfs-node1-3290690057-hhq92 /bin/sh 238 | $ mount | grep heketi 239 | /dev/mapper/VolGroup00-LogVol00 on /var/lib/heketi type xfs (rw,relatime,seclabel,attr2,inode64,noquota) 240 | /dev/mapper/vg_f92e09091f6b20ab12b02a2513e4ed90-brick_1e730a5462c352835055018e1874e578 on /var/lib/heketi/mounts/vg_f92e09091f6b20ab12b02a2513e4ed90/brick_1e730a5462c352835055018e1874e578 type xfs (rw,noatime,seclabel,nouuid,attr2,inode64,logbsize=256k,sunit=512,swidth=512,noquota) 241 | /dev/mapper/vg_f92e09091f6b20ab12b02a2513e4ed90-brick_d8c06e606ff4cc29ccb9d018c73ee292 on /var/lib/heketi/mounts/vg_f92e09091f6b20ab12b02a2513e4ed90/brick_d8c06e606ff4cc29ccb9d018c73ee292 type xfs (rw,noatime,seclabel,nouuid,attr2,inode64,logbsize=256k,sunit=512,swidth=512,noquota) 242 | 243 | $ cd /var/lib/heketi/mounts/vg_f92e09091f6b20ab12b02a2513e4ed90/brick_d8c06e606ff4cc29ccb9d018c73ee292/brick 244 | $ ls 245 | index.html 246 | $ cat index.html 247 | Hello World from GlusterFS!!! 248 | ``` 249 | 250 | 251 | 252 | 253 | 254 | 255 | 256 | 257 | 258 | -------------------------------------------------------------------------------- /docs/examples/hello_world/gluster-pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: gluster1 5 | annotations: 6 | volume.beta.kubernetes.io/storage-class: gluster-heketi 7 | spec: 8 | accessModes: 9 | - ReadWriteOnce 10 | resources: 11 | requests: 12 | storage: 5Gi 13 | -------------------------------------------------------------------------------- /docs/examples/hello_world/gluster-storage-class.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1beta1 2 | kind: StorageClass 3 | metadata: 4 | name: gluster-heketi 5 | provisioner: kubernetes.io/glusterfs 6 | parameters: 7 | endpoint: "heketi-storage-endpoints" 8 | resturl: "http://10.42.0.0:8080" 9 | restuser: "joe" 10 | restuserkey: "My Secret Life" 11 | 12 | 13 | -------------------------------------------------------------------------------- /docs/examples/hello_world/nginx-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: gluster-pod1 5 | labels: 6 | name: gluster-pod1 7 | spec: 8 | containers: 9 | - name: gluster-pod1 10 | image: gcr.io/google_containers/nginx-slim:0.8 11 | ports: 12 | - name: web 13 | containerPort: 80 14 | securityContext: 15 | privileged: true 16 | volumeMounts: 17 | - name: gluster-vol1 18 | mountPath: /usr/share/nginx/html 19 | volumes: 20 | - name: gluster-vol1 21 | persistentVolumeClaim: 22 | claimName: gluster1 23 | -------------------------------------------------------------------------------- /docs/examples/sample-gluster-endpoints.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Endpoints 3 | metadata: 4 | name: glusterfs-cluster 5 | subsets: 6 | - addresses: 7 | - ip: 192.168.10.100 8 | ports: 9 | - port: 1 10 | - addresses: 11 | - ip: 192.168.10.101 12 | ports: 13 | - port: 1 14 | - addresses: 15 | - ip: 192.168.10.102 16 | ports: 17 | - port: 1 18 | -------------------------------------------------------------------------------- /docs/examples/sample-gluster-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | 
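  # Companion to sample-gluster-endpoints.yaml: a selector-less Service that
  # keeps the manually created glusterfs-cluster Endpoints persistent. The
  # port value is a placeholder, not a real GlusterFS port (see
  # docs/setup-guide.md for the ports GlusterFS actually uses).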
metadata: 4 | name: glusterfs-cluster 5 | spec: 6 | ports: 7 | - port: 1 8 | 9 | -------------------------------------------------------------------------------- /docs/presentations/2017-03-vault/Gluster-in-Kubernetes.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gluster/gluster-kubernetes/7246eb4053c8c5336e4da68d86b76124d435eb3e/docs/presentations/2017-03-vault/Gluster-in-Kubernetes.pdf -------------------------------------------------------------------------------- /docs/presentations/2017.02.05_-_gluster-kubernetes_fosdem.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gluster/gluster-kubernetes/7246eb4053c8c5336e4da68d86b76124d435eb3e/docs/presentations/2017.02.05_-_gluster-kubernetes_fosdem.pdf -------------------------------------------------------------------------------- /docs/presentations/2017.02.05_-_gluster-kubernetes_fosdem.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gluster/gluster-kubernetes/7246eb4053c8c5336e4da68d86b76124d435eb3e/docs/presentations/2017.02.05_-_gluster-kubernetes_fosdem.png -------------------------------------------------------------------------------- /docs/presentations/README.md: -------------------------------------------------------------------------------- 1 | #gluster-kubernetes Presentations 2 | 3 | This directory contains slides and links to presentations about the work in the 4 | gluster-kubernetes project. 5 | 6 | ## 2017.01-02: DevConf.cz 2017 and FOSDEM 2017 7 | 8 | 9 | [Ashiq](https://github.com/MohamedAshiqrh) and [Jose](https://github.com/jarrpa) 10 | traveled to Brno, CZ and Brussels, BE to give an overview presentation on 11 | hyper-converged GlusterFS on Kubernetes and demo the gk-deploy tool. 12 | 13 | * **DevConf.cz 2017** 14 | 15 | **Slides:** [![Hyper-converged, persistent storage for containers with GlusterFS](./2017.02.05_-_gluster-kubernetes_fosdem.png)](./2017.02.05_-_gluster-kubernetes_fosdem.pdf) 16 | 17 | NOTE: The slides used for DevConf were a somewhat incomplete version of the 18 | ones seen at FOSDEM. ;) 19 | 20 | **Video:** [DevConf.cz 2017 presentation](https://youtu.be/DGuP38tAh48?t=5m12s) 21 | 22 | * **FOSDEM 2017** 23 | 24 | **Main talk (slides and video):** [Hyper-converged, persistent storage for containers with GlusterFS](https://fosdem.org/2017/schedule/event/glustercontainer/) 25 | 26 | **Lightning talk (slides and video):** [Kubernetes+GlusterFS: Lightning Ver.](https://fosdem.org/2017/schedule/event/kubegluster/) 27 | 28 | ## 2017-03-23 [vault](http://events.linuxfoundation.org/events/vault/) 29 | 30 | [Michael](https://github.com/obnoxxx) presented on gluster-kubernetes at the 31 | vault conference in Cambride, MA. 
32 | 33 | * **slides**: [Gluster in Kubernetes](2017-03-vault/Gluster-in-Kubernetes.pdf) 34 | * **demos**: 35 | * gk-deploy: [online](https://asciinema.org/a/5apn5yv7rryqa0hpjozq0s06v) [download](2017-03-vault/demo-deploy.json) 36 | * heketi: [online](https://asciinema.org/a/9cluxpf9weuyq6oqhmd3v7r0c) [download](./2017-03-vault/demo-test-heketi.json) 37 | * dynamic provisioning: [online](https://asciinema.org/a/amyldm9lp8sxfqc89eogymx0x) [download](./2017-03-vault/demo-dynamic-provisioning.json) 38 | 39 | -------------------------------------------------------------------------------- /docs/release-maintenance.md: -------------------------------------------------------------------------------- 1 | # gluster-kubernetes Release and Maintenance Policies 2 | 3 | This document outlines the release and maintenance policies of the 4 | gluster-kubernetes project. 5 | 6 | ## Maintained Branches 7 | 8 | The project will only support and actively maintain two branches, `master` and 9 | the latest release branch. The latest release branch will always be reachable 10 | by two HEADs, `-latest` and `stable`. Requests for support of older 11 | branches may be considered on a case-by-case basis, but users will be 12 | encouraged to use newer versions where possible. 13 | 14 | ## Version Numbering 15 | 16 | This project follows the versioning guidelines outlined by the [Semantic 17 | Versioning specification, version 2.0.0](http://semver.org/spec/v2.0.0.html). 18 | In short, versions numbers will follow the structure `..`, 19 | with the following definitions: 20 | 21 | * MAJOR version indicates a fundamental change to the structure of the 22 | project, often due to innovations from significant changes in the component 23 | projects. Major versions will typically not be compatible with older 24 | versions of the component projects. 25 | * MINOR version indicates a major feature, a broad set of changes, and/or new 26 | releases of the component projects. Minor versions retain the following 27 | compatibility guarantees: 28 | 1. Component projects from the last MAJOR release will still work with the 29 | current code. 30 | 2. Deployments made with the current code will not conflict with other 31 | deployments made since the last MAJOR release. 32 | * PATCH version indicates backwards-compatible bug fixes, and guarantees that 33 | the versions of the component projects has not changed. 34 | 35 | ## Branch Definitions and Structure 36 | 37 | The `master` branch will always contain the latest development code. The 38 | project guarantees that this branch will be functional and tested but not 39 | bug-free. It will always track the latest versions of all component projects 40 | and makes no guarantee of backwards compatibility to older versions of those 41 | projects or itself. 42 | 43 | The `stable` branch will track the latest stable release of the code. When a 44 | new release is made, the `stable` branch will be moved to follow the new 45 | release branch. 46 | 47 | Each MAJOR and MINOR release will get its own branch, forked from `master`. 48 | Each release branch name will be of the form `-latest`, e.g. 49 | `1.0-latest`. PATCH releases to those versions will be made in those branches, 50 | and will be marked by tags of the form `v`, e.g. `v1.0.0`. PATCH 51 | releases may contain more than one commit, depending on the whimsy of the 52 | release engineers. :) 53 | 54 | Commits to a release branch will be of the following types, ranked in order of 55 | preference: 56 | 57 | 1. 
Direct cherry-picks from `master` (`git cherry-pick -sx`) 58 | 2. Cherry-picks from `master` modified to resolve conflicts (change `cherry 59 | picked from commit` to `based on commit`) 60 | 3. Custom patches 61 | 62 | An example git history is presented below. 63 | ``` 64 | * I (master) 65 | | 66 | * H * H' (1.1-latest, stable) tag: v1.1.0 67 | | | 68 | | * G 69 | | | 70 | | / 71 | | / 72 | | / 73 | |/ 74 | * F * F' (1.0-latest) tag: v.1.0.3 75 | | | 76 | * E * E' tag: v1.0.2 77 | | | 78 | | * D 79 | | | 80 | * C * C' tag: v1.0.1 81 | | | 82 | | * B tag: v1.0.0 83 | | / 84 | | / 85 | | / 86 | |/ 87 | * A 88 | ``` 89 | -------------------------------------------------------------------------------- /docs/setup-guide.md: -------------------------------------------------------------------------------- 1 | # Setup Guide 2 | 3 | This guide contains detailed instructions for deploying GlusterFS + heketi onto 4 | Kubernetes. 5 | 6 | ## Infrastructure Requirements 7 | 8 | The only strict requirement is a pre-existing Kubernetes cluster and 9 | administrative access to that cluster. You can opt to deploy GlusterFS as a 10 | hyper-converged service on your Kubernetes nodes if they meet the following 11 | requirements: 12 | 13 | * There must be at least three nodes. 14 | 15 | * Each node must have at least one raw block device attached (like an EBS 16 | Volume or a local disk) for use by heketi. These devices must not have any 17 | data on them, as they will be formatted and partitioned by heketi. 18 | 19 | * Each node must have the following ports opened for GlusterFS communications: 20 | 21 | * 2222 - GlusterFS pod's sshd 22 | 23 | * 24007 - GlusterFS Daemon 24 | 25 | * 24008 - GlusterFS Management 26 | 27 | * 49152 to 49251 - Each brick for every volume on the host requires its own 28 | port. For every new brick, one new port will be used starting at 49152. We 29 | recommend a default range of 49152-49251 on each host, though you can 30 | adjust this to fit your needs. 31 | 32 | * The following kernel modules must be loaded: 33 | 34 | 1. dm_snapshot 35 | 2. dm_mirror 36 | 3. dm_thin_pool 37 | 38 | For kernel modules, `lsmod | grep ` will show you if a given module is present, and `modprobe ` will load 39 | a given module. 40 | 41 | * Each node requires that the `mount.glusterfs` command is available. Under 42 | all Red Hat-based OSes this command is provided by the `glusterfs-fuse` 43 | package. 44 | 45 | * GlusterFS client version installed on nodes should be as close as possible 46 | to the version of the server. To get installed versions run 47 | `glusterfs --version` or `kubectl exec -- glusterfs --version`. 48 | 49 | If you are not able to deploy a hyper-converged GlusterFS cluster, you must 50 | have one running somewhere that the Kubernetes nodes can access. The above 51 | requirements still apply for any pre-existing GlusterFS cluster. 52 | 53 | ## Deployment Overview 54 | 55 | An administrator must provide the topology information of the GlusterFS cluster 56 | to be accessed by heketi. The majority of the deployment tasks are handled by 57 | the [gk-deploy](../deploy/gk-deploy) script. The following is an overview of 58 | the steps taken by the script: 59 | 60 | 1. Creates a Service Account for heketi to securely communicate with the 61 | GlusterFS nodes. 62 | 2. As an option, deploys GlusterFS as a 63 | [DaemonSet](http://kubernetes.io/docs/admin/daemons/) onto the Kubernetes 64 | nodes specified in the topology. 65 | 3. 
Deploys an instance of heketi called 'deploy-heketi', which is used to 66 | initialize the heketi database. 67 | 4. Creates the Service and Endpoints for communicating with the GlusterFS 68 | cluster and initializes the heketi database by creating a GlusterFS volume, 69 | then copies the database onto that same volume for use by the final 70 | instance of heketi. 71 | 5. Deletes all the 'deploy-heketi' related resources. 72 | 6. Deploys the final instance of the heketi service. 73 | 74 | ## Deployment 75 | 76 | ### 1. Create a topology file 77 | 78 | As mentioned in the overview, an administrator must provide the GlusterFS 79 | cluster topology information. This takes the form of a topology file, which 80 | describes the nodes present in the GlusterFS cluster and the block devices 81 | attached to them for use by heketi. A 82 | [sample topology file](../deploy/topology.json.sample) is provided. When 83 | creating your own topology file: 84 | 85 | * Make sure the topology file only lists block devices intended for heketi's 86 | use. heketi needs access to whole block devices (e.g. /dev/sdb, /dev/vdb) 87 | which it will partition and format. 88 | 89 | * The `hostnames` array is a bit misleading. `manage` should be a list of 90 | hostnames for the node, but `storage` should be a list of IP addresses on 91 | the node for backend storage communications. 92 | 93 | ### 2. Run the deployment script 94 | 95 | Next, run the [gk-deploy](../deploy/gk-deploy) script from a machine with 96 | administrative access to your Kubernetes cluster. You should familiarize 97 | yourself with the script's options by running `gk-deploy -h`. Some things to 98 | note when running the script: 99 | 100 | * By default it expects the topology file to be in the same directory as 101 | itself. You can specify a different location as the first non-option 102 | argument on the command-line. 103 | 104 | * By default it expects to have access to Kubernetes template files in a 105 | subdirectory called `kube-templates`. Specify their location otherwise 106 | with `-t`. 107 | 108 | * By default it will NOT deploy GlusterFS, allowing you to use heketi with 109 | any existing GlusterFS cluster. If you specify the `-g` option, it will 110 | deploy a GlusterFS DaemonSet onto your Kubernetes cluster by treating the 111 | nodes listed in the topology file as hyper-converged nodes with both 112 | Kubernetes and storage devices on them. 113 | 114 | * If you use a pre-existing GlusterFS cluster, please note that any 115 | pre-existing volumes will not be detected by heketi, and thus not be under 116 | heketi's management. 117 | 118 | # Usage Examples 119 | 120 | Running the following from a node with Kubernetes administrative access and 121 | [heketi-cli](https://github.com/heketi/heketi/releases) installed creates a 122 | 100GB Persistent Volume 123 | [which can be claimed](http://kubernetes.io/docs/user-guide/persistent-volumes/#claims-as-volumes) 124 | from any application: 125 | 126 | ``` 127 | $ export HEKETI_CLI_SERVER=http://
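# The URL should point at your heketi service; assuming the service is named
# "heketi" in the current namespace, it can be looked up with:
#   kubectl get svc/heketi --template 'http://{{.spec.clusterIP}}:{{(index .spec.ports 0).port}}'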
128 | $ heketi-cli volume create --size=100 \ 129 | --persistent-volume \ 130 | --persistent-volume-endpoint=heketi-storage-endpoints | kubectl create -f - 131 | ``` 132 | 133 | You will also find a [sample application](./examples/hello_world) shipped as 134 | part of this documentation. 135 | -------------------------------------------------------------------------------- /tests/Makefile: -------------------------------------------------------------------------------- 1 | test: simple-tests 2 | 3 | simple-tests: 4 | $(MAKE) -C simple test 5 | 6 | .PHONY: test simple-tests 7 | -------------------------------------------------------------------------------- /tests/README.md: -------------------------------------------------------------------------------- 1 | # Testsuite 2 | 3 | This contains the tests for gluster-kubernetes. 4 | * The subdirectory `simple` contains simple local tests 5 | like syntax-checks, unit tests and test that use mocking and stubbing. 6 | * The subdirectory `complex` contains end-to-end functional tests 7 | run in vagrant environments. 8 | -------------------------------------------------------------------------------- /tests/complex/Makefile: -------------------------------------------------------------------------------- 1 | test: 2 | ./run.sh 3 | 4 | .PHONY: test 5 | -------------------------------------------------------------------------------- /tests/complex/README.md: -------------------------------------------------------------------------------- 1 | # Test suite - complex tests for gluster-kubernetes 2 | 3 | These tests are complex, end-to-end functional tests using 4 | our vagrant based test environment. 5 | 6 | ## Running 7 | 8 | `./run.sh` will run a basic set of tests. The tests can 9 | also be run individually like 10 | 11 | ``` 12 | ./test-setup.sh 13 | ./test-gk-deploy.sh 14 | ``` 15 | 16 | and at a later time: 17 | 18 | ``` 19 | ./test-dynamic-provisioning.sh 20 | ``` 21 | 22 | There are additional test runs available which require that 23 | the vagrant environment be setup (e.g. `test-setup.sh`) 24 | before each run: 25 | 26 | * `run-basic.sh`: Test basic deployment and functionality 27 | * `run-object.sh`: Test gluster-s3 deployment 28 | 29 | Running `run-all.sh` will go through all test runs, rolling 30 | back the vagrant environment between each run. 31 | 32 | ## Environment variables 33 | 34 | There are various environment variables that can be 35 | overridden, so that this can run for instance against 36 | a different vagrant environment. E.g. if you have 37 | already brought up a vagrant environment manually, 38 | you could from the vagrant-dir do: 39 | 40 | ``` 41 | export VAGRANT_DIR=$(realpath ./) 42 | ../tests/functional/test-gk-deploy.sh 43 | ``` 44 | 45 | All variables: 46 | - `BASE_DIR` 47 | - `TEST_DIR` 48 | - `DEPLOY_DIR` 49 | - `VAGRANT_DIR` 50 | - `TOPOLOGY_FILE` 51 | 52 | If you override these, then you should provide absolute paths. 
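For example, to run a single test against your own checkout and an existing
vagrant environment (all paths below are illustrative):

```
export BASE_DIR=/home/me/src/gluster-kubernetes
export DEPLOY_DIR="${BASE_DIR}/deploy"
export TOPOLOGY_FILE="${DEPLOY_DIR}/topology.json.sample"
export VAGRANT_DIR=/home/me/vagrant-envs/gluster-kubernetes
"${BASE_DIR}/tests/complex/test-gk-deploy.sh"
```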
53 | -------------------------------------------------------------------------------- /tests/complex/lib.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # honor variables set from the caller: 4 | : "${TEST_DIR="$(cd "$(dirname "${0}")" && pwd)"}" 5 | : "${BASE_DIR="${TEST_DIR}/../.."}" 6 | : "${VAGRANT_DIR="${BASE_DIR}/vagrant"}" 7 | : "${DEPLOY_DIR="${BASE_DIR}/deploy"}" 8 | : "${TOPOLOGY_FILE="${DEPLOY_DIR}/topology.json.sample"}" 9 | : "${TESTNAME="$(basename "${0}")"}" 10 | : "${TEST_LOG="${TEST_DIR}/gk-tests.log"}" 11 | : "${SUBTEST_MSG=""}" 12 | : "${SUBTEST_COUNT=0}" 13 | : "${SUBTEST_OUT=1}" 14 | : "${RUN_DEPTH=0}" 15 | : "${RUN_SUMMARY=""}" 16 | 17 | SSH_CONFIG=${VAGRANT_DIR}/ssh-config 18 | LOCAL_FAILURE=0 19 | 20 | pass() { 21 | if [[ "x${TESTNAME}" != "x" ]]; then 22 | echo -en "| \e[32m\e[1mPASS...:\e[21m" 23 | echo -en " ${TESTNAME}" 24 | # print subtest information if we haven't yet 25 | if [[ ${SUBTEST_OUT} -eq 0 ]]; then 26 | if [[ ${SUBTEST_COUNT} -gt 0 ]]; then 27 | echo -en ":${SUBTEST_COUNT}" 28 | fi 29 | if [[ "x${SUBTEST_MSG}" != "x" ]]; then 30 | echo -en ": ${SUBTEST_MSG}" 31 | fi 32 | fi 33 | 34 | if [[ ${#} -ge 1 ]]; then 35 | echo -en " ${*}" 36 | fi 37 | 38 | echo -e "\e[0m" 39 | fi 40 | } 41 | 42 | fail() { 43 | if [[ "x${TESTNAME}" != "x" ]]; then 44 | echo -en "| \e[31m\e[1mFAIL...:\e[21m " 45 | echo -en "${TESTNAME}" 46 | # print subtest information if we haven't yet 47 | if [[ ${SUBTEST_OUT} -eq 0 ]]; then 48 | if [[ ${SUBTEST_COUNT} -gt 0 ]]; then 49 | echo -en ":${SUBTEST_COUNT}" 50 | fi 51 | if [[ "x${SUBTEST_MSG}" != "x" ]]; then 52 | echo -en ": ${SUBTEST_MSG}" 53 | fi 54 | fi 55 | 56 | if [[ ${#} -ge 1 ]]; then 57 | echo -en " ${*}" 58 | fi 59 | 60 | echo -e "\e[0m" 61 | fi 62 | 63 | } 64 | 65 | create_vagrant() { 66 | cd "${VAGRANT_DIR}" || exit 1 67 | 68 | local vstatus 69 | local run=0 70 | 71 | vstatus=$(vagrant status | grep "master\|node") 72 | for mstatus in ${vstatus}; do 73 | if [[ "$(echo "${mstatus}" | grep "running")" == "" ]]; then 74 | run=1 75 | fi 76 | done 77 | 78 | if [[ ${run} -eq 1 ]]; then 79 | ./up.sh || end_test -e "Error bringing up vagrant environment" 80 | fi 81 | 82 | ssh_config 83 | } 84 | 85 | start_vagrant() { 86 | cd "${VAGRANT_DIR}" || exit 1 87 | vagrant up --no-provision || end_test -e "Error starting vagrant environment" 88 | } 89 | 90 | stop_vagrant() { 91 | cd "${VAGRANT_DIR}" || exit 1 92 | vagrant halt || end_test -e "Error halting vagrant environment" 93 | } 94 | 95 | destroy_vagrant() { 96 | cd "${VAGRANT_DIR}" || exit 1 97 | vagrant destroy || end_test -e "Error destroying vagrant environment" 98 | } 99 | 100 | ssh_config() { 101 | cd "${VAGRANT_DIR}" || exit 1 102 | vagrant ssh-config > "${SSH_CONFIG}" || end_test -e "Error creating ssh-config" 103 | } 104 | 105 | rollback_vagrant() { 106 | cd "${VAGRANT_DIR}" || exit 1 107 | ( 108 | ./rollback.sh 109 | if [[ ${?} -ne 0 ]]; then 110 | destroy_vagrant 111 | create_vagrant 112 | ssh_config 113 | fi 114 | ) || end_test -e "Error rolling back vagrant environment" 115 | } 116 | 117 | copy_deploy() { 118 | local node=${1:-master} 119 | 120 | cd "${VAGRANT_DIR}" || exit 1 121 | scp -qr -F "${SSH_CONFIG}" "${DEPLOY_DIR}" "${node}:" || end_test -e "SCP deploy to ${node} failed" 122 | scp -qr -F "${SSH_CONFIG}" "${TOPOLOGY_FILE}" "${node}:deploy/topology.json" || end_test -e "SCP topology to ${node} failed" 123 | } 124 | 125 | pull_docker_image() { 126 | cd "${VAGRANT_DIR}" || exit 1 127 | 128 | local 
image=${1} 129 | local vstatus 130 | 131 | vstatus=$(vagrant status | grep "node" | awk '{print $1}') 132 | for NODE in ${vstatus}; do 133 | ssh -q -F "${SSH_CONFIG}" "${NODE}" "sudo docker pull ${image}" || end_test -e "Error pulling '${image}' docker image to ${NODE}" 134 | done 135 | } 136 | 137 | end_test() { 138 | local result="${?}" 139 | local output="" 140 | local e=0 141 | if [[ "${1}" == "-e" ]]; then 142 | e=1 143 | shift 144 | fi 145 | if [[ ${result} -eq 0 ]]; then 146 | output="$(pass "${@}")" 147 | else 148 | output="$(fail "${@}")" 149 | LOCAL_FAILURE=1 150 | fi 151 | if [[ "x${output}" != "x" ]]; then 152 | echo -e "\r${output}" | tee -a "${TEST_LOG}" 153 | fi 154 | SUBTEST_MSG="" 155 | SUBTEST_OUT=1 156 | 157 | if [[ ${result} -ne 0 ]] && [[ ${e} -eq 1 ]]; then 158 | exit 1 159 | fi 160 | } 161 | 162 | start_test() { 163 | if [[ "x${TESTNAME}" != "x" ]]; then 164 | echo -en "| \e[32m\e[1mRUNNING:\e[21m ${TESTNAME}" 165 | # print a subtest number if counting 166 | if [[ ${SUBTEST_COUNT} -gt 0 ]]; then 167 | echo -en ":${SUBTEST_COUNT}" 168 | fi 169 | # print a subtest message if given 170 | if [[ "x${SUBTEST_MSG}" != "x" ]]; then 171 | echo -en ": ${SUBTEST_MSG}" 172 | fi 173 | echo -e "\e[0m" 174 | else 175 | echo -e "|=====" 176 | fi 177 | } 178 | 179 | end_run() { 180 | (exit ${LOCAL_FAILURE}) 181 | end_test 182 | if [[ ${RUN_DEPTH} -eq 0 ]]; then 183 | echo -e "|=====\n| \e[1mTEST SUMMARY:\e[21m" 184 | echo -e "$(cat "${TEST_LOG}")" 185 | rm -f "${TEST_LOG}" 186 | fi 187 | exit ${LOCAL_FAILURE} 188 | } 189 | 190 | run() { 191 | local e="" 192 | local remote=0 193 | local node 194 | local script 195 | local args 196 | local res 197 | 198 | while [[ "${1}" == -* ]]; do 199 | if [[ "${1}" == *e* ]]; then 200 | e="-e" 201 | fi 202 | if [[ "${1}" == *r ]]; then 203 | remote=1 204 | shift 205 | node="${1}" 206 | fi 207 | shift 208 | done 209 | script="${1%% *}" 210 | args=${1#* } 211 | shift 212 | 213 | if [[ ${#} -ge 1 ]]; then 214 | SUBTEST_MSG="${*}" 215 | fi 216 | ((SUBTEST_COUNT+=1)) 217 | SUBTEST_OUT=0 218 | 219 | ((RUN_DEPTH+=1)) 220 | if [[ ${remote} -eq 1 ]]; then 221 | start_test 222 | ( 223 | cd "${VAGRANT_DIR}" || exit 1 224 | scp -q -F "${SSH_CONFIG}" "${script}" "${node}": 1>/dev/null && \ 225 | ssh -qt -F "${SSH_CONFIG}" "${node}" "./$(basename "${script}") ${args}" 226 | ) 227 | res=${?} 228 | else 229 | ( 230 | # shellcheck disable=SC2086 231 | RUN_DEPTH=${RUN_DEPTH} ${script} ${args} 232 | ) 233 | res=${?} 234 | fi 235 | ((RUN_DEPTH-=1)) 236 | (exit ${res}) 237 | end_test ${e} 238 | } 239 | 240 | trap end_run EXIT 241 | trap "(exit 1)" ERR 242 | trap "(exit 1)" INT 243 | 244 | if [[ ${RUN_DEPTH} -eq 0 ]]; then 245 | rm -f "${TEST_LOG}" 246 | fi 247 | start_test 248 | -------------------------------------------------------------------------------- /tests/complex/run-all.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # shellcheck disable=SC2034 4 | TESTNAME="" 5 | TEST_DIR="$(cd "$(dirname "${0}")" && pwd)" 6 | 7 | source "${TEST_DIR}/lib.sh" 8 | 9 | run -e "${TEST_DIR}/test-setup.sh" 10 | 11 | run -e "${TEST_DIR}/run-basic.sh" 12 | 13 | rollback_vagrant 14 | 15 | run "${TEST_DIR}/run-object.sh" 16 | 17 | run "${TEST_DIR}/test-teardown.sh" 18 | -------------------------------------------------------------------------------- /tests/complex/run-basic.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | TEST_DIR="$(cd "$(dirname "${0}")" && pwd)" 
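# lib.sh provides the run() helper, the vagrant/ssh utilities and the
# pass/fail reporting used below.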
4 | 5 | source "${TEST_DIR}/lib.sh" 6 | 7 | run -e "${TEST_DIR}/test-gk-deploy.sh" 8 | 9 | run "${TEST_DIR}/test-dynamic-provisioning.sh" 10 | -------------------------------------------------------------------------------- /tests/complex/run-object.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | TEST_DIR="$(cd "$(dirname "${0}")" && pwd)" 4 | 5 | source "${TEST_DIR}/lib.sh" 6 | 7 | run -e "${TEST_DIR}/test-gk-deploy-object.sh" 8 | 9 | run "${TEST_DIR}/test-object-store.sh" 10 | -------------------------------------------------------------------------------- /tests/complex/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # shellcheck disable=SC2034 4 | TESTNAME="" 5 | TEST_DIR="$(cd "$(dirname "${0}")" && pwd)" 6 | 7 | source "${TEST_DIR}/lib.sh" 8 | 9 | run -e "${TEST_DIR}/test-setup.sh" 10 | 11 | run -e "${TEST_DIR}/run-basic.sh" 12 | 13 | run "${TEST_DIR}/test-teardown.sh" 14 | -------------------------------------------------------------------------------- /tests/complex/test-dynamic-provisioning.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | TEST_DIR="$(cd "$(dirname "${0}")" && pwd)" 4 | DOCKER_IMAGE="gcr.io/google_containers/nginx-slim:0.8" 5 | 6 | source "${TEST_DIR}/lib.sh" 7 | 8 | pull_docker_image "${DOCKER_IMAGE}" 9 | 10 | run -r master "${TEST_DIR}/test-inside-dynamic-provisioning.sh" "Test dynamic provisioning" 11 | -------------------------------------------------------------------------------- /tests/complex/test-gk-deploy-object.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | TEST_DIR="$(cd "$(dirname "${0}")" && pwd)" 4 | 5 | source "${TEST_DIR}/lib.sh" 6 | 7 | copy_deploy 8 | 9 | run -r master -e "${TEST_DIR}/test-inside-gk-deploy.sh obj" "Test object deployment" 10 | 11 | run -r master "${TEST_DIR}/test-inside-gk-deploy.sh obj" "Test object deployment idempotence" 12 | -------------------------------------------------------------------------------- /tests/complex/test-gk-deploy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | TEST_DIR="$(cd "$(dirname "${0}")" && pwd)" 4 | 5 | source "${TEST_DIR}/lib.sh" 6 | 7 | copy_deploy 8 | 9 | run -r master -e "${TEST_DIR}/test-inside-gk-deploy.sh" "Test basic deployment" 10 | 11 | run -r master "${TEST_DIR}/test-inside-gk-deploy.sh" "Test basic deployment idempotence" 12 | -------------------------------------------------------------------------------- /tests/complex/test-inside-dynamic-provisioning.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # test dynamic provisioning 4 | 5 | HEKETI_CLI_SERVER=$(kubectl get svc/heketi --template 'http://{{.spec.clusterIP}}:{{(index .spec.ports 0).port}}') 6 | export HEKETI_CLI_SERVER 7 | 8 | 9 | # SC 10 | 11 | SC="mysc" 12 | 13 | cat > "${SC}.yaml" < "${PVC}.yaml" < "${APP}.yaml" <' ]] ; do 98 | sleep 1 99 | appIP=$(kubectl get pods -o wide | grep "${APP}" | awk '{print $6}') 100 | done 101 | 102 | 103 | echo "putting content into application" 104 | CONTENT="Does this work? Yes! Great!!!" 
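# write the marker string into the nginx docroot, which is the mount point of
# the dynamically provisioned gluster volume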
105 | kubectl exec "${APP}" -- /bin/bash -c "echo \"${CONTENT}\" > /usr/share/nginx/html/index.html" 106 | 107 | 108 | echo "verifying we get back our content from the app" 109 | OUTPUT="$(curl "http://${appIP}")" 110 | 111 | if [[ "${OUTPUT}" != "${CONTENT}" ]]; then 112 | echo "ERROR: did not get expected output from nginx pod" 113 | exit 1 114 | fi 115 | 116 | 117 | echo "verifying the content is actually stored on gluster" 118 | mountinfo=$(kubectl exec "${APP}" -- /bin/bash -c "cat /proc/mounts | grep nginx" | awk '{print $1}') 119 | volname=$(echo -n "${mountinfo}" | cut -d: -f2) 120 | glusterip=$(echo -n "${mountinfo}" |cut -d: -f1) 121 | glusterpod=$(kubectl get pods -o wide | grep "${glusterip}" | awk '{print $1}') 122 | 123 | brickinfopath="/var/lib/glusterd/vols/${volname}/bricks" 124 | brickinfofile=$(kubectl exec "${glusterpod}" -- /bin/bash -c "ls -1 ${brickinfopath} | head -n 1") 125 | brickpath=$(kubectl exec "${glusterpod}" -- /bin/bash -c "cat ${brickinfopath}/${brickinfofile} | grep real_path | cut -d= -f2") 126 | brickhost=$(kubectl exec "${glusterpod}" -- /bin/bash -c "cat ${brickinfopath}/${brickinfofile} | grep hostname | cut -d= -f2") 127 | brickpod=$(kubectl get pods -o wide | grep "${brickhost}" | awk '{print $1}') 128 | 129 | BRICK_CONTENT=$(kubectl exec "${brickpod}" -- /bin/bash -c "cat ${brickpath}/index.html") 130 | if [[ "${BRICK_CONTENT}" != "${CONTENT}" ]]; then 131 | echo "ERROR: did not get expected content from brick" 132 | exit 1 133 | fi 134 | 135 | echo "cleaning up" 136 | kubectl delete pod "${APP}" 137 | kubectl delete pvc "${PVC}" 138 | kubectl delete storageclass "${SC}" 139 | 140 | exit 0 141 | -------------------------------------------------------------------------------- /tests/complex/test-inside-gk-deploy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # test gk-deploy 4 | 5 | OBJ=( --no-object ) 6 | 7 | while [[ "x${1}" != "x" ]]; do 8 | if [[ "${1}" == obj* ]]; then 9 | OBJ=( --object-account account --object-user user --object-password password ) 10 | fi 11 | shift 12 | done 13 | 14 | cd ~/deploy || exit 1 15 | 16 | # shellcheck disable=SC2086 17 | ./gk-deploy -v -y -g -n default ${OBJ[*]} 18 | 19 | if [[ $? -ne 0 ]]; then 20 | echo "ERROR: gk-deploy failed" 21 | exit 1 22 | fi 23 | 24 | # wait briefly for pods to settle down... 
25 | sleep 2 26 | 27 | num_gluster_pods=$(kubectl get pods | grep -s "glusterfs-" | grep -cs "1/1[[:space:]]*Running") 28 | num_heketi_pods=$(kubectl get pods | grep -s "heketi-" | grep -vs "Terminating" | grep -cs "1/1[[:space:]]*Running") 29 | 30 | if (( num_heketi_pods != 1 )); then 31 | echo "ERROR: unexpected number of heketi pods: " \ 32 | "${num_heketi_pods} - " \ 33 | "expected 1" 34 | exit 1 35 | fi 36 | 37 | if (( num_gluster_pods != 3 )); then 38 | echo "ERROR: unexpected number of gluster pods: " \ 39 | "${num_gluster_pods} - " \ 40 | "expected 3" 41 | exit 1 42 | fi 43 | 44 | if [[ "${OBJ[*]}" != "--no-object" ]]; then 45 | num_object_pods=$(kubectl get pods | grep -s "gluster-s3-" | grep -cs "1/1[[:space:]]*Running") 46 | 47 | if (( num_object_pods != 1 )); then 48 | echo "ERROR: unexpected number of gluster-s3 pods: " \ 49 | "${num_object_pods} - " \ 50 | "expected 1" 51 | exit 1 52 | fi 53 | fi 54 | 55 | echo "PASS" 56 | exit 0 57 | -------------------------------------------------------------------------------- /tests/complex/test-inside-object-store-setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # test object store setup 4 | 5 | S3_CURL_URL="${1}" 6 | shift 7 | S3_ACCOUNT="${1:-account}" 8 | shift 9 | S3_USER="${1:-user}" 10 | shift 11 | S3_PASSWORD="${1:-password}" 12 | shift 13 | 14 | if [ ! -f s3-curl/s3curl.pl ]; then 15 | wget -O s3-curl.zip "${S3_CURL_URL}" 16 | unzip s3-curl.zip 17 | fi 18 | 19 | sudo chmod -R a+rw s3-curl 20 | sudo chmod a+x s3-curl/s3curl.pl 21 | 22 | cat >~/.s3curl < { 25 | id => '${S3_ACCOUNT}:${S3_USER}', 26 | key => '${S3_PASSWORD}', 27 | }, 28 | ); 29 | END 30 | 31 | chmod 600 ~/.s3curl 32 | 33 | cd s3-curl || exit 1 34 | 35 | S3_IP=$(kubectl get svc/gluster-s3-service --template '{{.spec.clusterIP}}') 36 | 37 | 38 | python - <&1) 18 | if ! echo "${output}" | grep -q "HTTP/1.1 200"; then 19 | echo "${output}" 20 | exit 1 21 | fi 22 | 23 | } 24 | 25 | getbucket() { 26 | 27 | output=$(./s3curl.pl --debug --id "${S3_ID}" -- -k -v "http://${S3_SVC}/${BUCKET}" 2>&1) 28 | if ! echo "${output}" | grep -q "HTTP/1.1 200"; then 29 | echo "${output}" 30 | exit 1 31 | fi 32 | output=$(./s3curl.pl --id "${S3_ID}" -- -k -s "http://${S3_SVC}/${BUCKET}/" 2>&1) 33 | echo "${output}" 34 | 35 | } 36 | 37 | delbucket() { 38 | 39 | output=$(./s3curl.pl --debug --id "${S3_ID}" --delete -- -k -v "http://${S3_SVC}/${BUCKET}" 2>&1) 40 | if ! echo "${output}" | grep -q "HTTP/1.1 [204\|404]"; then 41 | echo "${output}" 42 | exit 1 43 | fi 44 | 45 | } 46 | 47 | putobject() { 48 | 49 | cat >"${OBJECT}" <<<"${OBJECT_CONTENTS}" 50 | output=$(./s3curl.pl --debug --id "${S3_ID}" --put ${OBJECT} -- -k -v "http://${S3_SVC}/${BUCKET}/${OBJECT}" 2>&1) 51 | if ! echo "${output}" | grep -q "HTTP/1.1 200"; then 52 | echo "${output}" 53 | exit 1 54 | fi 55 | 56 | objects=$(getbucket) 57 | if [[ "${objects}" != *${OBJECT}* ]]; then 58 | echo "Object '${OBJECT}' not found in bucket '${BUCKET}':" 59 | echo "${objects}" 60 | exit 1 61 | fi 62 | 63 | output=$(./s3curl.pl --id "${S3_ID}" -- -k -s "http://${S3_SVC}/${BUCKET}/${OBJECT}" 2>&1) 64 | if [[ "${output}" != "${OBJECT_CONTENTS}" ]]; then 65 | echo "Object contents don't match: '${output}' vs. '${OBJECT_CONTENTS}'" 66 | exit 1 67 | fi 68 | 69 | } 70 | 71 | delobject() { 72 | 73 | output=$(./s3curl.pl --debug --id "${S3_ID}" --delete -- -k -v "http://${S3_SVC}/${BUCKET}/${OBJECT}" 2>&1) 74 | if ! 
echo "${output}" | grep -q "HTTP/1.1 [204\|404]"; then 75 | echo "${output}" 76 | exit 1 77 | fi 78 | 79 | } 80 | 81 | clearbucket() { 82 | delobject 83 | delbucket 84 | } 85 | 86 | cd s3-curl || exit 1 87 | 88 | S3_SVC=$(kubectl get svc/gluster-s3-service --template '{{.spec.clusterIP}}:{{(index .spec.ports 0).port}}') 89 | 90 | "${S3_CMD}" "${S3_CMD_ARGS[@]}" 91 | 92 | exit 0 93 | -------------------------------------------------------------------------------- /tests/complex/test-object-store.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | TEST_DIR="$(cd "$(dirname "${0}")" && pwd)" 4 | S3CURL_URL="http://s3.amazonaws.com/doc/s3-example-code/s3-curl.zip" 5 | S3_ACCOUNT="account" 6 | S3_USER="user" 7 | S3_PASSWORD="password" 8 | 9 | source "${TEST_DIR}/lib.sh" 10 | 11 | run -r master -e "${TEST_DIR}/test-inside-object-store-setup.sh ${S3CURL_URL} ${S3_ACCOUNT} ${S3_USER} ${S3_PASSWORD}" "Setup s3curl" 12 | 13 | run -r master -e "${TEST_DIR}/test-inside-object-store.sh ${S3_ACCOUNT}:${S3_USER} clearbucket" "Clear bucket" 14 | 15 | run -r master -e "${TEST_DIR}/test-inside-object-store.sh ${S3_ACCOUNT}:${S3_USER} putbucket" "Test put of bucket" 16 | 17 | run -r master "${TEST_DIR}/test-inside-object-store.sh ${S3_ACCOUNT}:${S3_USER} putobject" "Test put of object" 18 | 19 | run -r master "${TEST_DIR}/test-inside-object-store.sh ${S3_ACCOUNT}:${S3_USER} delobject" "Test delete of object" 20 | 21 | run -r master "${TEST_DIR}/test-inside-object-store.sh ${S3_ACCOUNT}:${S3_USER} delbucket" "Test delete of bucket" 22 | -------------------------------------------------------------------------------- /tests/complex/test-setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | TEST_DIR="$(cd "$(dirname "${0}")" && pwd)" 4 | 5 | source "${TEST_DIR}/lib.sh" 6 | 7 | create_vagrant 8 | -------------------------------------------------------------------------------- /tests/complex/test-teardown.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | TEST_DIR="$(cd "$(dirname "${0}")" && pwd)" 4 | 5 | source "${TEST_DIR}/lib.sh" 6 | 7 | destroy_vagrant 8 | -------------------------------------------------------------------------------- /tests/simple/Makefile: -------------------------------------------------------------------------------- 1 | test: 2 | ./run.sh 3 | 4 | .PHONY: test 5 | -------------------------------------------------------------------------------- /tests/simple/README.md: -------------------------------------------------------------------------------- 1 | # Testsuite - simple tests 2 | 3 | This directory contains simple tests for gluster-kubernetes. 4 | These are tests that do not test the full stack end-to-end 5 | but are syntax-checks or unit-tests, or use mocking or stubbing 6 | to test specific aspects. 7 | 8 | ## Prerequisites 9 | 10 | The yaml tests require the 'yamllint' program. 11 | Install it with e.g. 12 | 13 | * `dnf install yamllint`, or 14 | * `pip install yamllint` 15 | 16 | The gk-deploy test uses ShellCheck if installed. 
17 | Install with 18 | 19 | * `dnf install ShellCheck`, or 20 | * `apt-get install shellcheck` 21 | 22 | ## TODOs 23 | 24 | * Write more tests 25 | * More elaborate basic tests need fuller mocking/stubbing of tools 26 | * Write full functional tests to be run in vms 27 | (like the kubernetes vagrant environment) 28 | 29 | -------------------------------------------------------------------------------- /tests/simple/common/shell_tests.sh: -------------------------------------------------------------------------------- 1 | test_shell_syntax() { 2 | bash -n "${1}" 3 | } 4 | 5 | test_shellcheck() { 6 | if ! which shellcheck ; then 7 | echo "ShellCheck not found: skipping..." 8 | return 0 9 | fi 10 | 11 | shellcheck -x -s bash -e SC2181,SC2029,SC1091,SC1090 "${1}" 12 | } 13 | 14 | test_real_path() { 15 | grep -s "[r]ealpath" "${1}" 16 | if [[ ${?} -eq 0 ]]; then 17 | return 1 18 | else 19 | return 0 20 | fi 21 | } 22 | -------------------------------------------------------------------------------- /tests/simple/common/subunit.sh: -------------------------------------------------------------------------------- 1 | # 2 | # subunit.sh: shell functions to report test status via the subunit protocol. 3 | # Copyright (C) 2006 Robert Collins 4 | # Copyright (C) 2008 Jelmer Vernooij 5 | # 6 | # This program is free software; you can redistribute it and/or modify 7 | # it under the terms of the GNU General Public License as published by 8 | # the Free Software Foundation; either version 2 of the License, or 9 | # (at your option) any later version. 10 | # 11 | # This program is distributed in the hope that it will be useful, 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 | # GNU General Public License for more details. 15 | # 16 | # You should have received a copy of the GNU General Public License 17 | # along with this program; if not, write to the Free Software 18 | # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 19 | # 20 | 21 | timestamp() { 22 | # mark the start time. With Gnu date, you get nanoseconds from %N 23 | # (here truncated to microseconds with %6N), but not on BSDs, 24 | # Solaris, etc, which will apparently leave either %N or N at the end. 25 | date -u +'time: %Y-%m-%d %H:%M:%S.%6NZ' | sed 's/\..*NZ$/.000000Z/' 26 | } 27 | 28 | subunit_start_test () { 29 | # emit the current protocol start-marker for test $1 30 | timestamp 31 | echo "test: $1" 32 | } 33 | 34 | 35 | subunit_pass_test () { 36 | # emit the current protocol test passed marker for test $1 37 | timestamp 38 | echo "success: $1" 39 | } 40 | 41 | # This is just a hack as we have some broken scripts 42 | # which use "exit $failed", without initializing failed. 43 | failed=0 44 | 45 | subunit_fail_test () { 46 | # emit the current protocol fail-marker for test $1, and emit stdin as 47 | # the error text. 48 | # we use stdin because the failure message can be arbitrarily long, and this 49 | # makes it convenient to write in scripts (using <&1` 84 | status=$? 85 | if [ x$status = x0 ]; then 86 | subunit_pass_test "$name" 87 | else 88 | echo "$output" | subunit_fail_test "$name" 89 | fi 90 | return $status 91 | } 92 | 93 | testit_expect_failure () { 94 | name="$1" 95 | shift 96 | cmdline="$*" 97 | subunit_start_test "$name" 98 | output=`$cmdline 2>&1` 99 | status=$? 
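	# inverted check: exit status 0 means the command unexpectedly succeeded,
	# so the test is reported as failed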
100 | if [ x$status = x0 ]; then 101 | echo "$output" | subunit_fail_test "$name" 102 | else 103 | subunit_pass_test "$name" 104 | fi 105 | return $status 106 | } 107 | 108 | testok () { 109 | name=`basename $1` 110 | failed=$2 111 | 112 | exit $failed 113 | } 114 | -------------------------------------------------------------------------------- /tests/simple/gk-deploy/Makefile: -------------------------------------------------------------------------------- 1 | test: 2 | ./run.sh 3 | 4 | .PHONY: Makefile 5 | -------------------------------------------------------------------------------- /tests/simple/gk-deploy/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SCRIPT_DIR="$(cd "$(dirname "${0}")" && pwd)" 4 | 5 | echo "running tests in ${SCRIPT_DIR}" 6 | 7 | failed=0 8 | 9 | for test in ${SCRIPT_DIR}/test_*.sh ; do 10 | $test 11 | rc=${?} 12 | if [[ ${rc} -ne 0 ]]; then 13 | ((failed+=rc)) 14 | fi 15 | done 16 | 17 | exit ${failed} 18 | -------------------------------------------------------------------------------- /tests/simple/gk-deploy/stubs/cli.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [[ "${*}" == *"get "* ]]; then 4 | while [[ "${1}" != "get" ]]; do 5 | shift 6 | done 7 | shift 8 | restype="${1}" 9 | shift 10 | while [[ "${1}" != --selector* ]] && [[ "${1}" == --* ]]; do 11 | shift 12 | done 13 | select="${1}" 14 | if [[ "${restype}" == namespace* ]] && [[ "${select}" == "invalid" ]]; then 15 | echo "Error" 16 | fi 17 | elif [[ "${*}" == *" config get-contexts" ]]; then 18 | if [[ "${0}" == *oc* ]]; then 19 | echo "* two three four storage" 20 | fi 21 | fi 22 | 23 | exit 0 24 | -------------------------------------------------------------------------------- /tests/simple/gk-deploy/stubs/kubectl: -------------------------------------------------------------------------------- 1 | ./cli.sh -------------------------------------------------------------------------------- /tests/simple/gk-deploy/stubs/oc: -------------------------------------------------------------------------------- 1 | ./cli.sh -------------------------------------------------------------------------------- /tests/simple/gk-deploy/test_gk_deploy_basic.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SCRIPT_DIR="$(cd "$(dirname "${0}")" && pwd)" 4 | STUBS_DIR="${SCRIPT_DIR}/stubs" 5 | TESTS_DIR="${SCRIPT_DIR}/.." 6 | INC_DIR="${TESTS_DIR}/common" 7 | BASE_DIR="${SCRIPT_DIR}/../../.." 8 | DEPLOY_DIR="${BASE_DIR}/deploy" 9 | 10 | GK_DEPLOY="${DEPLOY_DIR}/gk-deploy" 11 | TOPOLOGY="${DEPLOY_DIR}/topology.json.sample" 12 | 13 | PATH="${STUBS_DIR}:$PATH" 14 | 15 | source "${INC_DIR}/subunit.sh" 16 | source "${INC_DIR}/shell_tests.sh" 17 | 18 | test_missing_topology () { 19 | local args=( -y ) 20 | 21 | OUT=$("${GK_DEPLOY}" "${args[@]}") 22 | local rc=${?} 23 | 24 | echo "cmd: '${GK_DEPLOY} ${args[*]}'" 25 | echo "output:" 26 | echo "${OUT}" 27 | 28 | if [[ ${rc} == 0 ]]; then 29 | echo "ERROR: script without topology succeeded" 30 | return 1 31 | fi 32 | 33 | return 0 34 | } 35 | 36 | test_cli_not_found () { 37 | local args=( -y ) 38 | local expected_out="Container platform CLI (e.g. kubectl, oc) not found." 
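  # run gk-deploy with PATH pointing at a directory that does not exist, so
  # neither kubectl nor oc can be found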
39 | 40 | OUT=$(PATH='/doesnotexist' "${GK_DEPLOY}" "${args[@]}" "${TOPOLOGY}") 41 | local rc=${?} 42 | 43 | echo "cmd: 'PATH='/doesnotexist' ${GK_DEPLOY} ${args[*]} ${TOPOLOGY}'" 44 | echo "output:" 45 | echo "${OUT}" 46 | 47 | if [[ ${rc} == 0 ]]; then 48 | echo "ERROR: script succeeded" 49 | return 1 50 | fi 51 | 52 | if [[ ${rc} != 1 ]]; then 53 | echo "ERROR: script returned ${rc}, expected 1" 54 | return 1 55 | fi 56 | 57 | if [[ "${OUT}" != "${expected_out}" ]]; then 58 | echo "ERROR: expected \"${expected_out}\" in output" 59 | return 1 60 | fi 61 | 62 | return 0 63 | 64 | } 65 | 66 | test_cli_unknown () { 67 | local cli="${1}" 68 | local args=( -y -c "${cli}" ) 69 | local expected_out="Unknown CLI '${cli}'." 70 | 71 | OUT=$("${GK_DEPLOY}" "${args[@]}" "${TOPOLOGY}") 72 | local rc=${?} 73 | 74 | echo "cmd: '${GK_DEPLOY} ${args[*]} ${TOPOLOGY}'" 75 | echo "output:" 76 | echo "${OUT}" 77 | 78 | if [[ ${rc} == 0 ]]; then 79 | echo "ERROR: script succeeded" 80 | return 1 81 | fi 82 | 83 | if [[ ${rc} != 1 ]]; then 84 | echo "ERROR: script returned ${rc}, expected 1" 85 | return 1 86 | fi 87 | 88 | if [[ "${OUT}" != "${expected_out}" ]]; then 89 | echo "ERROR: expected \"${expected_out}\" in output" 90 | return 1 91 | fi 92 | 93 | return 0 94 | } 95 | 96 | test_namespace_invalid () { 97 | local cli="${1}" 98 | local args=( -y -c "${1}" -n invalid ) 99 | local expected_out="Namespace 'invalid' not found" 100 | 101 | # shellcheck disable=SC2086 102 | OUT=$("${GK_DEPLOY}" "${args[@]}" "${TOPOLOGY}") 103 | local rc=${?} 104 | 105 | echo "cmd: '${GK_DEPLOY} ${args[*]} ${TOPOLOGY}'" 106 | echo "output:" 107 | echo "${OUT}" 108 | 109 | if [[ ${rc} == 0 ]]; then 110 | echo "ERROR: script succeeded" 111 | return 1 112 | fi 113 | 114 | if [[ ${rc} != 1 ]]; then 115 | echo "ERROR: script returned ${rc}, expected 1" 116 | return 1 117 | fi 118 | 119 | if [[ "${OUT}" != *"${expected_out}"* ]]; then 120 | echo "ERROR: expected \"${expected_out}\" in output" 121 | return 1 122 | fi 123 | 124 | return 0 125 | } 126 | 127 | failed=0 128 | 129 | testit "test script syntax" \ 130 | test_shell_syntax "${GK_DEPLOY}" \ 131 | || ((failed++)) 132 | 133 | testit "test shellcheck" \ 134 | test_shellcheck "${GK_DEPLOY}" \ 135 | || ((failed++)) 136 | 137 | testit "test missing topology" \ 138 | test_missing_topology \ 139 | || ((failed++)) 140 | 141 | testit "test cli not found" \ 142 | test_cli_not_found \ 143 | || ((failed++)) 144 | 145 | testit "test cli does not exist" \ 146 | test_cli_unknown doesnotexist \ 147 | || ((failed++)) 148 | 149 | testit "test cli unknown" \ 150 | test_cli_unknown /usr/bin/true \ 151 | || ((failed++)) 152 | 153 | testit "test namespace invalid oc" \ 154 | test_namespace_invalid oc \ 155 | || ((failed++)) 156 | 157 | testit "test namespace invalid kubectl" \ 158 | test_namespace_invalid kubectl \ 159 | || ((failed++)) 160 | 161 | testok "${0}" "${failed}" 162 | -------------------------------------------------------------------------------- /tests/simple/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SCRIPT_DIR="$(cd "$(dirname "${0}")" && pwd)" 4 | 5 | failed=0 6 | 7 | for testdir in ${SCRIPT_DIR}/*; do 8 | if [[ ! -d ${testdir} ]]; then 9 | continue 10 | fi 11 | 12 | if [[ ! 
-x ${testdir}/run.sh ]]; then 13 | continue 14 | fi 15 | 16 | pushd "${testdir}" 17 | ./run.sh 18 | rc=${?} 19 | popd 20 | 21 | if [[ ${rc} -ne 0 ]]; then 22 | ((failed+=rc)) 23 | fi 24 | done 25 | 26 | exit ${failed} 27 | -------------------------------------------------------------------------------- /tests/simple/shell/Makefile: -------------------------------------------------------------------------------- 1 | test: 2 | ./run.sh 3 | 4 | .PHONY: test 5 | -------------------------------------------------------------------------------- /tests/simple/shell/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SCRIPT_DIR="$(cd "$(dirname "${0}")" && pwd)" 4 | 5 | echo "running tests in ${SCRIPT_DIR}" 6 | 7 | failed=0 8 | 9 | for test in ${SCRIPT_DIR}/test_*.sh; do 10 | ${test} 11 | rc=${?} 12 | if [[ ${rc} -ne 0 ]]; then 13 | ((failed+=rc)) 14 | fi 15 | done 16 | 17 | exit ${failed} 18 | -------------------------------------------------------------------------------- /tests/simple/shell/test_realpath.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SCRIPT_DIR="$(cd "$(dirname "${0}")" && pwd)" 4 | TESTS_DIR="${SCRIPT_DIR}/.." 5 | INC_DIR="${TESTS_DIR}/common" 6 | BASE_DIR="${SCRIPT_DIR}/../../.." 7 | 8 | source "${INC_DIR}/subunit.sh" 9 | source "${INC_DIR}/shell_tests.sh" 10 | 11 | failed=0 12 | 13 | while read -r script; do 14 | # note: this is intentially mis-spelled realPath 15 | # so that this does not trigger an error. 16 | testit "check for use of realPath: $(basename "${script}")" \ 17 | test_real_path "${script}" \ 18 | || ((failed++)) 19 | done <<< "$(find "${BASE_DIR}" -name "*.sh" | grep -v "subunit.sh")" 20 | 21 | testok "${0}" "${failed}" 22 | -------------------------------------------------------------------------------- /tests/simple/shell/test_syntax.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SCRIPT_DIR="$(cd "$(dirname "${0}")" && pwd)" 4 | TESTS_DIR="${SCRIPT_DIR}/.." 5 | INC_DIR="${TESTS_DIR}/common" 6 | BASE_DIR="${SCRIPT_DIR}/../../.." 
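# subunit.sh provides testit/testok; shell_tests.sh provides the syntax and
# shellcheck helpers used below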
7 | 8 | source "${INC_DIR}/subunit.sh" 9 | source "${INC_DIR}/shell_tests.sh" 10 | 11 | failed=0 12 | 13 | while read -r script; do 14 | testit "check basic syntax: $(basename "${script}")" \ 15 | test_shell_syntax "${script}" \ 16 | || ((failed++)) 17 | testit "shellcheck: $(basename "${script}")" \ 18 | test_shellcheck "${script}" \ 19 | || ((failed++)) 20 | done <<< "$(find "${BASE_DIR}" -name "*.sh" | grep -v "subunit.sh")" 21 | 22 | testok "${0}" "${failed}" 23 | -------------------------------------------------------------------------------- /tests/simple/yaml/Makefile: -------------------------------------------------------------------------------- 1 | test: 2 | ./run.sh 3 | 4 | .PHONY: test 5 | -------------------------------------------------------------------------------- /tests/simple/yaml/glusterfs-daemonset-wrong.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: DaemonSet 3 | apiVersion: extensions/v1beta1 4 | metadata: 5 | name: glusterfs 6 | labels: 7 | glusterfs: daemonset 8 | annotations: 9 | description: GlusterFS DaemonSet 10 | tags: glusterfs 11 | metadata: 12 | name: glusterfs 13 | labels: 14 | glusterfs-node: pod 15 | spec: 16 | nodeSelector: 17 | storagenode: glusterfs 18 | hostNetwork: true 19 | containers: 20 | - image: gluster/gluster-centos:latest 21 | imagePullPolicy: IfNotPresent 22 | name: glusterfs 23 | volumeMounts: 24 | - name: glusterfs-heketi 25 | mountPath: "/var/lib/heketi" 26 | - name: glusterfs-run 27 | --- 28 | kind: DaemonSet 29 | apiVersion: extensions/v1beta1 30 | metadata: 31 | name: glusterfs 32 | labels: 33 | glusterfs: daemonset 34 | annotations: 35 | description: GlusterFS DaemonSet 36 | tags: glusterfs 37 | metadata: 38 | name: glusterfs 39 | labels: 40 | glusterfs-node: pod 41 | spec: 42 | nodeSelector: 43 | storagenode: glusterfs 44 | hostNetwork: true 45 | containers: 46 | - image: gluster/gluster-centos:latest 47 | imagePullPolicy: IfNotPresent 48 | name: glusterfs 49 | volumeMounts: 50 | - name: glusterfs-heketi 51 | mountPath: "/var/lib/heketi" 52 | - name: glusterfs-run 53 | mountPath: "/run" 54 | - name: glusterfs-lvm 55 | mountPath: "/run/lvm" 56 | - name: glusterfs-etc 57 | mountPath: "/etc/glusterfs" 58 | - name: glusterfs-logs 59 | mountPath: "/var/log/glusterfs" 60 | - name: glusterfs-config 61 | mountPath: "/var/lib/glusterd" 62 | - name: glusterfs-dev 63 | mountPath: "/dev" 64 | - name: glusterfs-misc 65 | mountPath: "/var/lib/misc/glusterfsd" 66 | - name: glusterfs-cgroup 67 | mountPath: "/sys/fs/cgroup" 68 | readOnly: true 69 | command: 70 | - "/bin/bash" 71 | - "-c" 72 | - systemctl status glusterd.service 73 | livenessProbe: 74 | timeoutSeconds: 3 75 | initialDelaySeconds: 60 76 | exec: 77 | command: 78 | - "/bin/bash" 79 | - "-c" 80 | - systemctl status glusterd.service 81 | volumes: 82 | - name: glusterfs-heketi 83 | hostPath: 84 | path: "/var/lib/heketi" 85 | - name: glusterfs-run 86 | - name: glusterfs-lvm 87 | hostPath: 88 | path: "/run/lvm" 89 | - name: glusterfs-etc 90 | hostPath: 91 | path: "/etc/glusterfs" 92 | - name: glusterfs-logs 93 | hostPath: 94 | path: "/var/log/glusterfs" 95 | - name: glusterfs-config 96 | hostPath: 97 | path: "/var/lib/glusterd" 98 | - name: glusterfs-dev 99 | hostPath: 100 | path: "/dev" 101 | - name: glusterfs-misc 102 | hostPath: 103 | path: "/var/lib/misc/glusterfsd" 104 | - name: glusterfs-cgroup 105 | hostPath: 106 | path: "/sys/fs/cgroup" 107 | - name: glusterfs-ssl 108 | hostPath: 109 | path: "/etc/ssl" 110 | 111 | mountPath: 
"/run" 112 | - name: glusterfs-lvm 113 | mountPath: "/run/lvm" 114 | - name: glusterfs-etc 115 | mountPath: "/etc/glusterfs" 116 | - name: glusterfs-logs 117 | mountPath: "/var/log/glusterfs" 118 | - name: glusterfs-config 119 | mountPath: "/var/lib/glusterd" 120 | - name: glusterfs-dev 121 | mountPath: "/dev" 122 | - name: glusterfs-misc 123 | mountPath: "/var/lib/misc/glusterfsd" 124 | - name: glusterfs-cgroup 125 | mountPath: "/sys/fs/cgroup" 126 | readOnly: true 127 | - name: glusterfs-ssl 128 | mountPath: "/etc/ssl" 129 | readOnly: true 130 | securityContext: 131 | capabilities: {} 132 | privileged: true 133 | readinessProbe: 134 | timeoutSeconds: 3 135 | initialDelaySeconds: 60 136 | exec: 137 | command: 138 | - "/bin/bash" 139 | - "-c" 140 | - systemctl status glusterd.service 141 | livenessProbe: 142 | timeoutSeconds: 3 143 | initialDelaySeconds: 60 144 | exec: 145 | command: 146 | - "/bin/bash" 147 | - "-c" 148 | - systemctl status glusterd.service 149 | volumes: 150 | - name: glusterfs-heketi 151 | hostPath: 152 | path: "/var/lib/heketi" 153 | - name: glusterfs-run 154 | - name: glusterfs-lvm 155 | hostPath: 156 | path: "/run/lvm" 157 | - name: glusterfs-etc 158 | hostPath: 159 | path: "/etc/glusterfs" 160 | - name: glusterfs-logs 161 | hostPath: 162 | path: "/var/log/glusterfs" 163 | - name: glusterfs-config 164 | hostPath: 165 | path: "/var/lib/glusterd" 166 | - name: glusterfs-dev 167 | hostPath: 168 | path: "/dev" 169 | - name: glusterfs-misc 170 | hostPath: 171 | path: "/var/lib/misc/glusterfsd" 172 | - name: glusterfs-cgroup 173 | hostPath: 174 | path: "/sys/fs/cgroup" 175 | - name: glusterfs-ssl 176 | hostPath: 177 | path: "/etc/ssl" 178 | -------------------------------------------------------------------------------- /tests/simple/yaml/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SCRIPT_DIR="$(cd "$(dirname "${0}")" && pwd)" 4 | 5 | echo "running tests in ${SCRIPT_DIR}" 6 | 7 | failed=0 8 | 9 | for test in ${SCRIPT_DIR}/test_*.sh ; do 10 | ${test} 11 | rc=${?} 12 | if [[ ${?} -ne 0 ]]; then 13 | ((failed+=rc)) 14 | fi 15 | done 16 | 17 | exit ${failed} 18 | -------------------------------------------------------------------------------- /tests/simple/yaml/test_syntax.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SCRIPT_DIR="$(cd "$(dirname "${0}")" && pwd)" 4 | TESTS_DIR="${SCRIPT_DIR}/.." 5 | INC_DIR="${TESTS_DIR}/common" 6 | BASE_DIR="${SCRIPT_DIR}/../../.." 7 | DEPLOY_DIR="${BASE_DIR}/deploy" 8 | 9 | FAULTY_YAML="${SCRIPT_DIR}/glusterfs-daemonset-wrong.yaml" 10 | 11 | source "${INC_DIR}/subunit.sh" 12 | 13 | check_yaml () { 14 | local yaml=${1} 15 | yamllint -f parsable -d relaxed "${yaml}" 16 | } 17 | 18 | check_invalid_yaml () { 19 | check_yaml "${1}" 20 | if [[ "x$?" == "x0" ]]; then 21 | echo "ERROR: parsing invalid yaml succeeded" 22 | return 1 23 | fi 24 | 25 | return 0 26 | } 27 | 28 | failed=0 29 | 30 | if ! 
which yamllint >/dev/null 2>&1 ; then 31 | subunit_start_test "yaml syntax tests" 32 | subunit_skip_test "yaml syntax tests" <<< "yamllint not found" 33 | else 34 | testit "check invalid yaml" \ 35 | check_invalid_yaml "${FAULTY_YAML}" \ 36 | || ((failed++)) 37 | 38 | while read -r yaml; do 39 | testit "check $(basename "${yaml}")" \ 40 | check_yaml "${yaml}" \ 41 | || ((failed++)) 42 | done <<< "$(find "${DEPLOY_DIR}" -name "*.yaml")" 43 | fi 44 | 45 | testok "${0}" "${failed}" 46 | -------------------------------------------------------------------------------- /vagrant/README.md: -------------------------------------------------------------------------------- 1 | # Overview 2 | Kubernetes on CentOS 7 based on [kubeadm](http://kubernetes.io/docs/admin/kubeadm/). 3 | Default setup is a single master with three nodes 4 | 5 | To setup type: 6 | 7 | ```bash 8 | $ ./up.sh 9 | $ vagrant ssh master 10 | [vagrant@master]$ kubectl get nodes 11 | ``` 12 | 13 | Works in Vagrant/Ansible on Linux with libvirt/KVM or on Mac OS X on Virtualbox 14 | 15 | ## Versions 16 | Currently it uses Kubernetes v1.7.8 17 | 18 | ## Features 19 | 20 | **CACHING:** This vagrant/ansible environment allows for caching of the yum 21 | cache. This allows you to reuse those caches on subsequent provisioning of the 22 | VMs, and it is intended to help situations where one is developing in an 23 | environment where they would rather not have to redownload many megabytes 24 | repeatedly, e.g. hotel WiFi. :) It stores the cache as a `tgz` file in your 25 | `VAGRANT_HOME` directory, `~/.vagrant.d` by default. To enable this, either 26 | specify `VAGRANT_CACHE=1` on the command line or change the `CACHE` variable 27 | near the top of the Vagrantfile from `false` to `true`. 28 | 29 | **CUSTOM REGISTRY:** Similar to the caching feature, this environment supports 30 | interaction with a custom Docker registry. The idea is that a registry would 31 | be running in a local environment somewhere that could be used as a primary 32 | source for pulling container images into the VMs. Just specify the variable 33 | `custom_registry` in the `global_vars.yml` file to configure it. The following 34 | scripts are available to facilitate this feature: 35 | 36 | * `docker-registry-run.sh`: A simple script to run a Docker registry in a 37 | container on your local machine, listening on port 5000. **NOTE:** You may 38 | need to open up the relevant firewall port on your local machine. 39 | * `docker-cache.sh`: This script will detect all current images on a given VM 40 | (default 'master') and push each image to the custom registry. Usage: 41 | ``` 42 | ./docker-cache.sh 192.168.121.1:5000 master 43 | ``` 44 | 45 | **GCR.IO PROXY:** An addition to the custom registry, this environment allows 46 | you to set up an nginx proxy on the master node VM that can redirect gcr.io 47 | traffic to your custom registry. This allows you to store any images from 48 | gcr.io in your custom registry and then have things like kubeadm pull from 49 | there instead of the actual gcr.io. Just specify `custom_registry_gcr=true` in 50 | `global_vars.yml`. The `gcr-proxy-state.sh` script is available to set the 51 | proxy redirect on or off at runtime. 52 | 53 | A typical workflow to start using this would look like: 54 | 55 | 1. Run `docker-registry-run.sh`. 56 | 2. Enable the custom registry in `global_vars.yml`. 57 | * **NOTE:** Since the registry is currently empty, any search for container 58 | images will proceed to the next registry (Docker Hub, by default). 
59 | 3. Start the vagrant environment: `up.sh` 60 | 4. Run `docker-cache.sh :5000 master` 61 | 5. Run `docker-cache.sh :5000 node0` 62 | * **OPTIONAL:** Run `docker pull gcr.io/google_containers/nginx-slim:0.8` 63 | on `node0` before caching, since it is used in testing. 64 | 6. Tear down the vagrant environment: `vagrant destroy` 65 | 7. Set `custom_registry_gcr=true` in `global_vars.yml` 66 | 67 | Now Docker will pull gcr.io images and custom images from your custom registry, 68 | and check your custom registry before pulling from Docker Hub. You will want to 69 | periodically set `custom_registry_add=false` and `custom_registry_gcr=false` to 70 | pull updated images and then cache them with `docker-cache.sh`. 71 | 72 | **CUSTOM YUM REPOS:** As an alternative or complementary tool to the rpm caching 73 | feature mentioned above, the `custom_yum_repos` variable can be enabled to 74 | supply custom yum repos to the VMs. These custom repos can be used to cache 75 | packages across multiple projects or inject custom RPMs into the VMs. 76 | 77 | To configure it, uncomment or copy the `custom_yum_repos` variable in 78 | `global_vars.yml`. Supply key-value pairs, where the key is the name of the 79 | yum repository and the value is the repository's url. Example: 80 | ``` 81 | custom_yum_repos: 82 | kubernetes_el7: http://mypkgs/path/to/repo1 83 | epel_el7: http://mypkgs/path/to/repo2 84 | gluster_el7: http://mypkgs/another/repo/path/repo3 85 | ``` 86 | 87 | 88 | **CUSTOM HOST ALIASES:** If you want or need to use a name for the yum 89 | repository hosts or custom docker registry that does not resolve normally, 90 | you can define a `custom_host_aliases` in `global_vars.yml`. This value takes 91 | a list of items where each item is a mapping with the keys `addr`, an ip 92 | address, and `names`, a list of host names. Example: 93 | ``` 94 | custom_host_aliases: 95 | - addr: 192.168.122.164 96 | names: 97 | - myserver 98 | - myserver.localdomain 99 | - addr: 192.168.122.166 100 | names: 101 | - foo 102 | - foo.example.org 103 | ``` 104 | -------------------------------------------------------------------------------- /vagrant/Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | NODES = 3 5 | DISKS = 3 6 | CACHE = false 7 | LIBVIRT_DISK_CACHE = "default" 8 | 9 | Vagrant.configure("2") do |config| 10 | config.ssh.insert_key = false 11 | config.vm.box = "centos/7" 12 | config.vm.box_url = "https://app.vagrantup.com/centos/7" 13 | 14 | # Override 15 | config.vm.provider :libvirt do |v,override| 16 | override.vm.synced_folder '.', '/home/vagrant/sync', disabled: true 17 | 18 | # change cpu mode to passthrough as workaround, refer bugs: 19 | #https://bugzilla.redhat.com/show_bug.cgi?id=1467599 20 | #https://bugzilla.redhat.com/show_bug.cgi?id=1386223#c10 21 | #vagrant-libvirt/vagrant-libvirt#667 22 | v.cpu_mode = 'host-passthrough' 23 | end 24 | 25 | # Make kube master 26 | config.vm.define :master do |master| 27 | master.vm.network :private_network, ip: "192.168.10.90" 28 | master.vm.host_name = "master" 29 | 30 | master.vm.provider :virtualbox do |vb| 31 | vb.memory = 1024 32 | vb.cpus = 2 33 | end 34 | master.vm.provider :libvirt do |lv| 35 | lv.memory = 1024 36 | lv.cpus = 2 37 | lv.volume_cache = LIBVIRT_DISK_CACHE 38 | end 39 | 40 | # View the documentation for the provider you're using for more 41 | # information on available options. 
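    # The Ansible provisioner below is attached to the master VM but provisions
    # every machine in one run (ansible.limit = "all"): the "master" group holds
    # the single master and the "nodes" group holds node0..node(NODES-1).
    # VAGRANT_HOME and VAGRANT_CACHE from the environment feed the yum-cache
    # feature described in README.md, and vagrant_master passes the master's
    # private IP (192.168.10.90) to the playbooks.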
42 | master.vm.provision :ansible do |ansible| 43 | ansible.verbose = true 44 | ansible.limit = "all" 45 | ansible.playbook = "site.yml" 46 | ansible.groups = { 47 | "master" => ["master"], 48 | "nodes" => (0..NODES-1).map {|j| "node#{j}"}, 49 | } 50 | ansible.extra_vars = { 51 | "vagrant_home" => ENV['VAGRANT_HOME'] ? ENV['VAGRANT_HOME'] : "~/.vagrant.d", 52 | "vagrant_cache" => ENV['VAGRANT_CACHE'] ? ENV['VAGRANT_CACHE'] : CACHE, 53 | "vagrant_master" => "192.168.10.90" 54 | } 55 | end 56 | 57 | end 58 | 59 | # Make the glusterfs cluster, each with DISKS number of drives 60 | (0..NODES-1).each do |i| 61 | config.vm.define "node#{i}" do |node| 62 | node.vm.hostname = "node#{i}" 63 | node.vm.network :private_network, ip: "192.168.10.10#{i}" 64 | 65 | # Settings for Virtualbox 66 | node.vm.provider :virtualbox do |vb| 67 | unless File.exist?("disk-#{i}-0.vdi") 68 | vb.customize ["storagectl", :id,"--name", "VboxSata", "--add", "sata"] 69 | end 70 | end 71 | 72 | (0..DISKS-1).each do |d| 73 | node.vm.provider :virtualbox do |vb| 74 | unless File.exist?("disk-#{i}-#{d}.vdi") 75 | vb.customize [ "createmedium", "--filename", "disk-#{i}-#{d}.vdi", "--size", 1024*1024 ] 76 | end 77 | vb.customize [ "storageattach", :id, "--storagectl", "VboxSata", "--port", 3+d, "--device", 0, "--type", "hdd", "--medium", "disk-#{i}-#{d}.vdi" ] 78 | vb.memory = 1024 79 | vb.cpus = 2 80 | end 81 | node.vm.provider :libvirt do |lv| 82 | driverletters = ('b'..'z').to_a 83 | lv.storage :file, :device => "vd#{driverletters[d]}", :path => "atomic-disk-#{i}-#{d}.disk", :size => '1024G' 84 | lv.memory = 1024 85 | lv.cpus =2 86 | lv.volume_cache = LIBVIRT_DISK_CACHE 87 | end 88 | end 89 | end 90 | end 91 | end 92 | -------------------------------------------------------------------------------- /vagrant/ansible-step: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # This little helper can be used to step through the 4 | # ansible provisioning for debugging purposes. 5 | # It can optionally take as an argument the name of 6 | # the task with which to start stepping. 7 | # 8 | # E.g call it as 9 | # 10 | # ./ansible-step 11 | # 12 | # or 13 | # 14 | # ./ansible-step "kubeadm init" 15 | 16 | START="" 17 | 18 | if [[ $# -ge 1 ]]; then 19 | START="--start-at-task=\"$*\"" 20 | fi 21 | 22 | cmd="ansible-playbook --inventory=.vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory -v --step ${START} site.yml" 23 | 24 | export ANSIBLE_HOST_KEY_CHECKING=False 25 | eval "$cmd" 26 | -------------------------------------------------------------------------------- /vagrant/demo/README.md: -------------------------------------------------------------------------------- 1 | # Kubernetes Demo Scripts 2 | 3 | These demos can be run on the Kubernetes test environment 4 | after bringing it up with `up.sh`. The `-inside-` scripts 5 | are to be run in the VMs. 
You should run the wrapping 6 | demo scripts: 7 | 8 | * `demo-prepare.sh` : some preparations 9 | * `demo-status.sh` : a status demo that can be run at any time 10 | * `demo-deploy.sh` : demo `gk-deploy` 11 | * `demo-test-heketi.sh` : demo heketi after gk-deploy 12 | * `demo-dynamic-provisioning.sh` : dynamic provisioning with a simple nginx app 13 | -------------------------------------------------------------------------------- /vagrant/demo/demo-deploy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | DEMO_DIR="$(cd "$(dirname "${0}")" && pwd)" 4 | VAGRANT_DIR="${DEMO_DIR}/.." 5 | 6 | . "${DEMO_DIR}/util.sh" 7 | 8 | cd "${VAGRANT_DIR}" || exit 1 9 | 10 | desc "show machines" 11 | run "vagrant status" 12 | 13 | desc "running demo on master..." 14 | run "" 15 | 16 | "${DEMO_DIR}/demo-inside-wrapper.sh" "${DEMO_DIR}/demo-inside-deploy.sh" 17 | -------------------------------------------------------------------------------- /vagrant/demo/demo-dynamic-provisioning.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | DEMO_DIR="$(cd "$(dirname "${0}")" && pwd)" 4 | VAGRANT_DIR="${DEMO_DIR}/.." 5 | 6 | cd "${VAGRANT_DIR}" || exit 1 7 | 8 | "${DEMO_DIR}/demo-inside-wrapper.sh" "${DEMO_DIR}/demo-inside-dynamic-provisioning.sh" 9 | -------------------------------------------------------------------------------- /vagrant/demo/demo-inside-deploy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #sudo yum install -y pv > /dev/null 2>&1 4 | 5 | . ./util.sh 6 | 7 | desc "show kubernetes nodes" 8 | run "kubectl get nodes" 9 | 10 | desc "show pods" 11 | run "kubectl get pods" 12 | 13 | cd deploy || exit 1 14 | 15 | desc "look at topology" 16 | run "vim topology.json" 17 | 18 | desc "run gk-deploy" 19 | run "./gk-deploy -g topology.json" 20 | 21 | desc "show pods etc" 22 | run "kubectl get nodes,all,ep" 23 | 24 | desc "demo-deploy: done" 25 | -------------------------------------------------------------------------------- /vagrant/demo/demo-inside-dynamic-provisioning.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | sudo yum install -y pv > /dev/null 2>&1 4 | 5 | . ./util.sh 6 | 7 | desc "show kubernetes nodes" 8 | run "kubectl get nodes,pods" 9 | 10 | desc "show storage classes" 11 | run "kubectl get storageclass" 12 | 13 | HEKETI_CLI_SERVER=$(kubectl get svc/heketi --template 'http://{{.spec.clusterIP}}:{{(index .spec.ports 0).port}}') 14 | export HEKETI_CLI_SERVER 15 | 16 | #echo HEKETI_CLI_SERVER: $HEKETI_CLI_SERVER 17 | 18 | cat > mysc.yaml < mypvc.yaml < myapp.yaml <' ]] ; do 97 | sleep 1 98 | appIP=$(kubectl get pods -o wide | grep myapp | awk '{print $6}') 99 | done 100 | 101 | run "kubectl get pods -o wide" 102 | 103 | run "curl http://$appIP" 104 | 105 | run "kubectl exec myapp -- /bin/bash -c \"echo 'Hello, world...' 
> /usr/share/nginx/html/index.html\"" 106 | run "curl http://$appIP" 107 | 108 | run "kubectl exec myapp -- /bin/bash -c \"cat /proc/mounts | grep nginx\"" 109 | 110 | mountinfo=$(kubectl exec myapp -- /bin/bash -c "cat /proc/mounts | grep nginx" | awk '{print $1}') 111 | volname=$(echo -n "${mountinfo}" | cut -d: -f2) 112 | glusterip=$(echo -n "${mountinfo}" |cut -d: -f1) 113 | glusterpod=$(kubectl get pods -o wide | grep "${glusterip}" | awk '{print $1}') 114 | 115 | brickinfopath="/var/lib/glusterd/vols/${volname}/bricks" 116 | brickinfofile=$(kubectl exec "${glusterpod}" -- /bin/bash -c "ls -1 ${brickinfopath} | head -n 1") 117 | brickpath=$(kubectl exec "${glusterpod}" -- /bin/bash -c "cat ${brickinfopath}/${brickinfofile} | grep real_path | cut -d= -f2") 118 | brickhost=$(kubectl exec "${glusterpod}" -- /bin/bash -c "cat ${brickinfopath}/${brickinfofile} | grep hostname | cut -d= -f2") 119 | brickpod=$(kubectl get pods -o wide | grep "${brickhost}" | awk '{print $1}') 120 | 121 | run "kubectl exec ${brickpod} -- /bin/bash -c \"cat ${brickpath}/index.html\"" 122 | 123 | desc "demo-dynamic-provisioning: done" 124 | -------------------------------------------------------------------------------- /vagrant/demo/demo-inside-prepare.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "installing pv" 4 | sudo yum install -y pv > /dev/null 2>&1 5 | echo "preparing .vimrc" 6 | echo "set bg=dark" >> ~/.vimrc 7 | echo "done" 8 | -------------------------------------------------------------------------------- /vagrant/demo/demo-inside-status.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #sudo yum install -y pv > /dev/null 2>&1 4 | 5 | . ./util.sh 6 | 7 | run "kubectl get nodes,all,ep" 8 | 9 | desc "demo-status: done" 10 | -------------------------------------------------------------------------------- /vagrant/demo/demo-inside-test-heketi.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #sudo yum install -y pv > /dev/null 2>&1 4 | 5 | . ./util.sh 6 | 7 | desc "show kubernetes nodes" 8 | run "kubectl get nodes,pods" 9 | 10 | cd deploy || exit 1 11 | 12 | HEKETI_CLI_SERVER=$(kubectl get svc/heketi --template 'http://{{.spec.clusterIP}}:{{(index .spec.ports 0).port}}') 13 | export HEKETI_CLI_SERVER 14 | 15 | desc "test heketi with curl" 16 | run "kubectl get svc/heketi --template 'http://{{.spec.clusterIP}}:{{(index .spec.ports 0).port}}' ; echo" 17 | echo "HEKETI_CLI_SERVER: ${HEKETI_CLI_SERVER}" 18 | run "curl ${HEKETI_CLI_SERVER}/hello ; echo" 19 | 20 | desc "test heketi-cli" 21 | run "heketi-cli cluster list" 22 | run "heketi-cli node list" 23 | run "heketi-cli volume list" 24 | 25 | desc "create a volume" 26 | run "heketi-cli volume create --size=2 | tee volume-create.out" 27 | volumeId=$(grep "Volume Id" volume-create.out | awk '{print $3}') 28 | run "heketi-cli volume list" 29 | run "heketi-cli volume info ${volumeId}" 30 | 31 | desc "delete the volume again" 32 | run "heketi-cli volume delete ${volumeId}" 33 | run "heketi-cli volume list" 34 | 35 | desc "demo-test-heketi: done" 36 | -------------------------------------------------------------------------------- /vagrant/demo/demo-inside-wrapper.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | DEMO_DIR="$(cd "$(dirname "${0}")" && pwd)" 4 | VAGRANT_DIR="${DEMO_DIR}/.." 
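# Wrapper glue: dump the current `vagrant ssh-config`, copy util.sh plus the
# requested demo-inside-*.sh script to the master VM, then run the script there
# over an interactive ssh session (-t) so the paced demo output is shown live.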
5 | SSH_CONFIG=${DEMO_DIR}/ssh-config 6 | 7 | cd "${VAGRANT_DIR}" || exit 1 8 | 9 | DEMO=$1 10 | 11 | vagrant ssh-config > "${SSH_CONFIG}" 12 | 13 | scp -F "${SSH_CONFIG}" "${DEMO_DIR}/util.sh" "${DEMO}" master: >/dev/null 2>&1 14 | ssh -t -F "${SSH_CONFIG}" master "./$(basename "${DEMO}")" 15 | -------------------------------------------------------------------------------- /vagrant/demo/demo-prepare.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | DEMO_DIR="$(cd "$(dirname "${0}")" && pwd)" 4 | VAGRANT_DIR="${DEMO_DIR}/.." 5 | DEPLOY_DIR="${VAGRANT_DIR}/../deploy" 6 | SSH_CONFIG="${DEMO_DIR}/ssh-config" 7 | 8 | cd "${VAGRANT_DIR}" || exit 1 9 | 10 | ./up.sh 11 | 12 | vagrant ssh-config > "${SSH_CONFIG}" 13 | 14 | scp -r -F "${SSH_CONFIG}" "${DEPLOY_DIR}" master: 15 | 16 | for NODE in node0 node1 node2 ; do 17 | ssh -t -F "${SSH_CONFIG}" "${NODE}" "sudo docker pull gcr.io/google_containers/nginx-slim:0.8" 18 | done 19 | 20 | "${DEMO_DIR}/demo-inside-wrapper.sh" "${DEMO_DIR}/demo-inside-prepare.sh" 21 | -------------------------------------------------------------------------------- /vagrant/demo/demo-status.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | DEMO_DIR="$(cd "$(dirname "${0}")" && pwd)" 4 | VAGRANT_DIR="${DEMO_DIR}/.." 5 | 6 | cd "${VAGRANT_DIR}" || exit 1 7 | 8 | "${DEMO_DIR}/demo-inside-wrapper.sh" "${DEMO_DIR}/demo-inside-status.sh" 9 | -------------------------------------------------------------------------------- /vagrant/demo/demo-test-heketi.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | DEMO_DIR="$(cd "$(dirname "${0}")" && pwd)" 4 | VAGRANT_DIR="${DEMO_DIR}/.." 5 | 6 | cd "${VAGRANT_DIR}" || exit 1 7 | 8 | "${DEMO_DIR}/demo-inside-wrapper.sh" "${DEMO_DIR}/demo-inside-test-heketi.sh" 9 | -------------------------------------------------------------------------------- /vagrant/demo/util.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright 2016 The Kubernetes Authors. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
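# Demo helpers: desc() prints a highlighted commentary line, prompt() fakes a
# shell prompt, and run() "types" the given command through pv before executing
# it under script(1), then waits for a keypress unless DEMO_AUTO_RUN is set.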
15 | 16 | readonly reset=$(tput sgr0) 17 | readonly green=$(tput bold; tput setaf 2) 18 | readonly yellow=$(tput bold; tput setaf 3) 19 | readonly blue=$(tput bold; tput setaf 6) 20 | readonly timeout=$(if [ "$(uname)" == "Darwin" ]; then echo "1"; else echo "0.1"; fi) 21 | 22 | function desc() { 23 | maybe_first_prompt 24 | echo "${blue}# ${*}${reset}" 25 | prompt 26 | } 27 | 28 | function prompt() { 29 | echo -n "${yellow}\$ ${reset}" 30 | } 31 | 32 | started="" 33 | function maybe_first_prompt() { 34 | if [ -z "${started}" ]; then 35 | prompt 36 | started=true 37 | fi 38 | } 39 | 40 | function run() { 41 | maybe_first_prompt 42 | rate=25 43 | if [ -n "${DEMO_RUN_FAST}" ]; then 44 | rate=1000 45 | fi 46 | echo "${green}${1}${reset}" | pv -qL ${rate} 47 | if [ -n "${DEMO_RUN_FAST}" ]; then 48 | sleep 0.5 49 | fi 50 | OFILE="$(mktemp -t "$(basename "${0}").XXXXXX")" 51 | script -eq -c "${1}" -f "${OFILE}" 52 | r=${?} 53 | read -r -d '' -t "${timeout}" -n 10000 # clear stdin 54 | prompt 55 | if [ -z "${DEMO_AUTO_RUN}" ]; then 56 | read -r -s 57 | fi 58 | return ${r} 59 | } 60 | 61 | #SSH_NODE=$(kubectl get nodes | tail -1 | cut -f1 -d' ') 62 | 63 | trap "echo" EXIT 64 | -------------------------------------------------------------------------------- /vagrant/docker-cache.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | REGISTRY=${1} 4 | MACHINE=${2:-master} 5 | IMAGES=$(vagrant ssh "${MACHINE}" -c "sudo -s -- docker images --format \"{{.Repository}} {{.Tag}}\"" -- -q | tr "[:cntrl:]" "\n") 6 | 7 | while read -r IMAGE; do 8 | if [[ "$IMAGE" == "" ]]; then continue; fi 9 | REPO=$(echo "$IMAGE" | cut -f1 -d " " -) 10 | NAME=${REPO#[^/]*/} 11 | TAG=$(echo "$IMAGE" | cut -f2 -d " " -) 12 | if [[ "${TAG}" == "" ]]; then 13 | TAG="" 14 | else 15 | TAG=":${TAG}" 16 | fi 17 | if [[ "${REPO}" != *${REGISTRY}* ]]; then 18 | echo "Tagging $NAME$TAG" 19 | vagrant ssh "${MACHINE}" -c "sudo docker tag ${REPO}${TAG} ${REGISTRY}/${NAME}${TAG}" -- -qn 20 | echo "Pushing $NAME$TAG" 21 | vagrant ssh "${MACHINE}" -c "sudo docker push ${REGISTRY}/${NAME}${TAG}" -- -qn 22 | fi 23 | done <<< "$IMAGES" 24 | -------------------------------------------------------------------------------- /vagrant/docker-registry-run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | sudo docker run -d \ 4 | -p 5000:5000 \ 5 | --restart=always \ 6 | --name registry \ 7 | -v ~/docker/registry:/var/lib/registry \ 8 | registry:latest 9 | -------------------------------------------------------------------------------- /vagrant/gcr-proxy-state.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [[ "${1}" != "present" ]] && [[ "${1}" != "absent" ]]; then 4 | echo "Error: must supply state, either 'present' or 'absent'" 5 | echo " Example: ./${0} absent" 6 | exit 1 7 | fi 8 | 9 | ansible-playbook -i /vagrant/ansible-inventory -e gcr_proxy_state="${1}" gcr_proxy.yml 10 | -------------------------------------------------------------------------------- /vagrant/gcr_proxy.yml: -------------------------------------------------------------------------------- 1 | - hosts: master 2 | become: yes 3 | become_method: sudo 4 | vars_files: 5 | - "global_vars.yml" 6 | tasks: 7 | - include_role: 8 | name: master 9 | tasks_from: gcr_proxy.yml 10 | when: custom_registry_gcr | default(false) 11 | 12 | - hosts: nodes 13 | become: yes 14 | become_method: sudo 15 | vars_files: 16 | - 
"global_vars.yml" 17 | tasks: 18 | - include_role: 19 | name: nodes 20 | tasks_from: gcr_proxy.yml 21 | when: custom_registry_gcr | default(false) 22 | -------------------------------------------------------------------------------- /vagrant/global_vars.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Specify either "latest" or a version such as "1.7.10" 3 | kubernetes_version: latest 4 | kubernetes_token: abcdef.1234567890abcdef 5 | kubernetes_version_tag: "{% if kubernetes_version != 'latest' %}-{{ kubernetes_version }}{% endif %}" 6 | install_pkgs: 7 | - wget 8 | - screen 9 | - git 10 | - vim 11 | - glusterfs-client 12 | - heketi-client 13 | - iptables 14 | - iptables-utils 15 | - iptables-services 16 | - docker 17 | - kubeadm{{ kubernetes_version_tag }} 18 | - kubelet{{ kubernetes_version_tag }} 19 | - kubectl{{ kubernetes_version_tag }} 20 | - ntp 21 | # The following variables control the use and configuration of a custom docker 22 | # registry and/or custom rpm repositories: 23 | # * custom_registry: Specifies and enables a custom registry (default none) 24 | # * custom_registry_insecure: Marks the custom registry as insecure. This is 25 | # typically the case if you're running a local registry on your host machine 26 | # (default true) 27 | # * custom_registry_add: Adds the custom registry as the first registry, making 28 | # it the default registry to search for images if no registry is specified. 29 | # (default true) 30 | # * custom_registry_gcr: Sets up an nginx proxy on the master node VM to allow 31 | # redirection of requests to gcr.io to your custom registry. This allows you 32 | # to pull Google's images without changing image names/URLs. (default false) 33 | # It is recommended to increase your master node's RAM to at least 2048 MB 34 | # in the Vagrantfile for better performance. 35 | # * gcr_proxy_state: Whether or not the VMs' /etc/hosts will be updated to 36 | # redirect gcr.io traffic to the nginx proxy running on the master node VM. 37 | # Valid values are 'present' and 'absent'. (default 'present') 38 | # * gcr_proxy_nginx: The version of nginx-slim to use for the gcr.io proxy. 39 | # (default '0.8') 40 | # * custom_yum_repos: Define custom yum repos as mapping of repo names 41 | # (short strings) to repo urls. One or more repo must be specified if this 42 | # option is enabled. Using this option will disable the normal user of 43 | # the centos epel, centos gluster, and google kubernetes repos. 44 | # * custom_host_aliases: If the custom_repos or registry refer to hostnames 45 | # that cannot be resolved normally a custom_host_aliases variable can be 46 | # used to add specific entries to /etc/hosts. 
47 | #custom_registry: 192.168.10.1:5000 48 | #custom_registry_insecure: false 49 | #custom_registry_add: false 50 | #custom_registry_gcr: true 51 | #gcr_proxy_state: absent 52 | #gcr_proxy_nginx: 0.23 53 | #custom_yum_repos: 54 | # kubernetes_el7: http://mypkgs/path/to/repo1 55 | # epel_el7: http://mypkgs/path/to/repo2 56 | # gluster_el7: http://mypkgs/another/repo/path/repo3 57 | #custom_host_aliases: 58 | # - addr: 192.168.122.164 59 | # names: 60 | # - mypkgs 61 | # - mypkgs.localdomain 62 | # - rpms.example.com 63 | # - addr: 192.168.122.168 64 | # names: 65 | # - marble 66 | # - addr: 192.168.122.169 67 | # names: 68 | # - jimmy 69 | # - cricket 70 | -------------------------------------------------------------------------------- /vagrant/roles/common/files/10-kubeadm-post-1.8.conf: -------------------------------------------------------------------------------- 1 | [Service] 2 | Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf" 3 | Environment="KUBELET_SYSTEM_PODS_ARGS=--pod-manifest-path=/etc/kubernetes/manifests --allow-privileged=true" 4 | Environment="KUBELET_NETWORK_ARGS=--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin" 5 | Environment="KUBELET_DNS_ARGS=--cluster-dns=10.96.0.10 --cluster-domain=cluster.local" 6 | Environment="KUBELET_AUTHZ_ARGS=--authorization-mode=Webhook --client-ca-file=/etc/kubernetes/pki/ca.crt" 7 | Environment="KUBELET_CGROUP_ARGS=--cgroup-driver=systemd" 8 | ExecStart= 9 | #ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_SYSTEM_PODS_ARGS $KUBELET_NETWORK_ARGS $KUBELET_DNS_ARGS $KUBELET_AUTHZ_ARGS $KUBELET_EXTRA_ARGS 10 | ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_SYSTEM_PODS_ARGS $KUBELET_NETWORK_ARGS $KUBELET_DNS_ARGS $KUBELET_AUTHZ_ARGS $KUBELET_EXTRA_ARGS $KUBELET_CGROUP_ARGS 11 | -------------------------------------------------------------------------------- /vagrant/roles/common/files/10-kubeadm.conf: -------------------------------------------------------------------------------- 1 | [Service] 2 | Environment="KUBELET_KUBECONFIG_ARGS=--kubeconfig=/etc/kubernetes/kubelet.conf --require-kubeconfig=true" 3 | Environment="KUBELET_SYSTEM_PODS_ARGS=--pod-manifest-path=/etc/kubernetes/manifests --allow-privileged=true" 4 | Environment="KUBELET_NETWORK_ARGS=--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin" 5 | Environment="KUBELET_DNS_ARGS=--cluster-dns=10.96.0.10 --cluster-domain=cluster.local" 6 | Environment="KUBELET_AUTHZ_ARGS=--authorization-mode=Webhook --client-ca-file=/etc/kubernetes/pki/ca.crt" 7 | Environment="KUBELET_CGROUP_ARGS=--cgroup-driver=systemd" 8 | ExecStart= 9 | #ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_SYSTEM_PODS_ARGS $KUBELET_NETWORK_ARGS $KUBELET_DNS_ARGS $KUBELET_AUTHZ_ARGS $KUBELET_EXTRA_ARGS 10 | ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_SYSTEM_PODS_ARGS $KUBELET_NETWORK_ARGS $KUBELET_DNS_ARGS $KUBELET_AUTHZ_ARGS $KUBELET_EXTRA_ARGS $KUBELET_CGROUP_ARGS 11 | -------------------------------------------------------------------------------- /vagrant/roles/common/files/dm_snapshot.conf: -------------------------------------------------------------------------------- 1 | dm_snapshot 2 | -------------------------------------------------------------------------------- /vagrant/roles/common/files/k8s.conf: -------------------------------------------------------------------------------- 1 | 
net.bridge.bridge-nf-call-ip6tables = 1 2 | net.bridge.bridge-nf-call-iptables = 1 3 | -------------------------------------------------------------------------------- /vagrant/roles/common/files/rc.local: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # THIS FILE IS ADDED FOR COMPATIBILITY PURPOSES 3 | # 4 | # It is highly advisable to create own systemd services or udev rules 5 | # to run scripts during boot instead of using this file. 6 | # 7 | # In contrast to previous versions due to parallel execution during boot 8 | # this script will NOT be run after all other services. 9 | # 10 | # Please note that you must run 'chmod +x /etc/rc.d/rc.local' to ensure 11 | # that this script will be executed during boot. 12 | 13 | ifup eth1 14 | touch /var/lock/subsys/local 15 | -------------------------------------------------------------------------------- /vagrant/roles/common/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: restart network 2 | systemd: 3 | state: restarted 4 | service: network 5 | when: ansible_eth1.ipv4 is not defined 6 | 7 | - name: refresh inventory 8 | setup: 9 | when: ansible_eth1.ipv4 is not defined 10 | 11 | - name: build hosts file 12 | lineinfile: dest=/etc/hosts regexp='.*{{ item }}$' line="{{ hostvars[item].ansible_eth1.ipv4.address }} {{item}}" state=present 13 | when: hostvars[item].ansible_eth1.ipv4.address is defined 14 | with_items: "{{ groups['all'] }}" 15 | 16 | - name: disable selinux 17 | selinux: state=disabled 18 | 19 | - name: disable of selinux - now 20 | command: setenforce 0 21 | ignore_errors: yes 22 | 23 | - name: Does cache exist? 24 | local_action: stat path="{{ vagrant_home }}/cache/{{ ansible_distribution }}/{{ ansible_distribution_version }}/yum.tgz" 25 | become: false 26 | register: cache_exists 27 | 28 | - name: Keep yum cache 29 | shell: yum-config-manager --setopt 'keepcache=1' --save > /dev/null 30 | when: 31 | - vagrant_cache 32 | 33 | - name: Copy cache from host 34 | unarchive: 35 | src: "{{ vagrant_home }}/cache/{{ ansible_distribution }}/{{ ansible_distribution_version }}/yum.tgz" 36 | dest: "/var/cache/" 37 | when: 38 | - cache_exists.stat.exists == True 39 | - vagrant_cache 40 | 41 | - name: customize hosts file 42 | lineinfile: dest=/etc/hosts line="{{ item.addr }} {{ item.names | join(\" \") }}" 43 | when: custom_host_aliases is defined 44 | with_items: "{{ custom_host_aliases | default(omit) }}" 45 | 46 | - block: 47 | # setup standard repos 48 | - name: install centos and epel repos 49 | yum: 50 | name: "{{ item }}" 51 | state: present 52 | with_items: 53 | - epel-release 54 | - centos-release-gluster 55 | 56 | - name: setup kubernetes repo 57 | yum_repository: 58 | name: kubernetes 59 | description: Kubernetes 60 | baseurl: https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64 61 | enabled: yes 62 | when: custom_repos is not defined 63 | 64 | - block: 65 | - name: setup custom yum repos 66 | yum_repository: 67 | name: "{{ item.key }}" 68 | description: "Custom-Repo-{{ item.key }}" 69 | baseurl: "{{ item.value }}" 70 | enabled: yes 71 | with_dict: "{{ custom_yum_repos }}" 72 | 73 | - name: disable yum fastestmirror plugin 74 | lineinfile: 75 | path: /etc/yum/pluginconf.d/fastestmirror.conf 76 | regexp: "^enabled=" 77 | line: enabled=0 78 | when: custom_yum_repos is defined 79 | 80 | - name: install base packages 81 | yum: 82 | name: "{{ item }}" 83 | state: present 84 | disable_gpg_check: yes 85 | 
update_cache: yes 86 | with_items: "{{ install_pkgs }}" 87 | 88 | - name: Ensure net.bridge.bridge-nf-call-iptables is set. See kubeadm 89 | copy: src=k8s.conf owner=root group=root dest=/etc/sysctl.d/k8s.conf 90 | 91 | - name: determine installed kubeadm version 92 | command: rpm -q --qf '%{version}' kubeadm 93 | register: kube_ver 94 | 95 | #- name: save iptables 96 | # command: service iptables save 97 | 98 | - name: configure dm_snapshot module 99 | copy: src=dm_snapshot.conf owner=root group=root mode=644 dest=/etc/modules-load.d 100 | 101 | - name: Ensure firewalld.service 102 | service: name=firewalld state=started enabled=yes 103 | 104 | - name: firewall trust eth1 105 | firewalld: zone=trusted interface=eth1 permanent=true state=enabled immediate=true 106 | 107 | - name: firewall trust weave 108 | firewalld: zone=trusted interface=weave permanent=true state=enabled immediate=true 109 | 110 | - name: firewall trust 172.42.42.0/24 111 | firewalld: source=172.42.42.0/24 zone=trusted permanent=true state=enabled immediate=true 112 | 113 | # 10.32.0.0/12 is the default pod CIDR for Weave Net 114 | # you will need to update this if you are using a different 115 | # network provider, or a different CIDR for whatever reason 116 | - name: firewall trust weave net CIDR 117 | firewalld: source=10.32.0.0/12 zone=trusted permanent=true state=enabled immediate=true 118 | 119 | - name: firewall trust port 10250 120 | firewalld: port=10250/tcp zone=trusted permanent=true state=enabled immediate=true 121 | 122 | - name: firewall trust port 6443 123 | firewalld: port=6443/tcp zone=trusted permanent=true state=enabled immediate=true 124 | 125 | - name: firewall trust port 9898 126 | firewalld: port=9898/tcp zone=trusted permanent=true state=enabled immediate=true 127 | 128 | # kubeadm 1.8+ does not like swap 129 | - block: 130 | - name: disable current swaps 131 | command: swapoff --all 132 | 133 | - name: disable fstab swaps 134 | mount: 135 | state: absent 136 | path: swap 137 | 138 | - name: install fixed kubelet-kubeadm config file (1.8 and later) 139 | copy: src=10-kubeadm-post-1.8.conf dest=/etc/systemd/system/kubelet.service.d/10-kubeadm.conf owner=root group=root mode=644 backup=yes 140 | when: kube_ver.stdout | version_compare('1.8', '>=') 141 | 142 | - name: install fixed kubelet-kubeadm config file 143 | copy: src=10-kubeadm.conf dest=/etc/systemd/system/kubelet.service.d/ owner=root group=root mode=644 backup=yes 144 | when: kube_ver.stdout | version_compare('1.8', '<') 145 | 146 | - name: copy docker conf 147 | template: src=docker.j2 dest=/etc/sysconfig/docker force=yes mode=0644 148 | when: custom_registry is defined 149 | 150 | - name: enable kube services 151 | service: name={{ item }} state=restarted enabled=yes daemon_reload=yes 152 | with_items: 153 | - docker 154 | - kubelet 155 | - ntpd 156 | -------------------------------------------------------------------------------- /vagrant/roles/common/templates/docker.j2: -------------------------------------------------------------------------------- 1 | OPTIONS=' --selinux-enabled --log-driver=journald' 2 | if [ -z "${DOCKER_CERT_PATH}" ]; then 3 | DOCKER_CERT_PATH=/etc/docker 4 | fi 5 | 6 | {% if custom_registry_add | default(true) -%} 7 | ADD_REGISTRY='--add-registry {{ custom_registry }}' 8 | {% endif %} 9 | {% if custom_registry_insecure | default(true) -%} 10 | INSECURE_REGISTRY='--insecure-registry {{ custom_registry }}{% if custom_registry_gcr | default(false) %} --insecure-registry gcr.io{% endif %}' 11 | {% endif %} 12 | 
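For reference, a minimal sketch of what this template produces: assuming `custom_registry: 192.168.121.1:5000` (the address used in the `docker-cache.sh` example in vagrant/README.md), with `custom_registry_add` and `custom_registry_insecure` left at the template defaults (true) and `custom_registry_gcr` false, the rendered /etc/sysconfig/docker would look roughly like:

```
OPTIONS=' --selinux-enabled --log-driver=journald'
if [ -z "${DOCKER_CERT_PATH}" ]; then
    DOCKER_CERT_PATH=/etc/docker
fi

ADD_REGISTRY='--add-registry 192.168.121.1:5000'
INSECURE_REGISTRY='--insecure-registry 192.168.121.1:5000'
```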
-------------------------------------------------------------------------------- /vagrant/roles/master/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | custom_registry_add: false 3 | custom_registry_gcr: false 4 | gcr_proxy_nginx: 0.8 5 | -------------------------------------------------------------------------------- /vagrant/roles/master/tasks/gcr_proxy.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install ansible-docker depencendies 3 | yum: 4 | name: "{{ item }}" 5 | state: present 6 | disable_gpg_check: yes 7 | with_items: 8 | - docker-python 9 | 10 | - name: check for nginx-slim image 11 | uri: 12 | url: "http://{{ custom_registry }}/v2/google_containers/nginx-slim/manifests/{{ gcr_proxy_nginx }}" 13 | status_code: "200,404" 14 | register: nginx_exists 15 | 16 | - name: pull nginx-slim image (custom) 17 | docker_image: 18 | repository: "{{ custom_registry }}" 19 | name: google_containers/nginx-slim 20 | tag: "{{ gcr_proxy_nginx }}" 21 | when: 22 | - nginx_exists.status == "200" 23 | 24 | - name: pull nginx-slim image (gcr.io) 25 | docker_image: 26 | repository: gcr.io 27 | name: google_containers/nginx-slim 28 | tag: "{{ gcr_proxy_nginx }}" 29 | when: 30 | - nginx_exists.status == "404" 31 | 32 | - name: push nginx-slim image 33 | docker_image: 34 | repository: "{{ custom_registry }}" 35 | name: google_containers/nginx-slim 36 | tag: "{{ gcr_proxy_nginx }}" 37 | push: yes 38 | when: 39 | - nginx_exists.status == "404" 40 | 41 | - name: copy nginx.conf 42 | template: src=nginx.conf.j2 dest=/vagrant/nginx.conf force=yes mode=0644 43 | 44 | - name: start proxy container 45 | docker_container: 46 | name: gcr_proxy 47 | image: "google_containers/nginx-slim:{{ gcr_proxy_nginx }}" 48 | ports: 49 | - "80:80" 50 | state: started 51 | restart_policy: always 52 | volumes: 53 | - /vagrant/nginx.conf:/etc/nginx/nginx.conf:ro 54 | 55 | - lineinfile: 56 | path: /etc/hosts 57 | state: "{{ gcr_proxy_state | default('present') }}" 58 | line: "{{ vagrant_master }} gcr.io" 59 | -------------------------------------------------------------------------------- /vagrant/roles/master/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - include: gcr_proxy.yml 2 | when: 3 | - custom_registry is defined 4 | - custom_registry_gcr | default(false) 5 | 6 | - name: install s3-curl depencendies 7 | yum: 8 | name: "{{ item }}" 9 | state: present 10 | disable_gpg_check: yes 11 | with_items: 12 | - perl-Digest-HMAC 13 | - unzip 14 | 15 | - name: get s3curl 16 | unarchive: 17 | src: http://s3.amazonaws.com/doc/s3-example-code/s3-curl.zip 18 | dest: /home/vagrant/ 19 | remote_src: yes 20 | 21 | - name: make s3curl executable 22 | file: 23 | path: /home/vagrant/s3-curl/s3curl.pl 24 | state: file 25 | mode: "a+x" 26 | 27 | - name: check for kubelet config 28 | stat: 29 | path: /etc/kubernetes/kubelet.conf 30 | register: kubelet 31 | 32 | - block: 33 | - name: get parameters of kubeadm init 34 | command: kubeadm init --help 35 | register: kubeadm_init_help 36 | 37 | - name: kubeadm needs deprecated --skip-preflight-checks 38 | set_fact: 39 | kubeadm_preflight: '--skip-preflight-checks' 40 | when: kubeadm_init_help.stdout.find('--skip-preflight-checks') != -1 41 | 42 | - name: kubeadm init 43 | command: kubeadm init {{ kubeadm_preflight | default('--ignore-preflight-errors=all') }} --token={{ kubernetes_token }} --kubernetes-version=v{{ kube_ver.stdout }} 
--apiserver-advertise-address={{ ansible_eth1.ipv4.address }} 44 | 45 | - name: create root kube dir 46 | file: 47 | path: /root/.kube 48 | state: directory 49 | owner: root 50 | group: root 51 | 52 | - name: create root kube config 53 | copy: 54 | src: /etc/kubernetes/admin.conf 55 | dest: /root/.kube/config 56 | remote_src: True 57 | owner: root 58 | group: root 59 | 60 | - name: create user kube dir 61 | file: 62 | path: /home/vagrant/.kube 63 | state: directory 64 | owner: vagrant 65 | group: vagrant 66 | 67 | - name: create user kube config 68 | copy: 69 | src: /etc/kubernetes/admin.conf 70 | dest: /home/vagrant/.kube/config 71 | remote_src: True 72 | owner: vagrant 73 | group: vagrant 74 | when: not kubelet.stat.exists 75 | 76 | - name: wait for k8s to be ready 77 | wait_for: 78 | host: localhost 79 | port: 6443 80 | state: started 81 | timeout: 60 82 | 83 | - name: get k8s server and client versions 84 | command: kubectl version 85 | register: kubectl_version 86 | 87 | - name: create weave network 88 | command: kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version={{ kubectl_version.stdout | b64encode }}" 89 | 90 | # jsonpath tested with kubeadm 1.6, 1.7, 1.8 91 | - name: get dns service address 92 | command: kubectl get services --all-namespaces -ojsonpath='{.items[?(@.metadata.name=="kube-dns")].spec.clusterIP}' 93 | register: kubednsaddress 94 | 95 | - name: wait for dns to be ready 96 | wait_for: host={{ kubednsaddress.stdout }} port=53 state=started timeout=1800 97 | -------------------------------------------------------------------------------- /vagrant/roles/master/tasks/yum_cache.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Archive yum cache 3 | archive: 4 | path: "/var/cache/yum" 5 | dest: "/tmp/yum.tgz" 6 | when: vagrant_cache 7 | 8 | - name: Copy yum cache to host 9 | fetch: 10 | src: "/tmp/yum.tgz" 11 | dest: "{{ vagrant_home }}/cache/{{ ansible_distribution }}/{{ ansible_distribution_version }}/" 12 | flat: true 13 | when: vagrant_cache 14 | -------------------------------------------------------------------------------- /vagrant/roles/master/templates/nginx.conf.j2: -------------------------------------------------------------------------------- 1 | events { 2 | worker_connections 1024; 3 | } 4 | http { 5 | upstream my_gcr { 6 | server {{ custom_registry }}; 7 | } 8 | server { 9 | listen *:80; 10 | location / { 11 | proxy_http_version 1.1; 12 | proxy_buffering off; 13 | proxy_set_header Proxy ""; 14 | proxy_pass http://my_gcr; 15 | } 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /vagrant/roles/nodes/tasks/gcr_proxy.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - lineinfile: 3 | path: /etc/hosts 4 | state: "{{ gcr_proxy_state | default('present') }}" 5 | line: "{{ vagrant_master }} gcr.io" 6 | -------------------------------------------------------------------------------- /vagrant/roles/nodes/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: establish route for 10.96.0.0/16 2 | command: ip route add 10.96.0.0/16 dev eth1 src {{ hostvars[item].ansible_eth1.ipv4.address }} 3 | with_items: "{{ ansible_hostname }}" 4 | register: route_add 5 | failed_when: route_add.rc != 0 and "File exists" not in route_add.stderr 6 | 7 | - name: Open port 24007 (GlusterFS management) 8 | firewalld: port=24007/tcp zone=trusted permanent=true state=enabled 
immediate=true 9 | 10 | - name: Open port 24008 (GlusterFS RDMA) 11 | firewalld: port=24008/tcp zone=trusted permanent=true state=enabled immediate=true 12 | 13 | - name: Open port 2222 (GlusterFS sshd) 14 | firewalld: port=2222/tcp zone=trusted permanent=true state=enabled immediate=true 15 | 16 | - name: Open ports 49152-49251 (GlusterFS bricks) 17 | firewalld: port=49152-49251/tcp zone=trusted permanent=true state=enabled immediate=true 18 | 19 | - include: gcr_proxy.yml 20 | when: 21 | - custom_registry is defined 22 | - custom_registry_gcr | default(false) 23 | 24 | - name: Pull GlusterFS Docker image 25 | command: docker pull gluster/gluster-centos:latest 26 | register: task_result 27 | until: task_result.rc == 0 28 | retries: 5 29 | delay: 1 30 | 31 | - name: Pull heketi Docker image 32 | command: docker pull heketi/heketi:dev 33 | register: task_result 34 | until: task_result.rc == 0 35 | retries: 5 36 | delay: 1 37 | 38 | - name: clean up kubernetes /etc directory 39 | file: path=/etc/kubernetes/manifests state=absent 40 | 41 | - name: check for kubelet config 42 | stat: 43 | path: /etc/kubernetes/kubelet.conf 44 | register: kubelet 45 | 46 | - block: 47 | - name: kubeadm join with master 48 | command: kubeadm join --discovery-token-unsafe-skip-ca-verification --ignore-preflight-errors=all --token={{ kubernetes_token }} {{ hostvars['master'].ansible_eth1.ipv4.address }}:6443 49 | 50 | - name: create root kube dir on node 51 | file: 52 | path: /root/.kube 53 | state: directory 54 | owner: root 55 | group: root 56 | when: not kubelet.stat.exists 57 | 58 | - name: wait for kubelet to create kubernetes.conf file 59 | wait_for: 60 | path: /etc/kubernetes/kubelet.conf 61 | 62 | - name: create root kube config on node 63 | copy: 64 | src: /etc/kubernetes/kubelet.conf 65 | dest: /root/.kube/config 66 | remote_src: True 67 | owner: root 68 | group: root 69 | 70 | - name: create user kube dir on node 71 | file: 72 | path: /home/vagrant/.kube 73 | state: directory 74 | owner: vagrant 75 | group: vagrant 76 | 77 | - name: create user kube config on node 78 | copy: 79 | src: /etc/kubernetes/kubelet.conf 80 | dest: /home/vagrant/.kube/config 81 | remote_src: True 82 | owner: vagrant 83 | group: vagrant 84 | 85 | - name: wait for node to be ready 86 | shell: test "$(kubectl get nodes {{ ansible_hostname }} --no-headers | awk '{ print $2 }')" = "Ready" 87 | register: task_result 88 | until: task_result.rc == 0 89 | delay: 10 90 | retries: 30 91 | changed_when: false 92 | when: not kubelet.stat.exists 93 | -------------------------------------------------------------------------------- /vagrant/rollback.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | MACHINES=(${@:-$(vagrant status | grep running | awk '{print $1}')}) 4 | 5 | vagrant sandbox rollback "${MACHINES[@]}" || exit 1 6 | 7 | for m in ${MACHINES[*]}; do 8 | echo "[${m}] Restarting services..." 
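# the sandbox rollback restores disk state only, so restart the core services
# to pick up the restored configuration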
9 | vagrant ssh "${m}" -c "sudo systemctl restart docker kubelet ntpd" 1>/dev/null 10 | done 11 | -------------------------------------------------------------------------------- /vagrant/site.yml: -------------------------------------------------------------------------------- 1 | - hosts: 127.0.0.1 2 | connection: local 3 | tasks: 4 | - name: Make sure cache directory exists 5 | file: 6 | path: "{{ vagrant_home }}/cache/{{ ansible_distribution }}/{{ ansible_distribution_version }}/" 7 | state: directory 8 | when: vagrant_cache 9 | 10 | - hosts: all 11 | become: yes 12 | become_method: sudo 13 | vars_files: 14 | - "global_vars.yml" 15 | roles: 16 | - common 17 | 18 | - hosts: master 19 | become: yes 20 | become_method: sudo 21 | vars_files: 22 | - "global_vars.yml" 23 | roles: 24 | - master 25 | 26 | - hosts: nodes 27 | become: yes 28 | become_method: sudo 29 | vars_files: 30 | - "global_vars.yml" 31 | roles: 32 | - nodes 33 | 34 | - hosts: master 35 | become: yes 36 | become_method: sudo 37 | vars_files: 38 | - "global_vars.yml" 39 | tasks: 40 | - include_role: 41 | name: master 42 | tasks_from: yum_cache.yml 43 | when: vagrant_cache 44 | -------------------------------------------------------------------------------- /vagrant/up.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | export ANSIBLE_TIMEOUT=60 4 | vagrant up --no-provision "${@}" \ 5 | && vagrant provision 6 | 7 | if [ $? -eq 0 ] && [[ "x$(vagrant plugin list | grep sahara)" != "x" ]]; then 8 | vagrant sandbox on 9 | fi 10 | --------------------------------------------------------------------------------
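Putting these pieces together, a typical development loop with the scripts above (assuming the vagrant-sahara plugin is installed, so `up.sh` can enable a sandbox snapshot) might look like this:

```bash
# Bring the cluster up: provisions all VMs via Ansible and, if the sahara
# plugin is present, takes a sandbox snapshot of the freshly provisioned VMs.
./up.sh

# Work on the cluster.
vagrant ssh master
[vagrant@master]$ kubectl get nodes

# Discard the changes and return to the post-provision snapshot; rollback.sh
# also restarts docker, kubelet and ntpd on each running VM.
./rollback.sh
```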