├── .gitignore ├── CONTRIBUTING.md ├── ChangeLog.md ├── LICENSE ├── Makefile ├── README.md ├── circle.yml ├── docker-compose.yml ├── docs ├── docker.png ├── hyperion-k8s.png ├── hyperion-k8s.svg ├── kubernetes.png ├── packer.png └── terraform.png ├── packer ├── digitalocean │ ├── README.md │ ├── hyperion.json │ └── settings.json ├── ec2 │ ├── README.md │ ├── ec2-ami-tools.zip │ ├── hyperion.json │ └── settings.json ├── google │ ├── README.md │ ├── hyperion.json │ └── settings.json ├── units │ ├── docker.service │ ├── etcd.service │ ├── flannel.service │ ├── kube-apiserver.service │ ├── kube-controller-manager.service │ ├── kube-kubelet.service │ ├── kube-proxy.service │ └── kube-scheduler.service └── virtualbox │ ├── README.md │ ├── http │ └── debian-8.2.0-amd64 │ │ └── preseed.cfg │ ├── hyperion.json │ └── scripts │ └── debian-8.2.0-amd64 │ ├── cleanup.sh │ ├── sudoers.sh │ ├── update.sh │ ├── vagrant.sh │ └── vbguest.sh ├── services ├── kube-ui │ ├── kube-ui-rc.yaml │ └── kube-ui-svc.yaml ├── kubedash │ └── kube-config.yaml ├── logging │ ├── es-controller.yaml │ ├── es-service.yaml │ ├── kibana-controller.yaml │ └── kibana-service.yaml ├── monitoring │ ├── grafana-service.yaml │ ├── heapster-controller.yaml │ ├── heapster-service.yaml │ ├── influxdb-grafana-controller.yaml │ └── influxdb-service.yaml └── namespaces │ ├── namespace-admin.json │ ├── namespace-dev.json │ └── namespace-prod.json └── terraform ├── digitalocean ├── README.md ├── hyperion.tf ├── outputs.tf ├── provider.tf ├── terraform.tfvars.example └── variables.tf ├── ec2 ├── README.md ├── hyperion.tf ├── provider.tf └── variables.tf ├── etcd.env ├── google ├── README.md ├── hyperion.tf ├── output.tf ├── provider.tf └── variables.tf ├── kubernetes.env └── openstack ├── README.md ├── hyperion.tf ├── provider.tf └── variables.tf /.gitignore: -------------------------------------------------------------------------------- 1 | .vagrant/ 2 | ansible/roles/master/files/etcd 3 | ansible/roles/master/files/kube-apiserver 4 | ansible/roles/master/files/kube-controller-manager 5 | ansible/roles/master/files/kube-scheduler 6 | ansible/roles/minion/files/kube-proxy 7 | ansible/roles/minion/files/kubelet 8 | bin/ 9 | output/ 10 | hyperion-*.tar.gz 11 | 12 | terraform.tfvars 13 | *.tfplan* 14 | *.tfstate* 15 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | If you discover issues, have ideas for improvements or new features, or 4 | want to contribute a new module, please report them to the 5 | [issue tracker][1] of the repository or submit a pull request. Please, 6 | try to follow these guidelines when you do so. 7 | 8 | ## Issue reporting 9 | 10 | * Check that the issue has not already been reported. 11 | * Check that the issue has not already been fixed in the latest code 12 | (a.k.a. `develop`). 13 | * Be clear, concise and precise in your description of the problem. 14 | * Open an issue with a descriptive title and a summary in grammatically correct, 15 | complete sentences. 16 | * Include any relevant code to the issue summary. 17 | 18 | ## Pull requests 19 | 20 | * Branch *develop* is for new features/bugfix. *master* branch is the stable code. 21 | * Use a topic branch to easily amend a pull request later, if necessary. 22 | * Write [good commit messages][2]. 
23 | * Open a [pull request][3] that relates to *only* one subject with a clear title 24 | and description in grammatically correct, complete sentences. 25 | 26 | 27 | [1]: https://github.com/portefaix/hyperion-k8s/issues 28 | [2]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html 29 | [3]: https://help.github.com/articles/using-pull-requests 30 | -------------------------------------------------------------------------------- /ChangeLog.md: -------------------------------------------------------------------------------- 1 | Hyperion ChangeLog 2 | ====================== 3 | 4 | # Version 0.10.0 (01/27/2016) 5 | 6 | - Add EC2 provider for Terraform 7 | - Add Digital Ocean provider for Terraform 8 | 9 | # Version 0.9.0 (08/18/2015) 10 | 11 | - ``FIX`` Terraform deployment for Google Compute Engine 12 | - ``FIX`` Packer definitions for Google Compute Engine 13 | - Update Packer definitions with binary 0.9.0 14 | - Update to Kubernetes 1.1.3 15 | - Update to Docker 1.9.1 16 | - Update to Flannel 0.5.5 17 | 18 | # Version 0.8.0 (12/14/2015) 19 | 20 | - Add EC2 provider for Packer 21 | - Add Digital Ocean provider for Packer 22 | - Add Google Compute Engine provider for Packer 23 | - Init Packer configurations 24 | - Remove Ansibles 25 | 26 | # Version 0.7.0 (08/18/2015) 27 | 28 | - Update Ansible configuration 29 | 30 | # Version 0.6.0 (11/30/2014) 31 | 32 | - Update Kubernetes version 33 | - Setup CoreOS virtual machine 34 | - Update Kibana service 35 | - Update Grafana service 36 | - UpdateElasticsearch service 37 | 38 | # Version 0.5.0 (06/16/2014) 39 | 40 | - ``#7`` : Update Python tests configuration to validate CoreOS installation 41 | - ``#6`` : Add shared volume on Vagrant installation 42 | - ``#4`` : Upgrade to Elasticsearch 1.2.1 43 | - Update CoreOS setup using Etcd and Fleet 44 | - Add documentation for Fluentd and Heka usage 45 | 46 | # Version 0.4.0 (06/05/2014) 47 | 48 | - Add installation on CoreOS using Vagrant 49 | - Update Dockerfile using Docker best practices 50 | - Add Kibana default dashboard 51 | - Add Redis configuration 52 | 53 | # Version 0.3.0 (05/23/2014) 54 | 55 | - Add Hyperion default dashboard to Grafana 56 | - ``FIX`` Graphite configuration 57 | 58 | # Version 0.2.0 (05/21/2014) 59 | 60 | - Add nginx configuration for Hyperion file 61 | - Add Elasticsearch plugins 62 | - ``FIX`` nginx, graphite, grafana configuration. 63 | - Update StatsD configuration 64 | 65 | # Version 0.1.0 (05/16/2014) 66 | 67 | - Setup Kubernetes cluster with Vagrant 68 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. 
For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. 
Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 
135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [2014-2016] [Nicolas Lamirault ] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 
194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2014-2016 Nicolas Lamirault 2 | 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | APP = hyperion 16 | VERSION = 0.10.0 17 | 18 | SHELL := /bin/bash 19 | 20 | VAGRANT = vagrant 21 | DOCKER = "docker" 22 | 23 | K8S_URI=https://storage.googleapis.com/kubernetes-release/release 24 | K8S_VERSION=1.1.3 25 | K8S_ARCH=linux/amd64 26 | K8S_BINARIES = \ 27 | kube-apiserver \ 28 | kube-proxy \ 29 | kube-scheduler \ 30 | kube-controller-manager \ 31 | kubelet \ 32 | kubectl 33 | 34 | ETCD_URI=https://github.com/coreos/etcd/releases/download 35 | ETCD_VERSION=2.1.1 36 | 37 | DOCKER_URI=https://get.docker.com/builds/Linux/x86_64 38 | DOCKER_VERSION=1.9.1 39 | 40 | FLANNEL_URI=https://github.com/coreos/flannel/releases/download 41 | FLANNEL_VERSION=0.5.5 42 | 43 | PACKER ?= packer 44 | TERRAFORM ?= terraform 45 | 46 | NO_COLOR=\033[0m 47 | OK_COLOR=\033[32;01m 48 | ERROR_COLOR=\033[31;01m 49 | WARN_COLOR=\033[33;01m 50 | 51 | OUTPUT=bin 52 | 53 | all: help 54 | 55 | help: 56 | @echo -e "$(OK_COLOR) ==== [$(APP)] [$(VERSION)]====$(NO_COLOR)" 57 | @echo -e "$(WARN_COLOR)- binaries$(NO_COLOR) : Download binaries$(NO_COLOR)" 58 | @echo -e "$(WARN_COLOR)- archive$(NO_COLOR) : Build K8S binaries archive$(NO_COLOR)" 59 | @echo -e "$(WARN_COLOR)- build provider=xxx$(NO_COLOR) : Build box for provider$(NO_COLOR)" 60 | 61 | clean: 62 | rm -fr output hyperion-*.tar.gz 63 | 64 | configure: 65 | @mkdir -p ./bin 66 | 67 | etcd: configure 68 | @echo -e "$(OK_COLOR)[$(APP)] Download Etcd$(NO_COLOR)" 69 | @curl --silent -L -o /tmp/etcd-v$(ETCD_VERSION)-linux-amd64.tar.gz $(ETCD_URI)/v$(ETCD_VERSION)/etcd-v$(ETCD_VERSION)-linux-amd64.tar.gz && \ 70 | tar zxf /tmp/etcd-v$(ETCD_VERSION)-linux-amd64.tar.gz -C /tmp/ && \ 71 | cp -f /tmp/etcd-v$(ETCD_VERSION)-linux-amd64/etcdctl bin && \ 72 | cp -f /tmp/etcd-v$(ETCD_VERSION)-linux-amd64/etcd bin && \ 73 | rm -rf /tmp/etcd-v$(ETCD_VERSION)-linux-amd64.tar.gz /tmp/etcd-v$(ETCD_VERSION)-linux-amd64 74 | 75 | .PHONY: k8s 76 | k8s: configure 77 | @echo -e "$(OK_COLOR)[$(APP)] Download Kubernetes$(NO_COLOR)" 78 | for i in $(K8S_BINARIES); do \ 79 | curl --silent -o $(OUTPUT)/$$i -L ${K8S_URI}/v${K8S_VERSION}/bin/$(K8S_ARCH)/$$i; \ 80 | chmod +x $(OUTPUT)/$$i; \ 81 | done 82 | 83 | .PHONY: docker 84 | docker: configure 85 | @echo -e "$(OK_COLOR)[$(APP)] Download Docker$(NO_COLOR)" 86 | curl --silent -o
$(OUTPUT)/docker -L ${DOCKER_URI}/docker-${DOCKER_VERSION} 87 | chmod +x $(OUTPUT)/docker 88 | 89 | .PHONY: flannel 90 | flannel: configure 91 | @echo -e "$(OK_COLOR)[$(APP)] Download Flannel$(NO_COLOR)" 92 | curl --silent -o /tmp/flannel-v${FLANNEL_VERSION}.tar.gz -L ${FLANNEL_URI}/v${FLANNEL_VERSION}/flannel-${FLANNEL_VERSION}-linux-amd64.tar.gz && \ 93 | tar zxvf /tmp/flannel-v${FLANNEL_VERSION}.tar.gz -C /tmp/ && \ 94 | cp /tmp/flannel-${FLANNEL_VERSION}/flanneld $(OUTPUT)/flanneld && \ 95 | rm -fr /tmp/flannel-${FLANNEL_VERSION} /tmp/flannel-v${FLANNEL_VERSION}.tar.gz 96 | 97 | .PHONY: binaries 98 | binaries: etcd k8s docker flannel 99 | 100 | .PHONY: archive 101 | archive: 102 | @echo -e "$(OK_COLOR)[$(APP)] Create Kubernetes binaries archive$(NO_COLOR)" 103 | rm -rf output && mkdir -p output 104 | cp $(OUTPUT)/etcd output 105 | cp $(OUTPUT)/etcdctl output 106 | cp $(OUTPUT)/kubectl output 107 | cp $(OUTPUT)/kube-apiserver output 108 | cp $(OUTPUT)/kube-controller-manager output 109 | cp $(OUTPUT)/kube-scheduler output 110 | cp $(OUTPUT)/kube-proxy output 111 | cp $(OUTPUT)/kubelet output 112 | cp $(OUTPUT)/docker output 113 | cp $(OUTPUT)/flanneld output 114 | cd output && sha256sum * > CHECKSUM 115 | tar -zcvf hyperion-k8s-$(VERSION).tar.gz -C output . 116 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Hyperion 2 | 3 | [![License Apache 2][badge-license]][LICENSE] 4 | ![Version][badge-release] 5 | 6 | ## Description 7 | 8 | ![Image of components](https://github.com/portefaix/hyperion-k8s/raw/master/docs/hyperion-k8s.png "Hyperion Kubernetes") 9 | 10 | [hyperion][] creates a cloud environment: 11 | 12 | - Identical machine image creation is performed using [Packer][] 13 | - Orchestrated provisioning is performed using [Terraform][] 14 | - Application management is performed using [Kubernetes][] 15 | 16 | ## Kubernetes 17 | 18 | ### Kubernetes master 19 | 20 | - Maintains the state of the [Kubernetes][] server runtime 21 | - API server 22 | - Scheduler 23 | - Registries (nodes, pods, services) 24 | - Storage 25 | 26 | ### Kubernetes nodes 27 | 28 | - Represents the host where containers are created 29 | - Components: Pods, Kubelet, Proxy 30 | 31 | ### Kubernetes components 32 | 33 | - [etcd][]: A highly available key-value store for shared configuration and service discovery. 34 | - apiserver: Provides the API for Kubernetes orchestration. 35 | - controller-manager: Enforces Kubernetes services. 36 | - scheduler: Schedules containers on hosts. 37 | - proxy: Provides network proxy services. 38 | - kubelet: Processes container manifests so that containers are launched according to 39 | how they are described.
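Each of these components ships as a systemd unit (see `packer/units/`); the Kubernetes units read their flags from the environment file `/etc/kubernetes.env` (etcd uses `/etc/etcd.env`), which is presumably provisioned later via the files under `terraform/`. As a minimal sketch of what such a file could contain, the values below simply reuse the flags from `docker-compose.yml` and are illustrative, not the repository's actual configuration:

    KUBE_APISERVER_OPTS="--address=0.0.0.0 --etcd_servers=http://127.0.0.1:4001 --service-cluster-ip-range=172.17.17.1/24 --cluster_name=hyperion"
    KUBE_CONTROLLER_MANAGER_OPTS="--address=0.0.0.0 --master=http://127.0.0.1:8080"
    KUBE_SCHEDULER_OPTS="--address=0.0.0.0 --master=http://127.0.0.1:8080"
    KUBE_KUBELET_OPTS="--address=0.0.0.0 --api_servers=http://127.0.0.1:8080"
    KUBE_PROXY_OPTS="--master=http://127.0.0.1:8080"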
40 | 41 | 42 | ## Local 43 | 44 | Download the component binaries: 45 | 46 | $ make binaries 47 | 48 | Create the Kubernetes binaries archive: 49 | 50 | $ make archive 51 | 52 | 53 | ## Cloud 54 | 55 | ### Images 56 | 57 | Read the guides to create the machine images for a cloud provider: 58 | 59 | * [Google Cloud](https://github.com/portefaix/hyperion-k8s/blob/master/packer/google/README.md) 60 | * [AWS](https://github.com/portefaix/hyperion-k8s/blob/master/packer/ec2/README.md) 61 | * [DigitalOcean](https://github.com/portefaix/hyperion-k8s/blob/master/packer/digitalocean/README.md) 62 | 63 | ### Infrastructure 64 | 65 | Read the guides to create the infrastructure: 66 | 67 | * [Google Cloud](https://github.com/portefaix/hyperion-k8s/blob/master/terraform/google/README.md) 68 | * [AWS](https://github.com/portefaix/hyperion-k8s/blob/master/terraform/ec2/README.md) 69 | * [DigitalOcean](https://github.com/portefaix/hyperion-k8s/blob/master/terraform/digitalocean/README.md) 70 | * [OpenStack](https://github.com/portefaix/hyperion-k8s/blob/master/terraform/openstack/README.md) 71 | 72 | 73 | ## Usage 74 | 75 | * Set up your Kubernetes configuration: 76 | 77 | $ export K8S_MASTER=x.x.x.x 78 | $ kubectl config set-cluster hyperion \ 79 | --insecure-skip-tls-verify=true --server=${K8S_MASTER} 80 | $ kubectl config set-context hyperion --cluster=hyperion 81 | $ kubectl config use-context hyperion 82 | 83 | * Check [Kubernetes][] status: 84 | 85 | $ curl http://${K8S_MASTER}:8080/ 86 | { 87 | "paths": [ 88 | "/api", 89 | "/api/v1", 90 | "/api/v1beta3", 91 | "/healthz", 92 | "/healthz/ping", 93 | "/logs/", 94 | "/metrics", 95 | "/static/", 96 | "/swagger-ui/", 97 | "/swaggerapi/", 98 | "/ui/", 99 | "/version" 100 | ] 101 | } 102 | 103 | * You can use the ``kubectl`` binary to manage your cluster: 104 | 105 | $ bin/kubectl version 106 | Client Version: version.Info{Major:"1", Minor:"0", GitVersion:"v1.0.3", GitCommit:"61c6ac5f350253a4dc002aee97b7db7ff01ee4ca", GitTreeState:"clean"} 107 | Server Version: version.Info{Major:"1", Minor:"0", GitVersion:"v1.0.3", GitCommit:"61c6ac5f350253a4dc002aee97b7db7ff01ee4ca", GitTreeState:"clean"} 108 | 109 | $ bin/kubectl get cs 110 | NAME STATUS MESSAGE ERROR 111 | controller-manager Healthy ok nil 112 | scheduler Healthy ok nil 113 | etcd-0 Healthy {"health": "true"} nil 114 | 115 | $ bin/kubectl get nodes 116 | NAME LABELS STATUS 117 | 118 | x.x.x.x kubernetes.io/hostname=x.x.x.x Ready 119 | x.x.x.x kubernetes.io/hostname=x.x.x.x Ready 120 | 121 | $ bin/kubectl cluster-info 122 | Kubernetes master is running at x.x.x.x:8080 123 | 124 | 125 | * Create the namespaces: 126 | 127 | $ bin/kubectl -s ${K8S_MASTER}:8080 create -f namespaces/namespace-admin.json 128 | $ bin/kubectl -s ${K8S_MASTER}:8080 create -f namespaces/namespace-dev.json 129 | $ bin/kubectl -s ${K8S_MASTER}:8080 create -f namespaces/namespace-prod.json 130 | 131 | $ bin/kubectl -s ${K8S_MASTER}:8080 get namespaces 132 | NAME LABELS STATUS 133 | admin name=admin Active 134 | default Active 135 | development name=development Active 136 | kube-system name=kube-system Active 137 | production name=production Active 138 | 139 | 140 | ## Services 141 | 142 | [cAdvisor][] exposes a simple UI for on-machine containers on port `4194`. 143 | 144 | For other services, see [hyperion-services][] 145 | 146 | 147 | ## Debug 148 | 149 | ### Ping all hosts 150 | 151 | $ ansible all -m ping -i 152 | 153 | ### Check connection to hosts 154 | 155 | You can retrieve facts from the hosts to check connectivity: 156 | 157 | $ ansible all -m setup -a "filter=ansible_distribution*" -i 158 |
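Both commands expect the path to an Ansible inventory file after `-i`. A hypothetical inventory listing the master and the nodes (the group names and addresses below are placeholders, not files shipped with this repository):

    [masters]
    x.x.x.x

    [nodes]
    x.x.x.x
    x.x.x.x

For example, if the file above is saved as `hosts`: `$ ansible all -m ping -i hosts`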
159 | 160 | 161 | ## Contributing 162 | 163 | See [CONTRIBUTING](CONTRIBUTING.md). 164 | 165 | 166 | ## License 167 | 168 | See [LICENSE][] for the complete license. 169 | 170 | 171 | ## Changelog 172 | 173 | A [changelog](ChangeLog.md) is available 174 | 175 | 176 | ## Contact 177 | 178 | Nicolas Lamirault 179 | 180 | 181 | [hyperion]: https://github.com/portefaix/hyperion-k8s 182 | [hyperion-services]: https://github.com/portefaix/hyperion-services 183 | [LICENSE]: https://github.com/portefaix/hyperion-k8s/blob/master/LICENSE 184 | [Issue tracker]: https://github.com/portefaix/hyperion-k8s/issues 185 | 186 | [kubernetes]: http://kubernetes.io/ 187 | [etcd]: https://github.com/coreos/etcd 188 | [terraform]: https://terraform.io 189 | [packer]: https://packer.io 190 | 191 | [vagrant]: https://www.vagrantup.com 192 | [virtualbox]: https://www.virtualbox.org/ 193 | 194 | [cAdvisor]: https://github.com/google/cadvisor 195 | 196 | [badge-license]: https://img.shields.io/badge/license-Apache_2-green.svg 197 | [badge-release]: https://img.shields.io/github/release/portefaix/hyperion-k8s.svg 198 | -------------------------------------------------------------------------------- /circle.yml: -------------------------------------------------------------------------------- 1 | machine: 2 | timezone: Europe/Paris 3 | 4 | environment: 5 | PACKER_VERSION: 0.8.6 6 | TERRAFORM_VERSION: 0.6.9 7 | 8 | dependencies: 9 | pre: 10 | - > 11 | sudo apt-get install -qq wget unzip && 12 | pushd /usr/bin && 13 | echo "Downloading packer ${PACKER_VERSION}..." && 14 | sudo wget --no-verbose https://dl.bintray.com/mitchellh/packer/packer_${PACKER_VERSION}_linux_amd64.zip && 15 | echo "Installing packer ${PACKER_VERSION}..." && 16 | sudo unzip packer_${PACKER_VERSION}_linux_amd64.zip && 17 | sudo rm packer_${PACKER_VERSION}_linux_amd64.zip && 18 | sudo wget --no-verbose https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_linux_amd64.zip && 19 | sudo unzip terraform_${TERRAFORM_VERSION}_linux_amd64.zip && 20 | sudo rm terraform_${TERRAFORM_VERSION}_linux_amd64.zip 21 | 22 | test: 23 | override: 24 | - | 25 | ssh-keygen -t rsa -N foobar -f /home/ubuntu/.ssh/id_rsa.pub 26 | sudo mkdir -p /etc/gcloud 27 | sudo mkdir -p /etc/ec2 28 | sudo mkdir -p /etc/do 29 | - | 30 | echo "{\"type\": \"service_account\", \"project_id\": \"hyperion-k8s\", \"private_key_id\": \"xxxxxxxxxx\", \"private_key\": \"-----BEGIN PRIVATE KEY-----\nxxxxxn-----END PRIVATE KEY-----\n\", \"client_email\": \"hyperion-k8s\", \"client_id\": \"1xxxxxxx\", \"auth_uri\": \"https://accounts.google.com/o/oauth2/auth\", \"token_uri\": \"https://accounts.google.com/o/oauth2/token\", \"auth_provider_x509_cert_url\": \"https://www.googleapis.com/oauth2/v1/certs\", \"client_x509_cert_url\": \"https://www.googleapis.com/\" }" > /tmp/account.json 31 | sudo mv /tmp/account.json /etc/gcloud/ 32 | echo -e "gce_credentials=\"/etc/gcloud/account.json\"\ngce_project=\"hyperion-k8s\"\ngce_ssh_user=\"user\"\ngce_ssh_public_key = \"/home/ubuntu/.ssh/id_rsa.pub\"\ngce_ssh_private_key_file=\"/home/ubuntu/.ssh/id_rsa\"\nhyperion_nb_nodes = \"2\"\ngce_region = \"us-central1\"\ngce_cluster_name = \"hyperion-k8s\"\n" > /tmp/var.tf 33 | sudo mv /tmp/var.tf /etc/gcloud/ 34 | - | 35 | echo -e "aws_access_key = \"xxxxxxxxxxxxxxx\"\naws_secret_key = \"xxxxxxxx\"\naws_region = \"eu-west-1\"\naws_key_name = \"hyperion\"\naws_ssh_public_key = \"/home/ubuntu/.ssh/id_rsa.pub\"\naws_ssh_private_key_file=\"/home/ubuntu/.ssh/id_rsa\"\nhyperion_nb_nodes = 
\"2\"\naws_image = \"ami-e31a6594\"\naws_ssh_user = \"admin\"\naws_instance_type_master = \"t2.micro\"\naws_instance_type_node = \"t2.micro\"\n" > /tmp/ec2.tf 36 | sudo mv /tmp/ec2.tf /etc/ec2/var.tf 37 | - | 38 | echo -e "do_token = \"xxxxxxxxxxxxxxx\"\ndo_ssh_fingerprint = \"xx:xx:xx:10:22:aa:45:45:84:5a:02:b5:81:ae:58:94\"\ndo_pub_key = \"/home/ubuntu/.ssh/id_rsa.pub\"\ndo_pvt_key = \"/home/ubuntu/.ssh/id_rsa\"\n" > /tmp/settings.tf 39 | sudo mv /tmp/do.tf /etc/do/var.tf 40 | - packer version 41 | - cd packer/google/ && packer validate --var-file=settings.json hyperion.json 42 | - cd packer/ec2 && packer validate --var-file=settings.json hyperion.json 43 | - cd packer/digitalocean && packer validate --var-file=settings.json hyperion.json 44 | - cd terraform/google && terraform plan --var-file=/etc/gcloud/var.tf 45 | - cd terraform/ec2 && terraform plan --var-file=/etc/ec2/var.tf 46 | - cd terraform/digitalocean && terraform plan --var-file=/etc/do/var.tf 47 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | etcd: 2 | image: gcr.io/google_containers/etcd:2.0.13 3 | net: "host" 4 | command: ['/usr/local/bin/etcd', '--bind-addr=0.0.0.0:4001', '--data-dir=/var/etcd/data'] 5 | 6 | apiserver: 7 | image: gcr.io/google_containers/hyperkube:v1.1.1-beta.1 8 | net: "host" 9 | command: ["/hyperkube", "apiserver", "--service-cluster-ip-range=172.17.17.1/24", "--address=0.0.0.0", "--etcd_servers=http://127.0.0.1:4001", "--cluster_name=hyperion", "--v=2"] 10 | 11 | controller: 12 | image: gcr.io/google_containers/hyperkube:v1.1.1-beta.1 13 | net: "host" 14 | command: ["/hyperkube", "controller-manager", "--address=0.0.0.0", "--master=http://127.0.0.1:8080", "--v=2"] 15 | 16 | scheduler: 17 | image: gcr.io/google_containers/hyperkube:v1.1.1-beta.1 18 | net: "host" 19 | command: ["/hyperkube", "scheduler", "--address=0.0.0.0", "--master=http://127.0.0.1:8080", "--v=2"] 20 | 21 | kubelet: 22 | image: gcr.io/google_containers/hyperkube:v1.1.1-beta.1 23 | net: "host" 24 | command: ['/hyperkube', 'kubelet', '--containerized' , '--api_servers=http://127.0.0.1:8080', '--v=2', '--address=0.0.0.0', '--enable_server'] 25 | volumes: 26 | - /:/rootfs:ro 27 | - /sys:/sys:ro 28 | - /dev:/dev 29 | - /var/run/docker.sock:/var/run/docker.sock 30 | - /var/lib/docker/:/var/lib/docker:ro 31 | - /var/lib/kubelet/:/var/lib/kubelet:rw 32 | - /var/run:/var/run:rw 33 | privileged: true 34 | 35 | proxy: 36 | image: gcr.io/google_containers/hyperkube:v1.1.1-beta.1 37 | net: "host" 38 | command: ['/hyperkube', 'proxy', '--master=http://127.0.0.1:8080', '--v=2'] 39 | privileged: true 40 | 41 | # apiserver: 42 | # image: portefaix/hyperkube:1.1.1-beta.1 43 | # net: host 44 | # command: ["/hyperkube", "apiserver", "--service-cluster-ip-range=172.17.17.1/24", "--address=127.0.0.1", "--etcd_servers=http://127.0.0.1:4001", "--cluster_name=kubernetes", "--v=2"] 45 | 46 | # controller: 47 | # image: portefaix/hyperkube:1.1.1-beta.1 48 | # net: host 49 | # command: ["/hyperkube", "controller-manager", "--master=127.0.0.1:8080", "--v=2"] 50 | 51 | # scheduler: 52 | # image: portefaix/hyperkube:1.1.1-beta.1 53 | # net: host 54 | # command: ["/hyperkube", "scheduler", "--master=127.0.0.1:8080", "--v=2"] 55 | 56 | # kubelet: 57 | # image: portefaix/hyperkube:1.1.1-beta.1 58 | # net: host 59 | # command: ["/hyperkube", "kubelet", "--api_servers=http://127.0.0.1:8080", "--address=0.0.0.0", "--enable_server", 
"--v=2"] 60 | # volumes: 61 | # - /var/run/docker.sock:/var/run/docker.sock 62 | -------------------------------------------------------------------------------- /docs/docker.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ploio/hyperion-k8s/2ae32b8094e669714e65cad2b445fb73f8edfcab/docs/docker.png -------------------------------------------------------------------------------- /docs/hyperion-k8s.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ploio/hyperion-k8s/2ae32b8094e669714e65cad2b445fb73f8edfcab/docs/hyperion-k8s.png -------------------------------------------------------------------------------- /docs/kubernetes.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ploio/hyperion-k8s/2ae32b8094e669714e65cad2b445fb73f8edfcab/docs/kubernetes.png -------------------------------------------------------------------------------- /docs/packer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ploio/hyperion-k8s/2ae32b8094e669714e65cad2b445fb73f8edfcab/docs/packer.png -------------------------------------------------------------------------------- /docs/terraform.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ploio/hyperion-k8s/2ae32b8094e669714e65cad2b445fb73f8edfcab/docs/terraform.png -------------------------------------------------------------------------------- /packer/digitalocean/README.md: -------------------------------------------------------------------------------- 1 | # Packer templates for DigitalOcean 2 | 3 | This project contains [Packer][] templates to help you deploy [hyperion][] on [DigitalOcean][]. 4 | 5 | ## Prerequisites 6 | 7 | * A Digital Ocean account 8 | 9 | ## Configure 10 | 11 | The available variables that can be configured are: 12 | 13 | * **api_token**: The client TOKEN to use to access your account 14 | * **image**: The name (or slug) of the base image to use 15 | * **region**: The name (or slug) of the region to launch the droplet in 16 | * **size**: The name (or slug) of the droplet size to use 17 | 18 | Edit *settings.json* and setup your data. 
19 | 20 | ## Deploy 21 | 22 | Build the image 23 | 24 | $ packer build --var-file=settings.json hyperion.json 25 | 26 | 27 | 28 | [Packer]: https://www.packer.io/ 29 | [DigitalOcean]: https://www.digitalocean.com/ 30 | 31 | [hyperion]: http://github.com/portefaix/hyperion 32 | -------------------------------------------------------------------------------- /packer/digitalocean/hyperion.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "api_token": "", 4 | "image": "", 5 | "region": "", 6 | "size": "", 7 | "kube_binaries": "https://storage.googleapis.com/hyperion-k8s/hyperion-k8s-0.9.0.tar.gz", 8 | "snapshot_name": "hyperion-k8s-0-9-0 {{timestamp}}" 9 | }, 10 | "builders": [ 11 | { 12 | "type": "digitalocean", 13 | "api_token": "{{user `api_token`}}", 14 | "image": "{{user `image`}}", 15 | "region": "{{user `region`}}", 16 | "snapshot_name": "{{user `snapshot_name`}}", 17 | "size": "{{user `size`}}" 18 | } 19 | ], 20 | "provisioners": [ 21 | { 22 | "type": "file", 23 | "source": "../units", 24 | "destination": "/tmp" 25 | }, 26 | { 27 | "type": "shell", 28 | "inline": [ 29 | "sudo mkdir -p /opt/bin", 30 | "wget -N -O k8s.tar.gz {{user `kube_binaries`}}", 31 | "sudo tar -xvf k8s.tar.gz -C /opt/bin", 32 | "sudo chmod +x -R /opt/bin", 33 | "sudo chown root:root -R /opt/bin", 34 | "sudo mv /tmp/units/*.service /etc/systemd/system", 35 | "sudo ls /etc/systemd/system/", 36 | "rm -rf /tmp/units" 37 | ] 38 | } 39 | ] 40 | } 41 | -------------------------------------------------------------------------------- /packer/digitalocean/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "type": "digitalocean", 3 | "api_token": "YOUR API KEY", 4 | "image": "ubuntu-15-10-x64", 5 | "region": "nyc2", 6 | "size": "512mb" 7 | } 8 | -------------------------------------------------------------------------------- /packer/ec2/README.md: -------------------------------------------------------------------------------- 1 | # Packer templates for Amazon Elastic Compute Cloud 2 | 3 | This project contains [Packer][] templates to help you deploy [hyperion][] on [Amazon EC2][]. 4 | 5 | ## Prerequisites 6 | 7 | * An [Amazon Web Services account](http://aws.amazon.com/) 8 | * An [AWS Access and Secret Access Keys](http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSGettingStartedGuide/AWSCredentials.html) 9 | * An [AWS EC2 Key Pairs](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) 10 | 11 | 12 | ## Configure 13 | 14 | The available variables that can be configured are: 15 | 16 | * `access_key`: The access key used to communicate with AWS. 17 | * `account_id`: Your AWS account ID. 18 | * `instance_type`: The EC2 instance type to use while building the AMI (default `t2.micro`) 19 | * `region`: The name of the region (default `eu-west-1`) 20 | * `s3_bucket` : The name of the S3 bucket to upload the AMI 21 | * `secret_key`: The secret key used to communicate with AWS. 22 | * `source_ami`: The initial AMI used as a base for the newly created machine (default `Debian Jessie 64bit hvm ami`) 23 | * `ssh_username`: The username to use in order to communicate over SSH to the running machine. 24 | * `x509_cert_path`: The local path to a valid X509 certificate for your AWS account. This is used for bundling the AMI. This X509 certificate must be registered with your account from the security credentials page in the AWS console. 
25 | * `x509_key_path`: The local path to the private key for the X509 certificate specified by x509_cert_path. 26 | 27 | Edit *settings.json* and setup your data. 28 | 29 | ## Build 30 | 31 | Build the image : 32 | 33 | $ packer build --var-file=settings.json hyperion.json 34 | 35 | 36 | 37 | [Packer]: https://www.packer.io/ 38 | [Amazon EC2]: https://aws.amazon.com/ec2/ 39 | 40 | [hyperion]: http://github.com/portefaix/hyperion 41 | -------------------------------------------------------------------------------- /packer/ec2/ec2-ami-tools.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ploio/hyperion-k8s/2ae32b8094e669714e65cad2b445fb73f8edfcab/packer/ec2/ec2-ami-tools.zip -------------------------------------------------------------------------------- /packer/ec2/hyperion.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "access_key": "", 4 | "secret_key": "", 5 | "account_id": "", 6 | "key_name": "", 7 | "ssh_public_key": "", 8 | "ssh_private_key_file": "", 9 | "ssh_user": "", 10 | "source_ami": "ami-e31a6594", 11 | "instance_type": "t2.micro", 12 | "region": "eu-west-1", 13 | "kube_binaries": "https://storage.googleapis.com/hyperion-k8s/hyperion-k8s-0.9.0.tar.gz", 14 | "ec2_version": "1.5.7", 15 | "ami_name": "hyperion-k8s-0-9-0 {{timestamp}}", 16 | "ami_description": "Base Image for Hyperion" 17 | }, 18 | "builders": [ 19 | { 20 | "type": "amazon-ebs", 21 | "access_key": "{{user `access_key`}}", 22 | "secret_key": "{{user `secret_key`}}", 23 | "ami_users": ["{{user `account_id`}}"], 24 | "region": "{{user `region`}}", 25 | "source_ami": "{{user `source_ami`}}", 26 | "instance_type": "{{user `instance_type`}}", 27 | "ssh_username": "{{user `ssh_user`}}", 28 | "ami_name": "{{user `ami_name`}}", 29 | "ami_description": "{{user `ami_description`}}" 30 | } 31 | ], 32 | "provisioners": [ 33 | { 34 | "type": "file", 35 | "source": "../units", 36 | "destination": "/tmp" 37 | }, 38 | { 39 | "type": "shell", 40 | "inline": [ 41 | "sudo apt-get update", 42 | "sudo apt-get install -y ruby unzip kpartx parted grub", 43 | "wget http://s3.amazonaws.com/ec2-downloads/ec2-ami-tools.zip", 44 | "sudo mkdir -p /usr/local/ec2", 45 | "sudo unzip ec2-ami-tools.zip -d /usr/local/ec2", 46 | "sudo mv /usr/local/ec2/ec2-ami-tools-* /usr/local/ec2/ec2-ami-tools" 47 | ] 48 | }, 49 | { 50 | "type": "shell", 51 | "inline": [ 52 | "sudo mkdir -p /opt/bin", 53 | "wget -N -O k8s.tar.gz {{user `kube_binaries`}}", 54 | "sudo tar -xvf k8s.tar.gz -C /opt/bin", 55 | "sudo chmod +x -R /opt/bin", 56 | "sudo chown root:root -R /opt/bin", 57 | "sudo mv /tmp/units/*.service /etc/systemd/system", 58 | "sudo ls /etc/systemd/system/", 59 | "rm -rf /tmp/units" 60 | ] 61 | } 62 | ] 63 | } 64 | -------------------------------------------------------------------------------- /packer/ec2/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "access_key": "xxx", 3 | "secret_key": "xxx", 4 | "account_id": "xxx", 5 | "s3_bucket": "xxx", 6 | "key_name": "xxx", 7 | "region": "eu-west-1", 8 | "ssh_public_key": "xxx", 9 | "ssh_private_key_file": "xxx", 10 | "source_ami": "ami-e31a6594", 11 | "ssh_user": "admin", 12 | "x509_cert_path": "xxx", 13 | "x509_key_path": "xxx", 14 | "instance_type": "t2.micro" 15 | } 16 | -------------------------------------------------------------------------------- /packer/google/README.md: 
-------------------------------------------------------------------------------- 1 | # Packer templates for Google Cloud 2 | 3 | This project contains [Packer][] templates to help you deploy [hyperion][] on [Google cloud][/]. 4 | 5 | ## Prerequisites 6 | 7 | * A Google Cloud account 8 | * A Google Compute Engine project 9 | * A Google Compute Engine account file 10 | * A Google Compute Engine Password-less SSH Key 11 | 12 | ## Configure 13 | 14 | The available variables that can be configured are: 15 | 16 | * **account_file**: Path to the JSON file used to describe your account credentials, downloaded from Google Cloud Console 17 | * **project_id**: The name of the project to apply any resources to 18 | * **ssh_user**: SSH user 19 | * **region**: The region to operate under (default us-central1) 20 | * **zone**: The zone that the machines should be created in (default us-central1-a) 21 | * **source_image**: The name of the image to base the launched instances (default `debian-8-jessie-v20150818`) 22 | * **machine_type**: The machine type to use for the Hyperion instance (default `n1-standard-1`) 23 | 24 | Edit *settings.json* and setup your data. 25 | 26 | ## Deploy 27 | 28 | Build the image 29 | 30 | $ packer build --var-file=settings.json hyperion.json 31 | 32 | 33 | 34 | [Packer]: https://www.packer.io/ 35 | [Google cloud]: https://cloud.google.com 36 | 37 | [hyperion]: http://github.com/portefaix/hyperion 38 | -------------------------------------------------------------------------------- /packer/google/hyperion.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "account_file": "", 4 | "project_id": "", 5 | "ssh_user": "", 6 | "zone": "", 7 | "source_image": "", 8 | "machine_type": "", 9 | "cluster_name": "", 10 | "kube_binaries": "https://storage.googleapis.com/hyperion-k8s/hyperion-k8s-0.9.0.tar.gz", 11 | "image_name": "hyperion-0-9-4-v20151224", 12 | "image_description": "Kubernetes Base Image for Hyperion" 13 | }, 14 | "builders": [ 15 | { 16 | "type": "googlecompute", 17 | "account_file": "{{user `account_file`}}", 18 | "project_id": "{{user `project_id`}}", 19 | "source_image": "{{user `source_image`}}", 20 | "zone": "{{user `zone`}}", 21 | "disk_size": 20, 22 | "image_name": "{{user `image_name`}}", 23 | "image_description": "{{user `image_description`}}", 24 | "machine_type": "{{user `machine_type`}}", 25 | "ssh_username": "{{user `ssh_user`}}" 26 | } 27 | ], 28 | "provisioners": [ 29 | { 30 | "type": "file", 31 | "source": "../units", 32 | "destination": "/tmp" 33 | }, 34 | { 35 | "type": "shell", 36 | "inline": [ 37 | "sudo mkdir -p /opt/bin", 38 | "wget -N -O k8s.tar.gz {{user `kube_binaries`}}", 39 | "sudo tar -xvf k8s.tar.gz -C /opt/bin", 40 | "sudo chmod +x -R /opt/bin", 41 | "sudo chown root:root -R /opt/bin", 42 | "sudo mv /tmp/units/*.service /etc/systemd/system", 43 | "sudo ls /etc/systemd/system/", 44 | "rm -rf /tmp/units" 45 | ] 46 | } 47 | ] 48 | } 49 | -------------------------------------------------------------------------------- /packer/google/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "account_file": "/etc/gcloud/account.json", 3 | "project_id": "project-id", 4 | "ssh_user": "username", 5 | "source_image": "debian-8-jessie-v20150818", 6 | "zone": "us-central1-a", 7 | "machine_type": "n1-standard-1" 8 | } 9 | -------------------------------------------------------------------------------- /packer/units/docker.service: 
-------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Docker Application Container Engine 3 | Documentation=http://docs.docker.io 4 | Requires=flannel.service 5 | After=flannel.service 6 | 7 | [Service] 8 | EnvironmentFile=/etc/kubernetes.env 9 | EnvironmentFile=/run/flannel/subnet.env 10 | ExecStartPre=/bin/mount --make-rprivate / 11 | ExecStart=/opt/bin/docker $DOCKER_OPTS 12 | Restart=on-failure 13 | RestartSec=5 14 | 15 | [Install] 16 | WantedBy=multi-user.target 17 | -------------------------------------------------------------------------------- /packer/units/etcd.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=etcd 3 | 4 | [Service] 5 | EnvironmentFile=/etc/etcd.env 6 | ExecStart=/opt/bin/etcd $ETCD_OPTS 7 | Restart=on-failure 8 | RestartSec=5 9 | 10 | [Install] 11 | WantedBy=multi-user.target 12 | -------------------------------------------------------------------------------- /packer/units/flannel.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=flannel is an etcd backed network fabric for containers 3 | Documentation=https://github.com/coreos/flannel 4 | Requires=etcd.service 5 | After=etcd.service 6 | 7 | [Service] 8 | Type=notify 9 | EnvironmentFile=/etc/kubernetes.env 10 | ExecStartPre=-/opt/bin/etcdctl $FLANNEL_ETCDCTL_OPTS \ 11 | mk $FLANNEL_ETCD_PREFIX $FLANNEL_NETWORK_CONFIG 12 | ExecStart=/opt/bin/flanneld $FLANNEL_OPTS 13 | Restart=on-failure 14 | RestartSec=5 15 | 16 | [Install] 17 | WantedBy=multi-user.target 18 | -------------------------------------------------------------------------------- /packer/units/kube-apiserver.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Kubernetes API Server 3 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes 4 | 5 | [Service] 6 | EnvironmentFile=/etc/kubernetes.env 7 | ExecStart=/opt/bin/kube-apiserver $KUBE_APISERVER_OPTS 8 | Restart=on-failure 9 | RestartSec=5 10 | 11 | [Install] 12 | WantedBy=multi-user.target 13 | -------------------------------------------------------------------------------- /packer/units/kube-controller-manager.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Kubernetes Controller Manager 3 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes 4 | Requires=kube-apiserver.service 5 | After=kube-apiserver.service 6 | 7 | [Service] 8 | EnvironmentFile=/etc/kubernetes.env 9 | ExecStart=/opt/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS 10 | Restart=on-failure 11 | RestartSec=5 12 | 13 | [Install] 14 | WantedBy=multi-user.target 15 | -------------------------------------------------------------------------------- /packer/units/kube-kubelet.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Kubernetes Kubelet 3 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes 4 | Requires=docker.service 5 | After=docker.service 6 | 7 | [Service] 8 | EnvironmentFile=/etc/kubernetes.env 9 | ExecStart=/opt/bin/kubelet $KUBE_KUBELET_OPTS 10 | Restart=on-failure 11 | RestartSec=5 12 | 13 | [Install] 14 | WantedBy=multi-user.target 15 | -------------------------------------------------------------------------------- /packer/units/kube-proxy.service: 
-------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Kubernetes Proxy 3 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes 4 | 5 | [Service] 6 | EnvironmentFile=/etc/kubernetes.env 7 | ExecStart=/opt/bin/kube-proxy $KUBE_PROXY_OPTS 8 | Restart=on-failure 9 | RestartSec=5 10 | 11 | [Install] 12 | WantedBy=multi-user.target 13 | -------------------------------------------------------------------------------- /packer/units/kube-scheduler.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Kubernetes Scheduler 3 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes 4 | Requires=kube-apiserver.service 5 | After=kube-apiserver.service 6 | 7 | [Service] 8 | EnvironmentFile=/etc/kubernetes.env 9 | ExecStart=/opt/bin/kube-scheduler $KUBE_SCHEDULER_OPTS 10 | Restart=on-failure 11 | RestartSec=5 12 | 13 | [Install] 14 | WantedBy=multi-user.target 15 | -------------------------------------------------------------------------------- /packer/virtualbox/README.md: -------------------------------------------------------------------------------- 1 | # Packer templates for Virtualbox 2 | 3 | This project contains [Packer][] templates to help you deploy [hyperion][] on [Virtualbox][/]. 4 | 5 | ## Prerequisites 6 | 7 | * Virtualbox 8 | 9 | ## Configure 10 | 11 | ## Deploy 12 | 13 | Build the image 14 | 15 | $ export ATLAS_TOKEN="xxx" 16 | $ packer build hyperion.json 17 | 18 | Publish to [Atlas][] : 19 | 20 | $ packer push \ 21 | --name hyperion/core -token=${ATLAS_TOKEN} \ 22 | hyperion.json 23 | 24 | 25 | [Packer]: https://www.packer.io/ 26 | [Virtualbox]: https://www.virtualbox.org/ 27 | [Atlas]: https://atlas.hashicorp.com/hyperion 28 | 29 | [hyperion]: http://github.com/portefaix/hyperion 30 | -------------------------------------------------------------------------------- /packer/virtualbox/http/debian-8.2.0-amd64/preseed.cfg: -------------------------------------------------------------------------------- 1 | #### Contents of the preconfiguration file (for jessie) 2 | ### Localization 3 | # Preseeding only locale sets language, country and locale. 4 | # d-i debian-installer/locale string fr_FR 5 | 6 | # The values can also be preseeded individually for greater flexibility. 7 | #d-i debian-installer/language string en 8 | #d-i debian-installer/country string NL 9 | #d-i debian-installer/locale string en_GB.UTF-8 10 | # Optionally specify additional locales to be generated. 11 | d-i localechooser/supported-locales multiselect en_US.UTF-8, fr_FR.UTF-8 12 | 13 | # Keyboard selection. 14 | # keymap is an alias for keyboard-configuration/xkb-keymap 15 | d-i keymap select fr 16 | d-i console-keymaps-at/keymap select fr 17 | d-i keyboard-configuration/variant select Français 18 | d-i keyboard-configuration/unsupported_config_layout boolean true 19 | d-i keyboard-configuration/xkb-keymap select fr(latin9) 20 | # D-i keyboard-configuration/toggle select No toggling 21 | 22 | ### Network configuration 23 | # Disable network configuration entirely. This is useful for cdrom 24 | # installations on non-networked devices where the network questions, 25 | # warning and long timeouts are a nuisance. 26 | #d-i netcfg/enable boolean false 27 | 28 | # netcfg will choose an interface that has link if possible. This makes it 29 | # skip displaying a list if there is more than one interface. 
30 | d-i netcfg/choose_interface select auto 31 | 32 | # To pick a particular interface instead: 33 | #d-i netcfg/choose_interface select eth1 34 | 35 | # To set a different link detection timeout (default is 3 seconds). 36 | # Values are interpreted as seconds. 37 | #d-i netcfg/link_detection_timeout string 10 38 | 39 | # If you have a slow dhcp server and the installer times out waiting for 40 | # it, this might be useful. 41 | #d-i netcfg/dhcp_timeout string 60 42 | 43 | # If you prefer to configure the network manually, uncomment this line and 44 | # the static network configuration below. 45 | #d-i netcfg/disable_dhcp boolean true 46 | 47 | # If you want the preconfiguration file to work on systems both with and 48 | # without a dhcp server, uncomment these lines and the static network 49 | # configuration below. 50 | #d-i netcfg/dhcp_failed note 51 | #d-i netcfg/dhcp_options select Configure network manually 52 | 53 | # Static network configuration. 54 | #d-i netcfg/get_nameservers string 192.168.1.1 55 | #d-i netcfg/get_ipaddress string 192.168.1.42 56 | #d-i netcfg/get_netmask string 255.255.255.0 57 | #d-i netcfg/get_gateway string 192.168.1.1 58 | #d-i netcfg/confirm_static boolean true 59 | 60 | # Any hostname and domain names assigned from dhcp take precedence over 61 | # values set here. However, setting the values still prevents the questions 62 | # from being shown, even if values come from dhcp. 63 | d-i netcfg/get_hostname string unassigned-hostname 64 | d-i netcfg/get_domain string unassigned-domain 65 | 66 | # If you want to force a hostname, regardless of what either the DHCP 67 | # server returns or what the reverse DNS entry for the IP is, uncomment 68 | # and adjust the following line. 69 | #d-i netcfg/hostname string somehost 70 | 71 | # Disable that annoying WEP key dialog. 72 | d-i netcfg/wireless_wep string 73 | # The wacky dhcp hostname that some ISPs use as a password of sorts. 74 | #d-i netcfg/dhcp_hostname string radish 75 | 76 | # If non-free firmware is needed for the network or other hardware, you can 77 | # configure the installer to always try to load it, without prompting. Or 78 | # change to false to disable asking. 79 | d-i hw-detect/load_firmware boolean true 80 | 81 | ### Network console 82 | # Use the following settings if you wish to make use of the network-console 83 | # component for remote installation over SSH. This only makes sense if you 84 | # intend to perform the remainder of the installation manually. 85 | #d-i anna/choose_modules string network-console 86 | #d-i network-console/authorized_keys_url string http://10.0.0.1/openssh-key 87 | #d-i network-console/password password r00tme 88 | #d-i network-console/password-again password r00tme 89 | 90 | ### Mirror settings 91 | # If you select ftp, the mirror/country string does not need to be set. 92 | #d-i mirror/protocol string ftp 93 | d-i mirror/country string manual 94 | d-i mirror/http/directory string /debian 95 | d-i mirror/http/hostname string http.debian.net 96 | d-i mirror/http/proxy string 97 | 98 | # Suite to install. 99 | #d-i mirror/suite string testing 100 | # Suite to use for loading installer components (optional). 101 | #d-i mirror/udeb/suite string testing 102 | 103 | ### Account setup 104 | # Skip creation of a root account (normal user account will be able to 105 | # use sudo). 106 | d-i passwd/root-login boolean false 107 | # Alternatively, to skip creation of a normal user account. 
108 | # d-i passwd/make-user boolean false 109 | 110 | # Root password, either in clear text 111 | d-i passwd/root-password password vagrant 112 | d-i passwd/root-password-again password vagrant 113 | # or encrypted using an MD5 hash. 114 | # d-i passwd/root-password-crypted password vagrant 115 | 116 | # To create a normal user account. 117 | d-i passwd/user-fullname string vagrant 118 | d-i passwd/username string vagrant 119 | # Normal user's password, either in clear text 120 | d-i passwd/user-password password vagrant 121 | d-i passwd/user-password-again password vagrant 122 | # or encrypted using an MD5 hash. 123 | #d-i passwd/user-password-crypted password [MD5 hash] 124 | # Create the first user with the specified UID instead of the default. 125 | d-i passwd/user-uid string 1010 126 | 127 | # The user account will be added to some standard initial groups. To 128 | # override that, use this. 129 | #d-i passwd/user-default-groups string audio cdrom video 130 | 131 | ### Clock and time zone setup 132 | # Controls whether or not the hardware clock is set to UTC. 133 | d-i clock-setup/utc boolean true 134 | 135 | # You may set this to any valid setting for $TZ; see the contents of 136 | # /usr/share/zoneinfo/ for valid values. 137 | d-i time/zone string Europe/Paris 138 | 139 | # Controls whether to use NTP to set the clock during the install 140 | d-i clock-setup/ntp boolean true 141 | # NTP server to use. The default is almost always fine here. 142 | d-i clock-setup/ntp-server string ntp.via.ecp.fr 143 | 144 | ### Partitioning 145 | ## Partitioning example 146 | # If the system has free space you can choose to only partition that space. 147 | # This is only honoured if partman-auto/method (below) is not set. 148 | #d-i partman-auto/init_automatically_partition select biggest_free 149 | 150 | # Alternatively, you may specify a disk to partition. If the system has only 151 | # one disk the installer will default to using that, but otherwise the device 152 | # name must be given in traditional, non-devfs format (so e.g. /dev/hda or 153 | # /dev/sda, and not e.g. /dev/discs/disc0/disc). 154 | # For example, to use the first SCSI/SATA hard disk: 155 | #d-i partman-auto/disk string /dev/sda 156 | # In addition, you'll need to specify the method to use. 157 | # The presently available methods are: 158 | # - regular: use the usual partition types for your architecture 159 | # - lvm: use LVM to partition the disk 160 | # - crypto: use LVM within an encrypted partition 161 | d-i partman-auto/method string lvm 162 | 163 | # If one of the disks that are going to be automatically partitioned 164 | # contains an old LVM configuration, the user will normally receive a 165 | # warning. This can be preseeded away... 166 | d-i partman-lvm/device_remove_lvm boolean true 167 | # The same applies to pre-existing software RAID array: 168 | d-i partman-md/device_remove_md boolean true 169 | # And the same goes for the confirmation to write the lvm partitions. 170 | d-i partman-lvm/confirm boolean true 171 | d-i partman-lvm/confirm_nooverwrite boolean true 172 | 173 | # You can choose one of the three predefined partitioning recipes: 174 | # - atomic: all files in one partition 175 | # - home: separate /home partition 176 | # - multi: separate /home, /usr, /var, and /tmp partitions 177 | d-i partman-auto/choose_recipe select atomic 178 | 179 | # Or provide a recipe of your own... 180 | # If you have a way to get a recipe file into the d-i environment, you can 181 | # just point at it. 
182 | #d-i partman-auto/expert_recipe_file string /hd-media/recipe 183 | 184 | # If not, you can put an entire recipe into the preconfiguration file in one 185 | # (logical) line. This example creates a small /boot partition, suitable 186 | # swap, and uses the rest of the space for the root partition: 187 | #d-i partman-auto/expert_recipe string \ 188 | # boot-root :: \ 189 | # 40 50 100 ext3 \ 190 | # $primary{ } $bootable{ } \ 191 | # method{ format } format{ } \ 192 | # use_filesystem{ } filesystem{ ext3 } \ 193 | # mountpoint{ /boot } \ 194 | # . \ 195 | # 500 10000 1000000000 ext3 \ 196 | # method{ format } format{ } \ 197 | # use_filesystem{ } filesystem{ ext3 } \ 198 | # mountpoint{ / } \ 199 | # . \ 200 | # 64 512 300% linux-swap \ 201 | # method{ swap } format{ } \ 202 | # . 203 | 204 | # The full recipe format is documented in the file partman-auto-recipe.txt 205 | # included in the 'debian-installer' package or available from D-I source 206 | # repository. This also documents how to specify settings such as file 207 | # system labels, volume group names and which physical devices to include 208 | # in a volume group. 209 | 210 | # This makes partman automatically partition without confirmation, provided 211 | # that you told it what to do using one of the methods above. 212 | d-i partman-partitioning/confirm_write_new_label boolean true 213 | d-i partman/choose_partition select finish 214 | d-i partman/confirm boolean true 215 | d-i partman/confirm_nooverwrite boolean true 216 | 217 | ## Partitioning using RAID 218 | # The method should be set to "raid". 219 | #d-i partman-auto/method string raid 220 | # Specify the disks to be partitioned. They will all get the same layout, 221 | # so this will only work if the disks are the same size. 222 | #d-i partman-auto/disk string /dev/sda /dev/sdb 223 | 224 | # Next you need to specify the physical partitions that will be used. 225 | #d-i partman-auto/expert_recipe string \ 226 | # multiraid :: \ 227 | # 1000 5000 4000 raid \ 228 | # $primary{ } method{ raid } \ 229 | # . \ 230 | # 64 512 300% raid \ 231 | # method{ raid } \ 232 | # . \ 233 | # 500 10000 1000000000 raid \ 234 | # method{ raid } \ 235 | # . 236 | 237 | # Last you need to specify how the previously defined partitions will be 238 | # used in the RAID setup. Remember to use the correct partition numbers 239 | # for logical partitions. RAID levels 0, 1, 5, 6 and 10 are supported; 240 | # devices are separated using "#". 241 | # Parameters are: 242 | # \ 243 | # 244 | 245 | #d-i partman-auto-raid/recipe string \ 246 | # 1 2 0 ext3 / \ 247 | # /dev/sda1#/dev/sdb1 \ 248 | # . \ 249 | # 1 2 0 swap - \ 250 | # /dev/sda5#/dev/sdb5 \ 251 | # . \ 252 | # 0 2 0 ext3 /home \ 253 | # /dev/sda6#/dev/sdb6 \ 254 | # . 255 | 256 | # For additional information see the file partman-auto-raid-recipe.txt 257 | # included in the 'debian-installer' package or available from D-I source 258 | # repository. 259 | 260 | # This makes partman automatically partition without confirmation. 261 | d-i partman-md/confirm boolean true 262 | d-i partman-partitioning/confirm_write_new_label boolean true 263 | d-i partman/choose_partition select finish 264 | d-i partman/confirm boolean true 265 | d-i partman/confirm_nooverwrite boolean true 266 | 267 | ## Controlling how partitions are mounted 268 | # The default is to mount by UUID, but you can also choose "traditional" to 269 | # use traditional device names, or "label" to try filesystem labels before 270 | # falling back to UUIDs. 
271 | #d-i partman/mount_style select uuid 272 | 273 | ### Base system installation 274 | # Configure APT to not install recommended packages by default. Use of this 275 | # option can result in an incomplete system and should only be used by very 276 | # experienced users. 277 | #d-i base-installer/install-recommends boolean false 278 | 279 | # The kernel image (meta) package to be installed; "none" can be used if no 280 | # kernel is to be installed. 281 | #d-i base-installer/kernel/image string linux-image-2.6-486 282 | 283 | ### Apt setup 284 | # You can choose to install non-free and contrib software. 285 | d-i apt-setup/non-free boolean true 286 | d-i apt-setup/contrib boolean true 287 | # Uncomment this if you don't want to use a network mirror. 288 | #d-i apt-setup/use_mirror boolean false 289 | # Select which update services to use; define the mirrors to be used. 290 | # Values shown below are the normal defaults. 291 | d-i apt-setup/services-select multiselect security, volatile 292 | d-i apt-setup/security_host string security.debian.org 293 | 294 | # Additional repositories, local[0-9] available 295 | #d-i apt-setup/local0/repository string \ 296 | # http://local.server/debian stable main 297 | #d-i apt-setup/local0/comment string local server 298 | # Enable deb-src lines 299 | #d-i apt-setup/local0/source boolean true 300 | # URL to the public key of the local repository; you must provide a key or 301 | # apt will complain about the unauthenticated repository and so the 302 | # sources.list line will be left commented out 303 | #d-i apt-setup/local0/key string http://local.server/key 304 | 305 | # By default the installer requires that repositories be authenticated 306 | # using a known gpg key. This setting can be used to disable that 307 | # authentication. Warning: Insecure, not recommended. 308 | d-i debian-installer/allow_unauthenticated boolean true 309 | 310 | ### Package selection 311 | #tasksel tasksel/first multiselect standard, web-server 312 | # If the desktop task is selected, install the kde and xfce desktops 313 | # instead of the default gnome desktop. 314 | #tasksel tasksel/desktop multiselect kde, xfce 315 | tasksel tasksel/first multiselect 316 | 317 | # Individual additional packages to install 318 | d-i pkgsel/include string openssh-server build-essential 319 | # Whether to upgrade packages after debootstrap. 320 | # Allowed values: none, safe-upgrade, full-upgrade 321 | #d-i pkgsel/upgrade select none 322 | 323 | # Some versions of the installer can report back on what software you have 324 | # installed, and what software you use. The default is not to report back, 325 | # but sending reports helps the project determine what software is most 326 | # popular and include it on CDs. 327 | popularity-contest popularity-contest/participate boolean false 328 | 329 | ### Grub 330 | 331 | d-i grub-installer/bootdev string /dev/sda 332 | 333 | ### Finishing up the installation 334 | # During installations from serial console, the regular virtual consoles 335 | # (VT1-VT6) are normally disabled in /etc/inittab. Uncomment the next 336 | # line to prevent this. 337 | #d-i finish-install/keep-consoles boolean true 338 | 339 | # Avoid that last message about the install being complete. 340 | d-i finish-install/reboot_in_progress note 341 | 342 | # This will prevent the installer from ejecting the CD during the reboot, 343 | # which is useful in some situations. 
344 | #d-i cdrom-detect/eject boolean false 345 | 346 | # This is how to make the installer shutdown when finished, but not 347 | # reboot into the installed system. 348 | #d-i debian-installer/exit/halt boolean true 349 | # This will power off the machine instead of just halting it. 350 | #d-i debian-installer/exit/poweroff boolean true 351 | 352 | ### Preseeding other packages 353 | # Depending on what software you choose to install, or if things go wrong 354 | # during the installation process, it's possible that other questions may 355 | # be asked. You can preseed those too, of course. To get a list of every 356 | # possible question that could be asked during an install, do an 357 | # installation, and then run these commands: 358 | # debconf-get-selections --installer > file 359 | # debconf-get-selections >> file 360 | 361 | 362 | #### Advanced options 363 | ### Running custom commands during the installation 364 | # d-i preseeding is inherently not secure. Nothing in the installer checks 365 | # for attempts at buffer overflows or other exploits of the values of a 366 | # preconfiguration file like this one. Only use preconfiguration files from 367 | # trusted locations! To drive that home, and because it's generally useful, 368 | # here's a way to run any shell command you'd like inside the installer, 369 | # automatically. 370 | 371 | # This first command is run as early as possible, just after 372 | # preseeding is read. 373 | #d-i preseed/early_command string anna-install some-udeb 374 | # This command is run immediately before the partitioner starts. It may be 375 | # useful to apply dynamic partitioner preseeding that depends on the state 376 | # of the disks (which may not be visible when preseed/early_command runs). 377 | #d-i partman/early_command \ 378 | # string debconf-set partman-auto/disk "$(list-devices disk | head -n1)" 379 | # This command is run just before the install finishes, but when there is 380 | # still a usable /target directory. You can chroot to /target and use it 381 | # directly, or use the apt-install and in-target commands to easily install 382 | # packages and run commands in the target system. 
383 | #d-i preseed/late_command string apt-install zsh; in-target chsh -s /bin/zsh 384 | -------------------------------------------------------------------------------- /packer/virtualbox/hyperion.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "kube_binaries": "https://storage.googleapis.com/portefaix/hyperion-0.8.1.tar.gz", 4 | "vm_name": "hyperion-core", 5 | "disk_size": "2000", 6 | "memory_size": "1024", 7 | "cpus": "1", 8 | "iso_checksum": "923cd1bfbfa62d78aecaa92d919ee54a95c8fca834b427502847228cf06155e7243875f59279b0bf6bfd1b579cbe2f1bc80528a265dafddee9a9d2a197ef3806", 9 | "iso_checksum_type": "sha512", 10 | "debian_version": "8.2.0", 11 | "debian_arch": "amd64", 12 | "atlas_token": "{{env `ATLAS_TOKEN`}}", 13 | "atlas_username": "{{env `ATLAS_USERNAME`}}", 14 | "atlas_name": "hyperion", 15 | "version": "0.1.0" 16 | }, 17 | "builders": [ 18 | { 19 | "type": "virtualbox-iso", 20 | "boot_command": [ 21 | "", 22 | "install ", 23 | "preseed/url=http://{{ .HTTPIP }}:{{ .HTTPPort }}/preseed.cfg ", 24 | "debian-installer=en_US ", 25 | "auto ", 26 | "locale=en_US ", 27 | "kbd-chooser/method=fr ", 28 | "keyboard-configuration/xkb-keymap=fr ", 29 | "netcfg/get_hostname={{ .Name }} ", 30 | "netcfg/get_domain=vagrantup.com ", 31 | "fb=false ", 32 | "debconf/frontend=noninteractive ", 33 | "console-setup/ask_detect=false ", 34 | "console-keymaps-at/keymap=us ", 35 | "" 36 | ], 37 | "boot_wait": "10s", 38 | "disk_size": "{{ user `disk_size` }}", 39 | "guest_os_type": "Debian_64", 40 | "http_directory": "http/debian-{{ user `debian_version` }}-{{ user `debian_arch` }}", 41 | "iso_checksum": "{{ user `iso_checksum` }}", 42 | "iso_checksum_type": "{{ user `iso_checksum_type` }}", 43 | "iso_url": "http://cdimage.debian.org/cdimage/release/{{ user `debian_version` }}/{{ user `debian_arch` }}/iso-cd/debian-{{ user `debian_version` }}-{{ user `debian_arch` }}-netinst.iso", 44 | "ssh_username": "vagrant", 45 | "ssh_password": "vagrant", 46 | "ssh_port": 22, 47 | "ssh_wait_timeout": "10000s", 48 | "shutdown_command": "sudo -S /sbin/shutdown -hP now", 49 | "guest_additions_path": "VBoxGuestAdditions_{{.Version}}.iso", 50 | "virtualbox_version_file": ".vbox_version", 51 | "vm_name": "{{ user `vm_name` }}", 52 | "vboxmanage": [ 53 | ["modifyvm", "{{.Name}}", "--memory", "{{ user `memory_size` }}"], 54 | ["modifyvm", "{{.Name}}", "--cpus", "{{ user `cpus` }}"] 55 | ] 56 | } 57 | ], 58 | "provisioners": [ 59 | { 60 | "type": "shell", 61 | "execute_command": "echo 'vagrant' | {{.Vars}} sudo -E -S bash '{{.Path}}'", 62 | "scripts": [ 63 | "scripts/debian-{{ user `debian_version` }}-{{ user `debian_arch` }}/update.sh", 64 | "scripts/debian-{{ user `debian_version` }}-{{ user `debian_arch` }}/sudoers.sh", 65 | "scripts/debian-{{ user `debian_version` }}-{{ user `debian_arch` }}/vagrant.sh", 66 | "scripts/debian-{{ user `debian_version` }}-{{ user `debian_arch` }}/vbguest.sh", 67 | "scripts/debian-{{ user `debian_version` }}-{{ user `debian_arch` }}/cleanup.sh" 68 | ] 69 | } 70 | ], 71 | "push": { 72 | "name": "{{user `atlas_username`}}/{{user `atlas_name`}}", 73 | "token": "{{user `atlas_name`}}", 74 | "vcs": true 75 | }, 76 | "post-processors": [ 77 | [ 78 | { 79 | "type": "vagrant", 80 | "compression_level": 9, 81 | "output":"vagrant/debian/debian-{{ user `debian_version` }}-{{.Provider}}.box" 82 | }, 83 | { 84 | "type": "atlas", 85 | "only": ["virtualbox-iso"], 86 | "artifact": "{{user `atlas_username`}}/{{user `atlas_name`}}", 87 | 
"artifact_type": "vagrant.box", 88 | "metadata": { 89 | "created_at": "{{timestamp}}", 90 | "version": "{{user `version`}}", 91 | "provider": "virtualbox" 92 | } 93 | } 94 | ] 95 | ] 96 | } 97 | -------------------------------------------------------------------------------- /packer/virtualbox/scripts/debian-8.2.0-amd64/cleanup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Clean up 4 | apt-get -y --purge remove linux-headers-$(uname -r) build-essential 5 | apt-get -y --purge autoremove 6 | apt-get -y purge $(dpkg --list |grep '^rc' |awk '{print $2}') 7 | apt-get -y purge $(dpkg --list |egrep 'linux-image-[0-9]' |awk '{print $3,$2}' |sort -nr |tail -n +2 |grep -v $(uname -r) |awk '{ print $2}') 8 | apt-get -y clean 9 | 10 | # Remove history file 11 | unset HISTFILE 12 | rm ~/.bash_history /home/vagrant/.bash_history 13 | 14 | # sync data to disk (fix packer) 15 | sync 16 | -------------------------------------------------------------------------------- /packer/virtualbox/scripts/debian-8.2.0-amd64/sudoers.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | apt-get -y install sudo 4 | 5 | # Set up password-less sudo for user vagrant 6 | echo 'vagrant ALL=(ALL) NOPASSWD:ALL' > /etc/sudoers.d/vagrant 7 | chmod 440 /etc/sudoers.d/vagrant 8 | 9 | # no tty 10 | echo "Defaults !requiretty" >> /etc/sudoers 11 | -------------------------------------------------------------------------------- /packer/virtualbox/scripts/debian-8.2.0-amd64/update.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | apt-get update 4 | apt-get dist-upgrade 5 | # install curl to fix broken wget while retrieving content from secured sites 6 | apt-get -y install curl 7 | -------------------------------------------------------------------------------- /packer/virtualbox/scripts/debian-8.2.0-amd64/vagrant.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Installing vagrant keys 4 | mkdir /home/vagrant/.ssh 5 | chmod 700 /home/vagrant/.ssh 6 | cd /home/vagrant/.ssh 7 | wget --no-check-certificate 'https://raw.github.com/mitchellh/vagrant/master/keys/vagrant.pub' -O authorized_keys 8 | chmod 600 /home/vagrant/.ssh/authorized_keys 9 | chown -R vagrant /home/vagrant/.ssh 10 | chmod -R go-rwsx /home/vagrant/.ssh 11 | -------------------------------------------------------------------------------- /packer/virtualbox/scripts/debian-8.2.0-amd64/vbguest.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Install additional guests 4 | mkdir /tmp/vbox 5 | VER=$(cat /home/vagrant/.vbox_version) 6 | mount -o loop /home/vagrant/VBoxGuestAdditions_$VER.iso /tmp/vbox 7 | yes | sh /tmp/vbox/VBoxLinuxAdditions.run 8 | umount /tmp/vbox 9 | rmdir /tmp/vbox 10 | rm /home/vagrant/*.iso 11 | ln -s /opt/VBoxGuestAdditions-*/lib/VBoxGuestAdditions /usr/lib/VBoxGuestAdditions 12 | 13 | # Cleanup 14 | rm -rf VBoxGuestAdditions_*.iso VBoxGuestAdditions_*.iso.? 
15 | rm -rf /usr/src/virtualbox-ose-guest* 16 | rm -rf /usr/src/vboxguest* 17 | -------------------------------------------------------------------------------- /services/kube-ui/kube-ui-rc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: kube-ui-v1 5 | namespace: kube-system 6 | labels: 7 | k8s-app: kube-ui 8 | version: v1 9 | kubernetes.io/cluster-service: "true" 10 | spec: 11 | replicas: 1 12 | selector: 13 | k8s-app: kube-ui 14 | version: v1 15 | template: 16 | metadata: 17 | labels: 18 | k8s-app: kube-ui 19 | version: v1 20 | kubernetes.io/cluster-service: "true" 21 | spec: 22 | containers: 23 | - name: kube-ui 24 | image: gcr.io/google_containers/kube-ui:v1.1 25 | resources: 26 | limits: 27 | cpu: 100m 28 | memory: 50Mi 29 | ports: 30 | - containerPort: 8080 31 | livenessProbe: 32 | httpGet: 33 | path: / 34 | port: 8080 35 | initialDelaySeconds: 30 36 | timeoutSeconds: 5 37 | -------------------------------------------------------------------------------- /services/kube-ui/kube-ui-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kube-ui 5 | namespace: kube-system 6 | labels: 7 | k8s-app: kube-ui 8 | kubernetes.io/cluster-service: "true" 9 | kubernetes.io/name: "KubeUI" 10 | spec: 11 | selector: 12 | k8s-app: kube-ui 13 | ports: 14 | - port: 80 15 | targetPort: 8080 16 | -------------------------------------------------------------------------------- /services/kubedash/kube-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: "v1" 3 | kind: "ReplicationController" 4 | metadata: 5 | labels: 6 | name: "kubedash" 7 | name: "kubedash" 8 | namespace: "kube-system" 9 | spec: 10 | replicas: 1 11 | selector: 12 | name: "kubedash" 13 | template: 14 | metadata: 15 | labels: 16 | name: "kubedash" 17 | spec: 18 | containers: 19 | - 20 | image: "gcr.io/google_containers/kubedash:v0.2.1" 21 | name: "kubedash" 22 | command: 23 | - "/kubedash" 24 | resources: 25 | limits: 26 | cpu: 50m 27 | memory: 100Mi 28 | volumeMounts: 29 | - 30 | name: "ssl-certs" 31 | mountPath: "/etc/ssl/certs" 32 | readOnly: true 33 | volumes: 34 | - 35 | name: "ssl-certs" 36 | hostPath: 37 | path: "/etc/ssl/certs" 38 | 39 | --- 40 | apiVersion: "v1" 41 | kind: "Service" 42 | metadata: 43 | labels: 44 | name: "kubedash" 45 | name: "kubedash" 46 | namespace: "kube-system" 47 | spec: 48 | type: "LoadBalancer" 49 | ports: 50 | - 51 | port: 80 52 | targetPort: 8289 53 | selector: 54 | name: "kubedash" 55 | -------------------------------------------------------------------------------- /services/logging/es-controller.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: elasticsearch-logging-v1 5 | namespace: kube-system 6 | labels: 7 | k8s-app: elasticsearch-logging 8 | version: v1 9 | kubernetes.io/cluster-service: "true" 10 | spec: 11 | replicas: 2 12 | selector: 13 | k8s-app: elasticsearch-logging 14 | version: v1 15 | template: 16 | metadata: 17 | labels: 18 | k8s-app: elasticsearch-logging 19 | version: v1 20 | kubernetes.io/cluster-service: "true" 21 | spec: 22 | containers: 23 | - image: gcr.io/google_containers/elasticsearch:1.7 24 | name: elasticsearch-logging 25 | resources: 26 | limits: 27 | cpu: 100m 28 | ports: 29 | - containerPort: 
9200 30 | name: db 31 | protocol: TCP 32 | - containerPort: 9300 33 | name: transport 34 | protocol: TCP 35 | volumeMounts: 36 | - name: es-persistent-storage 37 | mountPath: /data 38 | volumes: 39 | - name: es-persistent-storage 40 | emptyDir: {} 41 | -------------------------------------------------------------------------------- /services/logging/es-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: elasticsearch-logging 5 | namespace: kube-system 6 | labels: 7 | k8s-app: elasticsearch-logging 8 | kubernetes.io/cluster-service: "true" 9 | kubernetes.io/name: "Elasticsearch" 10 | spec: 11 | ports: 12 | - port: 9200 13 | protocol: TCP 14 | targetPort: db 15 | selector: 16 | k8s-app: elasticsearch-logging 17 | -------------------------------------------------------------------------------- /services/logging/kibana-controller.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: kibana-logging-v1 5 | namespace: kube-system 6 | labels: 7 | k8s-app: kibana-logging 8 | version: v1 9 | kubernetes.io/cluster-service: "true" 10 | spec: 11 | replicas: 1 12 | selector: 13 | k8s-app: kibana-logging 14 | version: v1 15 | template: 16 | metadata: 17 | labels: 18 | k8s-app: kibana-logging 19 | version: v1 20 | kubernetes.io/cluster-service: "true" 21 | spec: 22 | containers: 23 | - name: kibana-logging 24 | image: gcr.io/google_containers/kibana:1.3 25 | resources: 26 | limits: 27 | cpu: 100m 28 | env: 29 | - name: "ELASTICSEARCH_URL" 30 | value: "http://elasticsearch-logging:9200" 31 | ports: 32 | - containerPort: 5601 33 | name: ui 34 | protocol: TCP 35 | -------------------------------------------------------------------------------- /services/logging/kibana-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kibana-logging 5 | namespace: kube-system 6 | labels: 7 | k8s-app: kibana-logging 8 | kubernetes.io/cluster-service: "true" 9 | kubernetes.io/name: "Kibana" 10 | spec: 11 | ports: 12 | - port: 5601 13 | protocol: TCP 14 | targetPort: ui 15 | selector: 16 | k8s-app: kibana-logging 17 | -------------------------------------------------------------------------------- /services/monitoring/grafana-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: monitoring-grafana 5 | namespace: kube-system 6 | labels: 7 | kubernetes.io/cluster-service: "true" 8 | kubernetes.io/name: "Grafana" 9 | spec: 10 | ports: 11 | - port: 80 12 | targetPort: 8080 13 | selector: 14 | k8s-app: influxGrafana 15 | 16 | -------------------------------------------------------------------------------- /services/monitoring/heapster-controller.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: monitoring-heapster-v6 5 | namespace: kube-system 6 | labels: 7 | k8s-app: heapster 8 | version: v6 9 | kubernetes.io/cluster-service: "true" 10 | spec: 11 | replicas: 1 12 | selector: 13 | k8s-app: heapster 14 | version: v6 15 | template: 16 | metadata: 17 | labels: 18 | k8s-app: heapster 19 | version: v6 20 | kubernetes.io/cluster-service: "true" 21 | spec: 22 | containers: 23 | - image: 
gcr.io/google_containers/heapster:v0.16.1 24 | name: heapster 25 | resources: 26 | limits: 27 | cpu: 100m 28 | memory: 300Mi 29 | command: 30 | - /heapster 31 | - --source=kubernetes:'' 32 | - --sink=influxdb:http://monitoring-influxdb:8086 33 | -------------------------------------------------------------------------------- /services/monitoring/heapster-service.yaml: -------------------------------------------------------------------------------- 1 | kind: Service 2 | apiVersion: v1 3 | metadata: 4 | name: monitoring-heapster 5 | namespace: kube-system 6 | labels: 7 | kubernetes.io/cluster-service: "true" 8 | kubernetes.io/name: "Heapster" 9 | spec: 10 | ports: 11 | - port: 80 12 | targetPort: 8082 13 | selector: 14 | k8s-app: heapster 15 | -------------------------------------------------------------------------------- /services/monitoring/influxdb-grafana-controller.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: monitoring-influx-grafana-v1 5 | namespace: kube-system 6 | labels: 7 | k8s-app: influxGrafana 8 | version: v1 9 | kubernetes.io/cluster-service: "true" 10 | spec: 11 | replicas: 1 12 | selector: 13 | k8s-app: influxGrafana 14 | version: v1 15 | template: 16 | metadata: 17 | labels: 18 | k8s-app: influxGrafana 19 | version: v1 20 | kubernetes.io/cluster-service: "true" 21 | spec: 22 | containers: 23 | - image: gcr.io/google_containers/heapster_influxdb:v0.3 24 | name: influxdb 25 | resources: 26 | limits: 27 | cpu: 100m 28 | memory: 200Mi 29 | ports: 30 | - containerPort: 8083 31 | hostPort: 8083 32 | - containerPort: 8086 33 | hostPort: 8086 34 | volumeMounts: 35 | - name: influxdb-persistent-storage 36 | mountPath: /data 37 | - image: gcr.io/google_containers/heapster_grafana:v0.7 38 | name: grafana 39 | resources: 40 | limits: 41 | cpu: 100m 42 | memory: 100Mi 43 | env: 44 | - name: INFLUXDB_EXTERNAL_URL 45 | value: /api/v1/proxy/namespaces/kube-system/services/monitoring-influxdb:api/db/ 46 | - name: INFLUXDB_HOST 47 | value: monitoring-influxdb 48 | - name: INFLUXDB_PORT 49 | value: "8086" 50 | volumes: 51 | - name: influxdb-persistent-storage 52 | emptyDir: {} 53 | 54 | -------------------------------------------------------------------------------- /services/monitoring/influxdb-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: monitoring-influxdb 5 | namespace: kube-system 6 | labels: 7 | kubernetes.io/cluster-service: "true" 8 | kubernetes.io/name: "InfluxDB" 9 | spec: 10 | ports: 11 | - name: http 12 | port: 8083 13 | targetPort: 8083 14 | - name: api 15 | port: 8086 16 | targetPort: 8086 17 | selector: 18 | k8s-app: influxGrafana 19 | 20 | -------------------------------------------------------------------------------- /services/namespaces/namespace-admin.json: -------------------------------------------------------------------------------- 1 | { 2 | "kind": "Namespace", 3 | "apiVersion": "v1", 4 | "metadata": { 5 | "name": "kube-system", 6 | "labels": { 7 | "name": "kube-system" 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /services/namespaces/namespace-dev.json: -------------------------------------------------------------------------------- 1 | { 2 | "kind": "Namespace", 3 | "apiVersion": "v1", 4 | "metadata": { 5 | "name": "development", 6 | "labels": { 7 | "name": "development" 8 | } 9 | 
} 10 | } 11 | -------------------------------------------------------------------------------- /services/namespaces/namespace-prod.json: -------------------------------------------------------------------------------- 1 | { 2 | "kind": "Namespace", 3 | "apiVersion": "v1", 4 | "metadata": { 5 | "name": "production", 6 | "labels": { 7 | "name": "production" 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /terraform/digitalocean/README.md: -------------------------------------------------------------------------------- 1 | # Terraform templates for DigitalOcean 2 | 3 | This project contains [Terraform][] templates to help you deploy [hyperion-k8s][] on [DigitalOcean][]. 4 | 5 | ## Prerequisites 6 | 7 | * A DigitalOcean account 8 | * A DigitalOcean API Token 9 | * A DigitalOcean Password-less SSH Key 10 | * A DigitalOcean Region supporting private networking (all regions except sfo1) 11 | 12 | ## Configure 13 | 14 | The available variables that can be configured are: 15 | 16 | * **do_token**: Digital Ocean API token 17 | * **pub_key**: SSH public key id. Key ID of your uploaded SSH key. 18 | * **pvt_key**: Path to the SSH private key file 19 | * **ssh_fingerprint**: fingerprint of the SSH public key 20 | 21 | Copy and renamed *terraform.tfvars.example* to *terraform.tfvars*. 22 | Follow the instructions in the comments of the terraform.tfvars.example and 23 | variables.tf file. 24 | 25 | ## Deploy 26 | 27 | Deploy your cluster 28 | 29 | $ terraform apply --var-file=terraform.tfvars 30 | 31 | ## Destroy 32 | 33 | Destroy the cluster : 34 | 35 | $ terraform destroy --var-file=terraform.tfvars 36 | 37 | ## Updating 38 | 39 | 40 | 41 | [Terraform]: https://www.terraform.io/ 42 | [DigitalOcean]: https://www.digitalocean.com/ 43 | 44 | [hyperion-k8s]: http://github.com/portefaix/hyperion-k8s 45 | -------------------------------------------------------------------------------- /terraform/digitalocean/hyperion.tf: -------------------------------------------------------------------------------- 1 | resource "digitalocean_ssh_key" "hyperion-ssh-key" { 2 | name = "hyperion-ssh-key" 3 | public_key = "${file("${var.do_pub_key}")}" 4 | } 5 | 6 | resource "digitalocean_droplet" "hyperion-master" { 7 | name = "hyperion-master" 8 | region = "${var.do_region}" 9 | image = "${var.do_image}" 10 | size = "${var.do_size_master}" 11 | private_networking = true 12 | ssh_keys = ["${var.do_ssh_fingerprint}"] 13 | depends_on = [ "digitalocean_ssh_key.hyperion-ssh-key" ] 14 | 15 | connection { 16 | key_file = "${var.do_pvt_key}" 17 | agent = false 18 | } 19 | 20 | provisioner "remote-exec" { 21 | inline = [ 22 | "sudo apt-get update", 23 | "sudo apt-get install -y python2.7" 24 | ] 25 | } 26 | } 27 | 28 | resource "digitalocean_droplet" "hyperion-nodes" { 29 | count = "${var.hyperion_nb_nodes}" 30 | name = "hyperion-node-${count.index}" // => `hyperion-node-{0,1}` 31 | region = "${var.do_region}" 32 | image = "${var.do_image}" 33 | size = "${var.do_size_node}" 34 | private_networking = true 35 | ssh_keys = ["${var.do_ssh_fingerprint}"] 36 | depends_on = [ "digitalocean_ssh_key.hyperion-ssh-key" ] 37 | 38 | connection { 39 | key_file = "${var.do_pvt_key}" 40 | agent = false 41 | } 42 | 43 | provisioner "remote-exec" { 44 | inline = [ 45 | "sudo apt-get update", 46 | # "sudo apt-get -y upgrade", 47 | "sudo apt-get install -y python2.7" 48 | ] 49 | } 50 | } 51 | -------------------------------------------------------------------------------- 
/terraform/digitalocean/outputs.tf: -------------------------------------------------------------------------------- 1 | output "hyperion_master" { 2 | value = "${digitalocean_droplet.hyperion-master.ipv4_address}" 3 | } 4 | 5 | output "hyperion_nodes" { 6 | value = "${join(" - ", digitalocean_droplet.hyperion-nodes.*.ipv4_address)}" 7 | } 8 | -------------------------------------------------------------------------------- /terraform/digitalocean/provider.tf: -------------------------------------------------------------------------------- 1 | provider "digitalocean" { 2 | token = "${var.do_token}" 3 | } 4 | -------------------------------------------------------------------------------- /terraform/digitalocean/terraform.tfvars.example: -------------------------------------------------------------------------------- 1 | # 2 | # Hyperion Digital Ocean setup 3 | # 4 | 5 | do_token = "xxxxx" 6 | 7 | # Get keys using the following command 8 | # curl -X GET "https://api.digitalocean.com/v2/account/keys" -H "Authorization: Bearer $DO_TOKEN" 9 | 10 | # awk '{print $2}' ~/.ssh/id_rsa.pub | base64 -d | md5sum -b | sed 's/../&:/g; s/: .*$//' 11 | do_ssh_fingerprint = "xxxx" 12 | 13 | # Public key 14 | do_pub_key = "/home/user/.ssh/id_rsa.pub" 15 | 16 | # Private key for connecting via SSH 17 | do_pvt_key = "/home/user/.ssh/id_rsa" 18 | -------------------------------------------------------------------------------- /terraform/digitalocean/variables.tf: -------------------------------------------------------------------------------- 1 | variable "hyperion_nb_nodes" { 2 | description = "The number of nodes." 3 | default = "2" 4 | } 5 | 6 | variable "do_token" { 7 | description = "Digital Ocean API token." 8 | } 9 | 10 | variable "do_pub_key" { 11 | description = "SSH public key id." 12 | } 13 | 14 | variable "do_pvt_key" { 15 | description = "Path to the SSH private key file." 16 | } 17 | 18 | variable "do_ssh_fingerprint" { 19 | description = "Fingerprint of the SSH public key file." 20 | } 21 | 22 | variable "do_region" { 23 | description = "The DO region to operate under." 24 | default = "nyc2" 25 | } 26 | 27 | variable "do_image" { 28 | description = "The droplet image ID or slug to base the launched instances." 29 | default = "ubuntu-15-04-x64" 30 | } 31 | 32 | variable "do_size_master" { 33 | description = "The DO size to use for the Hyperion master instance." 34 | default = "512mb" 35 | } 36 | 37 | variable "do_size_node" { 38 | description = "The DO size to use for the Hyperion node instance." 39 | default = "512mb" 40 | } 41 | -------------------------------------------------------------------------------- /terraform/ec2/README.md: -------------------------------------------------------------------------------- 1 | # Terraform templates for Amazon Web Services 2 | 3 | This project contains [Terraform][] templates to help you deploy [hyperion-k8s][] on [AWS][]. 
4 | 5 | ## Prerequisites 6 | 7 | * An [Amazon Web Services account](http://aws.amazon.com/) 8 | * An [AWS Access and Secret Access Keys](http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSGettingStartedGuide/AWSCredentials.html) 9 | * An [AWS EC2 Key Pairs](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) 10 | 11 | 12 | ## Configure 13 | 14 | The available variables that can be configured are: 15 | 16 | * `aws_access_key`: AWS access key 17 | * `aws_secret_key`: AWS secret key 18 | * `aws_key_name`: The SSH key name to use for the instances 19 | * `aws_ssh_private_key_file`: Path to the SSH private key file 20 | * `aws_ssh_user`: SSH user (default `admin`) 21 | * `aws_region`: AWS region (default `eu-west-1`) 22 | * `aws_vpc_cidr_block`: The IPv4 address range that machines in the network are assigned to, represented as a CIDR block (default `10.0.0.0/16`) 23 | * `aws_subnet_cidr_block`: The IPv4 address range that machines in the network are assigned to, represented as a CIDR block (default `10.0.1.0/24`) 24 | * `aws_image`: The name of the image to base the launched instances (default `Debian Jessie 64bit hvm ami`) 25 | * `aws_instance_type_master`: The machine type to use for the Hyperion master instance (default `m3.medium`) 26 | * `aws_instance_type_node`: The machine type to use for the Hyperion nodes instances (default `m3.medium`) 27 | * `hyperion_nb_nodes`: The number of Hyperino nodes to launch (default `2`) 28 | 29 | Copy and renamed *terraform.tfvars.example* to *terraform.tfvars*. 30 | 31 | ## Deploy 32 | 33 | Deploy your cluster 34 | 35 | $ terraform apply --var-file=terraform.tfvars 36 | 37 | ## Destroy 38 | 39 | Destroy the cluster : 40 | 41 | $ terraform destroy --var-file=terraform.tfvars 42 | 43 | ## Updating 44 | 45 | 46 | 47 | [Terraform]: https://www.terraform.io/ 48 | [AWS]: https://aws.amazon.com/ 49 | 50 | [hyperion-k8s]: http://github.com/portefaix/hyperion-k8s 51 | -------------------------------------------------------------------------------- /terraform/ec2/hyperion.tf: -------------------------------------------------------------------------------- 1 | resource "aws_key_pair" "deployer" { 2 | key_name = "${var.aws_key_name}" 3 | public_key = "${file("${var.aws_ssh_public_key}")}" 4 | } 5 | 6 | resource "aws_vpc" "hyperion-network" { 7 | cidr_block = "${var.aws_vpc_cidr_block}" 8 | enable_dns_support = true 9 | enable_dns_hostnames = true 10 | tags { 11 | Name = "hyperion" 12 | } 13 | } 14 | 15 | resource "aws_subnet" "hyperion-network" { 16 | vpc_id = "${aws_vpc.hyperion-network.id}" 17 | cidr_block = "${var.aws_subnet_cidr_block}" 18 | map_public_ip_on_launch = true 19 | tags { 20 | Name = "hyperion" 21 | } 22 | } 23 | 24 | resource "aws_internet_gateway" "hyperion-network" { 25 | vpc_id = "${aws_vpc.hyperion-network.id}" 26 | } 27 | 28 | resource "aws_route_table" "hyperion-network" { 29 | vpc_id = "${aws_vpc.hyperion-network.id}" 30 | route { 31 | cidr_block = "0.0.0.0/0" 32 | gateway_id = "${aws_internet_gateway.hyperion-network.id}" 33 | } 34 | } 35 | 36 | resource "aws_route_table_association" "hyperion-network" { 37 | subnet_id = "${aws_subnet.hyperion-network.id}" 38 | route_table_id = "${aws_route_table.hyperion-network.id}" 39 | } 40 | 41 | resource "aws_security_group" "hyperion-network" { 42 | name = "hyperion" 43 | description = "Hyperion security group" 44 | vpc_id = "${aws_vpc.hyperion-network.id}" 45 | ingress { 46 | protocol = "tcp" 47 | from_port = 1 48 | to_port = 65535 49 | cidr_blocks = ["0.0.0.0/0"] 50 | } 51 
| ingress { 52 | protocol = "udp" 53 | from_port = 1 54 | to_port = 65535 55 | cidr_blocks = ["0.0.0.0/0"] 56 | } 57 | egress { 58 | protocol = "tcp" 59 | from_port = 1 60 | to_port = 65535 61 | cidr_blocks = ["0.0.0.0/0"] 62 | } 63 | egress { 64 | protocol = "udp" 65 | from_port = 1 66 | to_port = 65535 67 | cidr_blocks = ["0.0.0.0/0"] 68 | } 69 | tags { 70 | Name = "hyperion" 71 | } 72 | } 73 | 74 | resource "aws_eip" "ip" { 75 | instance = "${aws_instance.hyperion-master.id}" 76 | vpc = true 77 | connection { 78 | # host = "${aws_eip.ip.public_ip}" 79 | user = "${var.aws_ssh_user}" 80 | key_file = "${var.aws_ssh_private_key_file}" 81 | agent = false 82 | } 83 | } 84 | 85 | resource "aws_instance" "hyperion-master" { 86 | ami = "${var.aws_image}" 87 | instance_type = "${var.aws_instance_type_master}" 88 | key_name = "${var.aws_key_name}" 89 | subnet_id = "${aws_subnet.hyperion-network.id}" 90 | security_groups = [ 91 | "${aws_security_group.hyperion-network.id}", 92 | ] 93 | tags { 94 | Name = "hyperion-master" 95 | } 96 | 97 | connection { 98 | user = "${var.aws_ssh_user}" 99 | key_file = "${var.aws_ssh_private_key_file}" 100 | agent = false 101 | } 102 | 103 | provisioner "remote-exec" { 104 | inline = [ 105 | "sudo apt-get update", 106 | # "sudo apt-get -y upgrade", 107 | "sudo apt-get install -y python2.7" 108 | ] 109 | } 110 | 111 | } 112 | 113 | resource "aws_instance" "hyperion-nodes" { 114 | depends_on = ["aws_eip.ip"] 115 | count = "${var.hyperion_nb_nodes}" 116 | ami = "${var.aws_image}" 117 | instance_type = "${var.aws_instance_type_node}" 118 | key_name = "${var.aws_key_name}" 119 | subnet_id = "${aws_subnet.hyperion-network.id}" 120 | security_groups = [ 121 | "${aws_security_group.hyperion-network.id}", 122 | ] 123 | tags { 124 | Name = "hyperion-node-${count.index}" 125 | } 126 | 127 | connection { 128 | user = "${var.aws_ssh_user}" 129 | key_file = "${var.aws_ssh_private_key_file}" 130 | agent = false 131 | } 132 | 133 | provisioner "remote-exec" { 134 | inline = [ 135 | "sudo apt-get update", 136 | # "sudo apt-get -y upgrade", 137 | "sudo apt-get install -y python2.7" 138 | ] 139 | } 140 | 141 | } 142 | -------------------------------------------------------------------------------- /terraform/ec2/provider.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | access_key = "${var.aws_access_key}" 3 | secret_key = "${var.aws_secret_key}" 4 | region = "${var.aws_region}" 5 | } 6 | -------------------------------------------------------------------------------- /terraform/ec2/variables.tf: -------------------------------------------------------------------------------- 1 | variable "hyperion_nb_nodes" { 2 | description = "The number of nodes." 3 | default = "2" 4 | } 5 | 6 | variable "aws_access_key" { 7 | description = "AWS access key." 8 | } 9 | 10 | variable "aws_secret_key" { 11 | description = "AWS secret key." 12 | } 13 | 14 | variable "aws_region" { 15 | description = "AWS region." 16 | default = "eu-west-1" 17 | } 18 | 19 | variable "aws_key_name" { 20 | description = "The SSH key name to use for the instances." 21 | } 22 | 23 | variable "aws_ssh_public_key" { 24 | description = "Path to the SSH public key." 25 | } 26 | 27 | variable "aws_ssh_private_key_file" { 28 | description = "Path to the SSH private key file." 29 | } 30 | 31 | variable "aws_ssh_user" { 32 | description = "SSH user." 
33 | default = "ubuntu" 34 | } 35 | 36 | variable "aws_vpc_cidr_block" { 37 | description = "The IPv4 address range that machines in the network are assigned to, represented as a CIDR block." 38 | default = "10.0.0.0/16" 39 | } 40 | 41 | variable "aws_subnet_cidr_block" { 42 | description = "The IPv4 address range that machines in the network are assigned to, represented as a CIDR block." 43 | default = "10.0.1.0/24" 44 | } 45 | 46 | variable "aws_image" { 47 | description = "The name of the image to base the launched instances." 48 | } 49 | 50 | variable "aws_instance_type_master" { 51 | description = "The machine type to use for the Hyperion master instance." 52 | default = "m3.medium" 53 | } 54 | 55 | variable "aws_instance_type_node" { 56 | description = "The machine type to use for the Hyperion nodes instances." 57 | default = "m3.medium" 58 | } 59 | -------------------------------------------------------------------------------- /terraform/etcd.env: -------------------------------------------------------------------------------- 1 | ETCD_OPTS='--listen-client-urls=http://0.0.0.0:4001 --data-dir=/etcd --advertise-client-urls=http://0.0.0.0:4001' 2 | 3 | ETCD_INITIAL_CLUSTER_TOKEN=${cluster_token} -------------------------------------------------------------------------------- /terraform/google/README.md: -------------------------------------------------------------------------------- 1 | # Terraform templates for Google Cloud 2 | 3 | This project contains [Terraform][] templates to help you deploy [hyperion-k8s][] on [Google cloud][]. 4 | 5 | ## Prerequisites 6 | 7 | * A Google Cloud account 8 | * A Google Compute Engine project 9 | * A Google Compute Engine account file 10 | * A Google Compute Engine Password-less SSH Key 11 | 12 | ## Configure 13 | 14 | The available variables that can be configured are: 15 | 16 | * **gce_account_file**: Path to the JSON file used to describe your account credentials, downloaded from Google Cloud Console 17 | * **gce_project**: The name of the project to apply any resources to 18 | * **gce_ssh_user**: SSH user 19 | * **pvt_key_file**: Path to the SSH private key file 20 | * **gce_region**: The region to operate under (default us-central1) 21 | * **gce_zone**: The zone that the machines should be created in (default us-central1-a) 22 | * **gce_ipv4_range**: The IPv4 address range that machines in the network are assigned to, represented as a CIDR block (default 10.0.0.0/16) 23 | * **gce_image**: The name of the image to base the launched instances (default ubuntu-1404-trusty-v20141212) 24 | 25 | Copy and renamed *terraform.tfvars.example* to *terraform.tfvars*. 26 | Follow the instructions in the comments of the terraform.tfvars.example and 27 | variables.tf file. 
28 | 29 | ## Deploy 30 | 31 | Deploy your cluster 32 | 33 | $ terraform apply --var-file=terraform.tfvars 34 | 35 | ## Destroy 36 | 37 | Destroy the cluster : 38 | 39 | $ terraform destroy --var-file=terraform.tfvars 40 | 41 | ## Updating 42 | 43 | 44 | 45 | 46 | 47 | [Terraform]: https://www.terraform.io/ 48 | [Google cloud]: https://cloud.google.com 49 | 50 | [hyperion-k8s]: http://github.com/portefaix/hyperion-k8s 51 | -------------------------------------------------------------------------------- /terraform/google/hyperion.tf: -------------------------------------------------------------------------------- 1 | resource "template_file" "kubernetes" { 2 | template = "../kubernetes.env" 3 | vars { 4 | api_servers = "http://${var.cluster_name}-master.c.${var.gce_project}.internal:8080" 5 | etcd_servers = "http://${var.cluster_name}-master.c.${var.gce_project}.internal:4001" 6 | flannel_backend = "${var.flannel_backend}" 7 | flannel_network = "${var.flannel_network}" 8 | portal_net = "${var.portal_net}" 9 | } 10 | } 11 | 12 | resource "template_file" "etcd" { 13 | template = "../etcd.env" 14 | vars { 15 | cluster_token = "${var.cluster_name}" 16 | } 17 | } 18 | 19 | resource "google_compute_network" "hyperion-network" { 20 | name = "hyperion" 21 | ipv4_range = "${var.gce_ipv4_range}" 22 | } 23 | 24 | # Firewall 25 | resource "google_compute_firewall" "hyperion-firewall-external" { 26 | name = "hyperion-firewall-external" 27 | network = "${google_compute_network.hyperion-network.name}" 28 | source_ranges = ["0.0.0.0/0"] 29 | 30 | allow { 31 | protocol = "icmp" 32 | } 33 | 34 | allow { 35 | protocol = "tcp" 36 | ports = [ 37 | "22", # SSH 38 | "80", # HTTP 39 | "443", # HTTPS 40 | "6443", # Kubernetes secured server 41 | "8080", # Kubernetes unsecure server 42 | ] 43 | } 44 | 45 | } 46 | 47 | resource "google_compute_firewall" "hyperion-firewall-internal" { 48 | name = "hyperion-firewall-internal" 49 | network = "${google_compute_network.hyperion-network.name}" 50 | source_ranges = ["${google_compute_network.hyperion-network.ipv4_range}"] 51 | 52 | allow { 53 | protocol = "tcp" 54 | ports = ["1-65535"] 55 | } 56 | 57 | allow { 58 | protocol = "udp" 59 | ports = ["1-65535"] 60 | } 61 | } 62 | 63 | resource "google_compute_address" "hyperion-master" { 64 | name = "hyperion-master" 65 | } 66 | 67 | resource "google_compute_instance" "hyperion-master" { 68 | zone = "${var.gce_zone}" 69 | name = "${var.cluster_name}-master" 70 | description = "Kubernetes master" 71 | machine_type = "${var.gce_machine_type_master}" 72 | 73 | disk { 74 | image = "${var.gce_image}" 75 | auto_delete = true 76 | } 77 | metadata { 78 | sshKeys = "${var.gce_ssh_user}:${file("${var.gce_ssh_public_key}")}" 79 | } 80 | network_interface { 81 | network = "${google_compute_network.hyperion-network.name}" 82 | access_config { 83 | nat_ip = "${google_compute_address.hyperion-master.address}" 84 | } 85 | } 86 | connection { 87 | user = "${var.gce_ssh_user}" 88 | key_file = "${var.gce_ssh_private_key_file}" 89 | agent = false 90 | } 91 | provisioner "remote-exec" { 92 | inline = [ 93 | "sudo cat <<'EOF' > /tmp/kubernetes.env\n${template_file.kubernetes.rendered}\nEOF", 94 | "sudo cat <<'EOF' > /tmp/etcd.env\n${template_file.etcd.rendered}\nEOF", 95 | "sudo mkdir -p /etc/kubernetes", 96 | "sudo mv /tmp/kubernetes.env /etc/kubernetes.env", 97 | "sudo mv /tmp/etcd.env /etc/etcd.env", 98 | "echo 'ETCD_NAME=${self.name}' >> /etc/etcd.env", 99 | "sudo systemctl enable etcd", 100 | "sudo systemctl enable flannel", 101 | "sudo systemctl 
enable docker", 102 | "sudo systemctl enable kube-apiserver", 103 | "sudo systemctl enable kube-controller-manager", 104 | "sudo systemctl enable kube-scheduler", 105 | "sudo systemctl start etcd", 106 | "sudo systemctl start flannel", 107 | "sudo systemctl start docker", 108 | "sudo systemctl start kube-apiserver", 109 | "sudo systemctl start kube-controller-manager", 110 | "sudo systemctl start kube-scheduler" 111 | ] 112 | } 113 | depends_on = [ 114 | "template_file.kubernetes", 115 | ] 116 | } 117 | 118 | resource "google_compute_instance" "hyperion-nodes" { 119 | count = "${var.hyperion_nb_nodes}" 120 | zone = "${var.gce_zone}" 121 | name = "${var.cluster_name}-node-${count.index}" // => `xxx-node-{0,1,2}` 122 | description = "Kubernetes node ${count.index}" 123 | machine_type = "${var.gce_machine_type_node}" 124 | 125 | disk { 126 | image = "${var.gce_image}" 127 | auto_delete = true 128 | } 129 | metadata { 130 | sshKeys = "${var.gce_ssh_user}:${file("${var.gce_ssh_public_key}")}" 131 | } 132 | network_interface { 133 | network = "${google_compute_network.hyperion-network.name}" 134 | access_config { 135 | // ephemeral ip 136 | } 137 | } 138 | connection { 139 | user = "${var.gce_ssh_user}" 140 | key_file = "${var.gce_ssh_private_key_file}" 141 | agent = false 142 | } 143 | provisioner "remote-exec" { 144 | inline = [ 145 | "sudo cat <<'EOF' > /tmp/kubernetes.env\n${template_file.kubernetes.rendered}\nEOF", 146 | "sudo mkdir -p /etc/kubernetes", 147 | "sudo mv /tmp/kubernetes.env /etc/kubernetes.env", 148 | "sudo systemctl enable flannel", 149 | "sudo systemctl enable docker", 150 | "sudo systemctl enable kube-kubelet", 151 | "sudo systemctl enable kube-proxy", 152 | "sudo systemctl start flannel", 153 | "sudo systemctl start docker", 154 | "sudo systemctl start kube-kubelet", 155 | "sudo systemctl start kube-proxy" 156 | ] 157 | } 158 | depends_on = [ 159 | "template_file.kubernetes", 160 | ] 161 | } 162 | -------------------------------------------------------------------------------- /terraform/google/output.tf: -------------------------------------------------------------------------------- 1 | output "kubernetes-api-server" { 2 | value = "https://${google_compute_instance.hyperion-master.network_interface.0.access_config.0.nat_ip}:8080" 3 | } 4 | -------------------------------------------------------------------------------- /terraform/google/provider.tf: -------------------------------------------------------------------------------- 1 | provider "google" { 2 | account_file = "" 3 | credentials = "${file("${var.gce_credentials}")}" 4 | project = "${var.gce_project}" 5 | region = "${var.gce_region}" 6 | } 7 | -------------------------------------------------------------------------------- /terraform/google/variables.tf: -------------------------------------------------------------------------------- 1 | variable "hyperion_nb_nodes" { 2 | description = "The number of nodes." 3 | default = "2" 4 | } 5 | 6 | variable "gce_credentials" { 7 | description = "Path to the JSON file used to describe your account credentials, downloaded from Google Cloud Console." 8 | } 9 | 10 | variable "gce_project" { 11 | description = "The name of the project to apply any resources to." 12 | } 13 | 14 | variable "gce_ssh_user" { 15 | description = "SSH user." 16 | } 17 | 18 | variable "gce_ssh_public_key" { 19 | description = "Path to the ssh key to use" 20 | } 21 | 22 | variable "gce_ssh_private_key_file" { 23 | description = "Path to the SSH private key file." 
24 | } 25 | 26 | variable "gce_region" { 27 | description = "The region to operate under." 28 | default = "us-central1" 29 | } 30 | 31 | variable "gce_zone" { 32 | description = "The zone that the machines should be created in." 33 | default = "us-central1-a" 34 | } 35 | 36 | variable "gce_ipv4_range" { 37 | description = "The IPv4 address range that machines in the network are assigned to, represented as a CIDR block." 38 | default = "10.0.0.0/16" 39 | } 40 | 41 | variable "gce_image" { 42 | description = "The name of the image to base the launched instances." 43 | default = "hyperion-0-9-4-v20151224" 44 | } 45 | 46 | variable "gce_machine_type_master" { 47 | description = "The machine type to use for the hyperion master ." 48 | default = "n1-standard-1" 49 | } 50 | 51 | variable "gce_machine_type_node" { 52 | description = "The machine type to use for the hyperion nodes ." 53 | default = "n1-standard-1" 54 | } 55 | 56 | variable "cluster_name" { 57 | default = "portefaix" 58 | } 59 | 60 | variable "flannel_backend" { 61 | default = "vxlan" 62 | } 63 | 64 | variable "flannel_network" { 65 | default = "10.10.0.0/16" 66 | } 67 | 68 | variable "portal_net" { 69 | default = "10.200.0.0/16" 70 | } 71 | -------------------------------------------------------------------------------- /terraform/kubernetes.env: -------------------------------------------------------------------------------- 1 | DOCKER_OPTS="daemon \ 2 | --host=tcp://0.0.0.0:2375 \ 3 | --host=unix:///var/run/docker.sock \ 4 | --ip-masq=false" 5 | 6 | FLANNEL_ETCD_PREFIX="/coreos.com/network/config" 7 | 8 | FLANNEL_NETWORK_CONFIG='{"Network":"${flannel_network}","Backend":{"Type":"${flannel_backend}"}}' 9 | 10 | FLANNEL_OPTS="--ip-masq --etcd-endpoints=${etcd_servers}" 11 | 12 | FLANNEL_ETCDCTL_OPTS="-C ${etcd_servers}" 13 | 14 | KUBE_APISERVER_OPTS="--insecure-bind-address=0.0.0.0 \ 15 | --insecure-port=8080 \ 16 | --etcd_servers=${etcd_servers} \ 17 | --logtostderr=true \ 18 | --service-cluster-ip-range=${portal_net} \ 19 | --v=2" 20 | 21 | KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=true \ 22 | --master=${api_servers} \ 23 | --v=2" 24 | 25 | KUBE_KUBELET_OPTS="--address=0.0.0.0 \ 26 | --api_servers=${api_servers} \ 27 | --enable_server \ 28 | --logtostderr=true \ 29 | --port=10250 \ 30 | --v=2" 31 | 32 | KUBE_PROXY_OPTS="--logtostderr=true \ 33 | --master=${api_servers} \ 34 | --v=2" 35 | 36 | KUBE_SCHEDULER_OPTS="--logtostderr=true \ 37 | --master=${api_servers} \ 38 | --v=2" 39 | -------------------------------------------------------------------------------- /terraform/openstack/README.md: -------------------------------------------------------------------------------- 1 | # Terraform templates for Openstack 2 | 3 | This project contains [Terraform][] templates to help you deploy [hyperion-k8s][] on [Openstack][]. 4 | 5 | ## Prerequisites 6 | 7 | * An Openstack Cloud 8 | * An Account, project and relevant access on your openstack cloud. 9 | 10 | ## Configure 11 | 12 | The available variables that can be configured are: 13 | 14 | * **openstack_access_key**: Openstack username. 15 | * **openstack_secret_key**: Openstack Password. 16 | * **openstack_tenant_name**: The Tenant/Project name in Openstack. 17 | * **openstack_key_name**: The name given to the SSH key which will be uploaded for use by the instances. 18 | * **pub_key**: The actual contents of rsa_id.pub to upload as the public key. 19 | * **pvt_key**: Path to the SSH private key file (Stays local. Used for provisioning.) 
20 | * **openstack_ssh_user**: SSH user (default: `ubuntu`).
21 | * **openstack_keystone_uri**: The Keystone API URL.
22 |
23 | Set up your credentials:
24 |
25 |     $ export OPENSTACK_ACCESS_KEY="xxxx"
26 |     $ export OPENSTACK_SECRET_KEY="xxxx"
27 |     $ ...
28 |
29 | ## Deploy
30 |
31 | Deploy your cluster:
32 |
33 |     $ terraform apply \
34 |         -var "openstack_public_key=$HOME/.ssh/id_rsa.pub" \
35 |         -var "openstack_ssh_private_key_file=$HOME/.ssh/id_rsa" \
36 |         -var "openstack_access_key=${OPENSTACK_ACCESS_KEY}" \
37 |         -var "openstack_secret_key=${OPENSTACK_SECRET_KEY}" \
38 |         [...]
39 |
40 |
41 | ## Destroy
42 |
43 | Destroy the cluster:
44 |
45 |     $ terraform destroy \
46 |         -var "openstack_access_key=${OPENSTACK_ACCESS_KEY}" \
47 |         -var "openstack_secret_key=${OPENSTACK_SECRET_KEY}" \
48 |         [...]
49 |
50 | ## Updating
51 |
52 |
53 |
54 |
55 | [Terraform]: https://www.terraform.io/
56 | [OpenStack]: https://www.openstack.org
57 |
58 | [hyperion-k8s]: http://github.com/portefaix/hyperion-k8s
59 |
--------------------------------------------------------------------------------
/terraform/openstack/hyperion.tf:
--------------------------------------------------------------------------------
1 | resource "openstack_compute_keypair_v2" "hyperion-key" {
2 |   name = "${var.openstack_key_name}"
3 |   region = "${var.openstack_region}"
4 |   public_key = "${file("${var.openstack_public_key}")}"
5 | }
6 |
7 | resource "openstack_compute_secgroup_v2" "hyperion-sg" {
8 |   region = "${var.openstack_region}"
9 |   name = "hyperion-sg"
10 |   description = "Security Group for Hyperion"
11 |   rule {
12 |     from_port = 22
13 |     to_port = 22
14 |     ip_protocol = "tcp"
15 |     cidr = "0.0.0.0/0"
16 |   }
17 |   rule {
18 |     from_port = 1
19 |     to_port = 65535
20 |     ip_protocol = "tcp"
21 |     cidr = "0.0.0.0/0"
22 |   }
23 |   rule {
24 |     from_port = 1
25 |     to_port = 65535
26 |     ip_protocol = "udp"
27 |     cidr = "0.0.0.0/0"
28 |   }
29 |   rule {
30 |     from_port = 1
31 |     to_port = 65535
32 |     ip_protocol = "tcp"
33 |     self = true
34 |   }
35 |   rule {
36 |     from_port = 1
37 |     to_port = 65535
38 |     ip_protocol = "udp"
39 |     self = true
40 |   }
41 |   rule {
42 |     from_port = 1
43 |     to_port = 1
44 |     ip_protocol = "icmp"
45 |     self = true
46 |   }
47 | }
48 |
49 | resource "openstack_networking_network_v2" "hyperion-network" {
50 |   region = "${var.openstack_region}"
51 |   name = "hyperion-network"
52 |   admin_state_up = "true"
53 | }
54 |
55 | resource "openstack_networking_subnet_v2" "hyperion-network" {
56 |   region = "${var.openstack_region}"
57 |   network_id = "${openstack_networking_network_v2.hyperion-network.id}"
58 |   cidr = "${var.openstack_subnet_cidr_block}"
59 |   ip_version = 4
60 | }
61 |
62 | # resource "openstack_networking_router_v2" "hyperion-network" {
63 | #   region = "${var.openstack_region}"
64 | #   name = "hyperion-network"
65 | #   admin_state_up = "true"
66 | #   external_gateway = "${var.openstack_neutron_router_gateway_network_id}"
67 | # }
68 |
69 | # resource "openstack_networking_router_interface_v2" "hyperion-network" {
70 | #   region = "${var.openstack_region}"
71 | #   router_id = "${openstack_networking_router_v2.hyperion-network.id}"
72 | #   subnet_id = "${openstack_networking_subnet_v2.hyperion-network.id}"
73 | # }
74 |
75 | resource "openstack_compute_floatingip_v2" "fip-master" {
76 |   region = "${var.openstack_region}"
77 |   pool = "${var.openstack_floating_ip_pool_name}"
78 | }
79 |
80 | resource "openstack_compute_floatingip_v2" "fip-nodes" {
81 |   count = "${var.hyperion_nb_nodes}"
82 |   region = "${var.openstack_region}"
83 |   pool = "${var.openstack_floating_ip_pool_name}"
84 | }
85 |
86 | resource "openstack_compute_instance_v2" "hyperion-master" {
87 |   region = "${var.openstack_region}"
88 |   name = "hyperion-master"
89 |   image_id = "${var.openstack_image_id}"
90 |   flavor_name = "${var.openstack_instance_type_master}"
91 |   key_pair = "${var.openstack_key_name}"
92 |   security_groups = ["${openstack_compute_secgroup_v2.hyperion-sg.name}"]
93 |   network {
94 |     uuid = "${openstack_networking_network_v2.hyperion-network.id}"
95 |   }
96 |   floating_ip = "${openstack_compute_floatingip_v2.fip-master.address}"
97 | }
98 |
99 | resource "openstack_compute_instance_v2" "hyperion-nodes" {
100 |   count = "${var.hyperion_nb_nodes}"
101 |   region = "${var.openstack_region}"
102 |   name = "hyperion-node-${count.index}" // => `hyperion-node-{0,1,2}`
103 |   image_id = "${var.openstack_image_id}"
104 |   flavor_name = "${var.openstack_instance_type_node}"
105 |   key_pair = "${var.openstack_key_name}"
106 |   security_groups = ["${openstack_compute_secgroup_v2.hyperion-sg.name}"]
107 |   network {
108 |     uuid = "${openstack_networking_network_v2.hyperion-network.id}"
109 |   }
110 |   floating_ip = "${element(openstack_compute_floatingip_v2.fip-nodes.*.address, count.index)}"
111 | }
112 |
--------------------------------------------------------------------------------
/terraform/openstack/provider.tf:
--------------------------------------------------------------------------------
1 |
2 | provider "openstack" {
3 |   user_name = "${var.openstack_access_key}"
4 |   tenant_name = "${var.openstack_tenant_name}"
5 |   auth_url = "${var.openstack_keystone_uri}"
6 |   password = "${var.openstack_secret_key}"
7 | }
8 |
--------------------------------------------------------------------------------
/terraform/openstack/variables.tf:
--------------------------------------------------------------------------------
1 | variable "hyperion_nb_nodes" {
2 |   description = "The number of nodes."
3 |   default = "2"
4 | }
5 |
6 | variable "openstack_access_key" {
7 |   description = "OpenStack username"
8 | }
9 |
10 | variable "openstack_secret_key" {
11 |   description = "OpenStack password"
12 | }
13 |
14 | variable "openstack_tenant_name" {
15 |   description = "OpenStack project / tenant name"
16 | }
17 |
18 | variable "openstack_keystone_uri" {
19 |   description = "OpenStack Keystone API URL"
20 | }
21 |
22 | variable "openstack_region" {
23 |   description = "OpenStack region"
24 |   default = ""
25 | }
26 |
27 | variable "openstack_key_name" {
28 |   description = "The SSH key name to use for the instances."
29 | }
30 |
31 | variable "openstack_public_key" {
32 |   description = "Path to the SSH public key file to upload as openstack_key_name."
33 | }
34 |
35 | variable "openstack_ssh_private_key_file" {
36 |   description = "Path to the SSH private key file."
37 | }
38 |
39 | variable "openstack_ssh_user" {
40 |   description = "SSH user."
41 |   default = "ubuntu"
42 | }
43 |
44 | variable "openstack_subnet_cidr_block" {
45 |   description = "The IPv4 address range that machines in the network are assigned to, represented as a CIDR block."
46 |   default = "10.0.1.0/24"
47 | }
48 |
49 | # variable "openstack_neutron_router_gateway_network_id" {
50 | #   description = "The UUID of the network that will be used as WAN breakout for the neutron L3 Router"
51 | # }
52 |
53 | variable "openstack_floating_ip_pool_name" {
54 |   description = "The name of the IP pool that floating IPs will be requested from."
55 |   default = "public"
56 | }
57 |
58 | variable "openstack_image_id" {
59 |   description = "The ID of the image to base the launched instances on."
60 | }
61 |
62 | variable "openstack_instance_type_master" {
63 |   description = "The machine type to use for the Hyperion master instance."
64 | }
65 | variable "openstack_instance_type_node" {
66 |   description = "The machine type to use for the Hyperion node instances."
67 | }
68 |
--------------------------------------------------------------------------------
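
To see how the OpenStack variables above fit together, here is a minimal, illustrative `terraform.tfvars` sketch for `terraform/openstack/`. It is not part of the repository, and every value below (Keystone URL, image UUID, flavor names, key paths, pool name) is a placeholder that must be replaced with values from your own cloud:

    # Illustrative only -- all values are placeholders, not defaults shipped with Hyperion.
    openstack_access_key            = "demo-user"
    openstack_secret_key            = "demo-password"
    openstack_tenant_name           = "demo-project"
    openstack_keystone_uri          = "https://keystone.example.com:5000/v2.0"
    openstack_key_name              = "hyperion-key"
    openstack_public_key            = "/home/user/.ssh/id_rsa.pub"
    openstack_ssh_private_key_file  = "/home/user/.ssh/id_rsa"
    openstack_image_id              = "00000000-0000-0000-0000-000000000000"  # Glance UUID of the Hyperion image
    openstack_instance_type_master  = "m1.medium"
    openstack_instance_type_node    = "m1.small"
    openstack_floating_ip_pool_name = "public"
    hyperion_nb_nodes               = "2"

Because Terraform automatically loads a `terraform.tfvars` file from the working directory, placing such a file next to `hyperion.tf` replaces the long list of `-var` flags shown in the OpenStack README above.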