├── .gitignore ├── LICENSE ├── README.md ├── addons ├── skydns-rc.yaml └── skydns-svc.yaml ├── azure-login.js ├── cloud_config_templates ├── kubernetes-cluster-etcd-node-template.yml └── kubernetes-cluster-main-nodes-template.yml ├── create-kubernetes-cluster.js ├── destroy-cluster.js ├── expose_guestbook_app_port.sh ├── external_access.png ├── initial_cluster.png ├── lib ├── azure_wrapper.js ├── cloud_config.js ├── deployment_logic │ └── kubernetes.js └── util.js ├── output └── .gitignore ├── package.json └── scale-kubernetes-cluster.js /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules/ 2 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | Copyright 2014-2016 Weaveworks Ltd. 179 | 180 | Licensed under the Apache License, Version 2.0 (the "License"); 181 | you may not use this file except in compliance with the License. 182 | You may obtain a copy of the License at 183 | 184 | http://www.apache.org/licenses/LICENSE-2.0 185 | 186 | Unless required by applicable law or agreed to in writing, software 187 | distributed under the License is distributed on an "AS IS" BASIS, 188 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 189 | See the License for the specific language governing permissions and 190 | limitations under the License. 191 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Legacy Weave Getting Started Guides 2 | 3 | **Note:** The guides in here haven't been updated in a while. 4 | They might or might not work for you. We are keeping them around for informational purposes. 5 | 6 | For up-to-date documentation around products and projects started by Weaveworks, please refer to: 7 | 8 | Weaveworks products: 9 | 10 | Open Source projects started at Weaveworks: 11 | 12 | - Cortex (CNCF): 13 | - Flagger (CNCF): 14 | - Flux (CNCF): 15 | - Grafanalib: 16 | - Ignite: 17 | - Weave GitOps: 18 | - wksctl: 19 | 20 | --- 21 | 22 | In this guide I will demonstrate how to deploy a Kubernetes cluster to Azure cloud. You will be using CoreOS with Weave, which implements simple and secure networking, in a transparent, yet robust way. The purpose of this guide is to provide an out-of-the-box implementation that can ultimately be taken into production with little change. 
It will demonstrate how to provision a dedicated Kubernetes master and etcd nodes, and show how to scale the cluster with ease. 23 | 24 | ### Prerequisites 25 | 26 | 1. You need an Azure account. 27 | 28 | ## Let's go! 29 | 30 | To get started, you need to check out the code: 31 | 32 | ```shell 33 | git clone https://github.com/weaveworks-guides/weave-kubernetes-coreos-azure 34 | cd weave-kubernetes-coreos-azure 35 | ``` 36 | 37 | You will need to have [Node.js installed](http://nodejs.org/download/) on your machine. If you have previously used Azure CLI, you should have it already. 38 | 39 | First, you need to install some of the dependencies with: 40 | 41 | ```shell 42 | npm install 43 | ``` 44 | 45 | Now, all you need to do is: 46 | 47 | ```shell 48 | ./azure-login.js -u <your_username> 49 | ./create-kubernetes-cluster.js 50 | ``` 51 | 52 | This script will provision a cluster suitable for production use, where there is a ring of 3 dedicated etcd nodes, plus 1 Kubernetes master and 2 Kubernetes nodes. The `kube-00` VM will be the master; your workloads are only to be deployed on the nodes, `kube-01` and `kube-02`. Initially, all VMs are single-core, to ensure a user of the free tier can reproduce it without paying extra. I will show how to add bigger VMs later. 53 | If you need to pass Azure-specific options to the creation script, you can do this via additional environment variables, e.g. 54 | 55 | ```shell 56 | AZ_SUBSCRIPTION= AZ_LOCATION="East US" ./create-kubernetes-cluster.js 57 | # or 58 | AZ_VM_COREOS_CHANNEL=beta ./create-kubernetes-cluster.js 59 | ``` 60 | 61 | ![VMs in Azure](initial_cluster.png) 62 | 63 | Once the creation of Azure VMs has finished, you should see the following: 64 | 65 | ```shell 66 | ... 67 | azure_wrapper/info: Saved SSH config, you can use it like so: `ssh -F ./output/kube_1c1496016083b4_ssh_conf <hostname>` 68 | azure_wrapper/info: The hosts in this deployment are: 69 | [ 'etcd-00', 'etcd-01', 'etcd-02', 'kube-00', 'kube-01', 'kube-02' ] 70 | azure_wrapper/info: Saved state into `./output/kube_1c1496016083b4_deployment.yml` 71 | ``` 72 | 73 | Let's log in to the master node like so: 74 | 75 | ```shell 76 | ssh -F ./output/kube_1c1496016083b4_ssh_conf kube-00 77 | ``` 78 | 79 | > Note: the config file name will be different; make sure to use the one you see. 80 | 81 | Check that there are 2 nodes in the cluster: 82 | 83 | ```shell 84 | core@kube-00 ~ $ kubectl get nodes 85 | NAME LABELS STATUS 86 | kube-01 kubernetes.io/hostname=kube-01 Ready 87 | kube-02 kubernetes.io/hostname=kube-02 Ready 88 | ``` 89 | 90 | ## Deploying the workload 91 | 92 | Let's follow the Guestbook example now: 93 | 94 | ```shell 95 | kubectl create -f ~/guestbook-example 96 | ``` 97 | 98 | You need to wait for the pods to get deployed; run the following and wait for `STATUS` to change from `Pending` to `Running`. 99 | 100 | ```shell 101 | kubectl get pods --watch 102 | ``` 103 | 104 | > Note: most of the time will be spent downloading Docker container images on each of the nodes. 105 | 106 | Eventually you should see: 107 | 108 | ```shell 109 | NAME READY STATUS RESTARTS AGE 110 | frontend-0a9xi 1/1 Running 0 4m 111 | frontend-4wahe 1/1 Running 0 4m 112 | frontend-6l36j 1/1 Running 0 4m 113 | redis-master-talmr 1/1 Running 0 4m 114 | redis-slave-12zfd 1/1 Running 0 4m 115 | redis-slave-3nbce 1/1 Running 0 4m 116 | ``` 117 | 118 | ## Scaling 119 | 120 | Two single-core nodes are certainly not enough for a production system of today. Let's scale the cluster by adding a couple of bigger nodes.
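Before adding capacity, it can be useful to see how busy the existing nodes already are. One way to do that, using only the `kubectl` commands shipped with the Kubernetes release installed by this guide (output omitted here), is to describe the nodes from `kube-00`:

```shell
core@kube-00 ~ $ kubectl describe nodes kube-01 kube-02
```

This lists each node's capacity and the pods currently scheduled on it, which helps you decide how many nodes to add and which `AZ_VM_SIZE` to pick in the next step.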
121 | 122 | You will need to open another terminal window on your machine and go to the same working directory (e.g. `~/Workspace/weave-kubernetes-coreos-azure/`). 123 | 124 | First, let's set the size of the new VMs: 125 | 126 | ```shell 127 | export AZ_VM_SIZE=Large 128 | ``` 129 | 130 | Now, run the scale script with the state file of the previous deployment and the number of nodes to add: 131 | 132 | ```shell 133 | ./scale-kubernetes-cluster.js ./output/kube_1c1496016083b4_deployment.yml 2 134 | ... 135 | azure_wrapper/info: Saved SSH config, you can use it like so: `ssh -F ./output/kube_8f984af944f572_ssh_conf <hostname>` 136 | azure_wrapper/info: The hosts in this deployment are: 137 | [ 'etcd-00', 138 | 'etcd-01', 139 | 'etcd-02', 140 | 'kube-00', 141 | 'kube-01', 142 | 'kube-02', 143 | 'kube-03', 144 | 'kube-04' ] 145 | azure_wrapper/info: Saved state into `./output/kube_8f984af944f572_deployment.yml` 146 | ``` 147 | 148 | > Note: this step has created new files in `./output`. 149 | 150 | Back on `kube-00`: 151 | 152 | ```shell 153 | core@kube-00 ~ $ kubectl get nodes 154 | NAME LABELS STATUS 155 | kube-01 kubernetes.io/hostname=kube-01 Ready 156 | kube-02 kubernetes.io/hostname=kube-02 Ready 157 | kube-03 kubernetes.io/hostname=kube-03 Ready 158 | kube-04 kubernetes.io/hostname=kube-04 Ready 159 | ``` 160 | 161 | You can see that two more nodes joined happily. Let's scale the number of Guestbook instances now. 162 | 163 | First, double-check how many replication controllers there are: 164 | 165 | ```shell 166 | core@kube-00 ~ $ kubectl get rc 167 | CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS 168 | frontend php-redis kubernetes/example-guestbook-php-redis:v2 name=frontend 3 169 | redis-master master redis name=redis-master 1 170 | redis-slave worker kubernetes/redis-slave:v2 name=redis-slave 2 171 | ``` 172 | 173 | As there are 4 nodes, let's scale proportionally: 174 | 175 | ```shell 176 | core@kube-00 ~ $ kubectl scale --replicas=4 rc redis-slave 177 | scaled 178 | core@kube-00 ~ $ kubectl scale --replicas=4 rc frontend 179 | scaled 180 | ``` 181 | 182 | Check what you have now: 183 | 184 | ```shell 185 | core@kube-00 ~ $ kubectl get rc 186 | CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS 187 | frontend php-redis kubernetes/example-guestbook-php-redis:v2 name=frontend 4 188 | redis-master master redis name=redis-master 1 189 | redis-slave worker kubernetes/redis-slave:v2 name=redis-slave 4 190 | ``` 191 | 192 | You will now have more instances of front-end Guestbook apps and Redis slaves; and, if you look up all pods labeled `name=frontend`, you should see one running on each node. 193 | 194 | ```shell 195 | core@kube-00 ~/guestbook-example $ kubectl get pods -l name=frontend 196 | NAME READY STATUS RESTARTS AGE 197 | frontend-0a9xi 1/1 Running 0 22m 198 | frontend-4wahe 1/1 Running 0 22m 199 | frontend-6l36j 1/1 Running 0 22m 200 | frontend-z9oxo 1/1 Running 0 41s 201 | ``` 202 | 203 | ## Exposing the app to the outside world 204 | 205 | There is no native Azure load-balancer support in Kubernetes 1.0; however, here is how you can expose the Guestbook app to the Internet.
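Under the hood, `expose_guestbook_app_port.sh` (included in this repository and shown in full further down) asks the master for the `NodePort` that Kubernetes assigned to the `frontend` service, and then creates an Azure endpoint mapping port 80 on `kube-00` to that port. Slightly simplified, it amounts to the following (use your own ssh_conf file name):

```shell
# Read the frontend service's NodePort from the master, then map port 80 on kube-00 to it.
fe_port=$(ssh -F ./output/kube_1c1496016083b4_ssh_conf kube-00 \
  "/opt/bin/kubectl get -o template --template='{{(index .spec.ports 0).nodePort}}' services frontend")
./node_modules/.bin/azure vm endpoint create kube-00 80 $fe_port
```

Running the script does all of this for you: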
206 | 207 | ```shell 208 | ./expose_guestbook_app_port.sh ./output/kube_1c1496016083b4_ssh_conf 209 | Guestbook app is on port 31605, will map it to port 80 on kube-00 210 | info: Executing command vm endpoint create 211 | + Getting virtual machines 212 | + Reading network configuration 213 | + Updating network configuration 214 | info: vm endpoint create command OK 215 | info: Executing command vm endpoint show 216 | + Getting virtual machines 217 | data: Name : tcp-80-31605 218 | data: Local port : 31605 219 | data: Protcol : tcp 220 | data: Virtual IP Address : 137.117.156.164 221 | data: Direct server return : Disabled 222 | info: vm endpoint show command OK 223 | ``` 224 | 225 | You should then be able to access it from anywhere via the Azure virtual IP for `kube-00` displayed above, i.e. `http://137.117.156.164/` in my case. 226 | 227 | ## Next steps 228 | 229 | You now have a full-blown cluster running in Azure, congrats! 230 | 231 | You should probably try deploying other [example apps](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/examples/) or write your own ;) 232 | 233 | ## Tear down... 234 | 235 | If you don't want to keep paying the Azure bill, you can tear down the cluster. It's easy to redeploy it, as you can see. 236 | 237 | ```shell 238 | ./destroy-cluster.js ./output/kube_8f984af944f572_deployment.yml 239 | ``` 240 | 241 | > Note: make sure to use the _latest state file_, as after scaling there is a new one. 242 | 243 | By the way, with the scripts shown, you can deploy multiple clusters, if you like :) 244 | -------------------------------------------------------------------------------- /addons/skydns-rc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: kube-dns-v9 5 | namespace: kube-system 6 | labels: 7 | k8s-app: kube-dns 8 | version: v9 9 | kubernetes.io/cluster-service: "true" 10 | spec: 11 | replicas: 3 12 | selector: 13 | k8s-app: kube-dns 14 | version: v9 15 | template: 16 | metadata: 17 | labels: 18 | k8s-app: kube-dns 19 | version: v9 20 | kubernetes.io/cluster-service: "true" 21 | spec: 22 | containers: 23 | - name: etcd 24 | image: gcr.io/google_containers/etcd:2.0.9 25 | resources: 26 | limits: 27 | cpu: 100m 28 | memory: 50Mi 29 | command: 30 | - /usr/local/bin/etcd 31 | - -data-dir 32 | - /var/etcd/data 33 | - -listen-client-urls 34 | - http://127.0.0.1:2379,http://127.0.0.1:4001 35 | - -advertise-client-urls 36 | - http://127.0.0.1:2379,http://127.0.0.1:4001 37 | - -initial-cluster-token 38 | - skydns-etcd 39 | volumeMounts: 40 | - name: etcd-storage 41 | mountPath: /var/etcd/data 42 | - name: kube2sky 43 | image: gcr.io/google_containers/kube2sky:1.11 44 | resources: 45 | limits: 46 | cpu: 100m 47 | memory: 50Mi 48 | args: 49 | # command = "/kube2sky" 50 | - -domain=kube.local 51 | - -kube_master_url=http://kube-00:8080 52 | - name: skydns 53 | image: gcr.io/google_containers/skydns:2015-03-11-001 54 | resources: 55 | limits: 56 | cpu: 100m 57 | memory: 50Mi 58 | args: 59 | # command = "/skydns" 60 | - -machines=http://localhost:4001 61 | - -addr=0.0.0.0:53 62 | - -domain=kube.local 63 | ports: 64 | - containerPort: 53 65 | name: dns 66 | protocol: UDP 67 | - containerPort: 53 68 | name: dns-tcp 69 | protocol: TCP 70 | livenessProbe: 71 | httpGet: 72 | path: /healthz 73 | port: 8080 74 | scheme: HTTP 75 | initialDelaySeconds: 30 76 | timeoutSeconds: 5 77 | readinessProbe: 78 | httpGet: 79 | path: /healthz 80 | port: 8080 81 |
scheme: HTTP 82 | initialDelaySeconds: 1 83 | timeoutSeconds: 5 84 | - name: healthz 85 | image: gcr.io/google_containers/exechealthz:1.0 86 | resources: 87 | limits: 88 | cpu: 10m 89 | memory: 20Mi 90 | args: 91 | - -cmd=nslookup kubernetes.default.svc.kube.local localhost >/dev/null 92 | - -port=8080 93 | ports: 94 | - containerPort: 8080 95 | protocol: TCP 96 | volumes: 97 | - name: etcd-storage 98 | emptyDir: {} 99 | dnsPolicy: Default # Don't use cluster DNS. 100 | -------------------------------------------------------------------------------- /addons/skydns-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kube-dns 5 | namespace: kube-system 6 | labels: 7 | k8s-app: kube-dns 8 | kubernetes.io/cluster-service: "true" 9 | kubernetes.io/name: "KubeDNS" 10 | spec: 11 | selector: 12 | k8s-app: kube-dns 13 | clusterIP: 10.16.0.3 14 | ports: 15 | - name: dns 16 | port: 53 17 | protocol: UDP 18 | - name: dns-tcp 19 | port: 53 20 | protocol: TCP 21 | -------------------------------------------------------------------------------- /azure-login.js: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | require('child_process').fork('node_modules/azure-cli/bin/azure', ['login'].concat(process.argv)); 4 | -------------------------------------------------------------------------------- /cloud_config_templates/kubernetes-cluster-etcd-node-template.yml: -------------------------------------------------------------------------------- 1 | ## This file is used as input to deployment script, which amends it as needed. 2 | ## More specifically, we need to add peer hosts for each but the elected peer. 3 | 4 | coreos: 5 | units: 6 | - name: etcd2.service 7 | enable: true 8 | command: start 9 | etcd2: 10 | name: '%H' 11 | initial-cluster-token: 'etcd-cluster' 12 | initial-advertise-peer-urls: 'http://%H:2380' 13 | listen-peer-urls: 'http://%H:2380' 14 | listen-client-urls: 'http://0.0.0.0:2379,http://0.0.0.0:4001' 15 | advertise-client-urls: 'http://%H:2379,http://%H:4001' 16 | initial-cluster-state: 'new' 17 | update: 18 | group: stable 19 | reboot-strategy: off 20 | -------------------------------------------------------------------------------- /cloud_config_templates/kubernetes-cluster-main-nodes-template.yml: -------------------------------------------------------------------------------- 1 | ## This file is used as input to deployment script, which amends it as needed. 2 | ## More specifically, we need to add environment files for as many nodes as we 3 | ## are going to deploy. 
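## As an illustration only (this block is not part of the template itself; the
## values are generated at deploy time by lib/deployment_logic/kubernetes.js),
## the environment file entry appended for kube-01 looks roughly like:
##
##   - path: /etc/weave.kube-01.env
##     permissions: '0600'
##     owner: root
##     content: |
##       WEAVE_PASSWORD="<random hex string>"
##       WEAVE_PEERS="kube-00"
##       BREAKOUT_ROUTE="10.2.0.0/16"
##       BRIDGE_ADDRESS_CIDR="10.2.1.1/24"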
4 | 5 | write_files: 6 | - path: /opt/bin/curl-retry.sh 7 | permissions: '0755' 8 | owner: root 9 | content: | 10 | #!/bin/sh -x 11 | until curl $@ 12 | do sleep 1 13 | done 14 | 15 | coreos: 16 | update: 17 | group: stable 18 | reboot-strategy: off 19 | units: 20 | - name: systemd-networkd-wait-online.service 21 | drop-ins: 22 | - name: 50-check-github-is-reachable.conf 23 | content: | 24 | [Service] 25 | ExecStart=/bin/sh -x -c \ 26 | 'until curl --silent --fail https://status.github.com/api/status.json | grep -q \"good\"; do sleep 2; done' 27 | 28 | - name: weave-network.target 29 | enable: true 30 | content: | 31 | [Unit] 32 | Description=Weave Network Setup Complete 33 | Documentation=man:systemd.special(7) 34 | RefuseManualStart=no 35 | After=network-online.target 36 | [Install] 37 | WantedBy=multi-user.target 38 | WantedBy=kubernetes-master.target 39 | WantedBy=kubernetes-node.target 40 | 41 | - name: kubernetes-master.target 42 | enable: true 43 | command: start 44 | content: | 45 | [Unit] 46 | Description=Kubernetes Cluster Master 47 | Documentation=http://kubernetes.io/ 48 | RefuseManualStart=no 49 | After=weave-network.target 50 | Requires=weave-network.target 51 | ConditionHost=kube-00 52 | Wants=kube-apiserver.service 53 | Wants=kube-scheduler.service 54 | Wants=kube-controller-manager.service 55 | Wants=kube-proxy.service 56 | [Install] 57 | WantedBy=multi-user.target 58 | 59 | - name: kubernetes-node.target 60 | enable: true 61 | command: start 62 | content: | 63 | [Unit] 64 | Description=Kubernetes Cluster Node 65 | Documentation=http://kubernetes.io/ 66 | RefuseManualStart=no 67 | After=weave-network.target 68 | Requires=weave-network.target 69 | ConditionHost=!kube-00 70 | Wants=kube-proxy.service 71 | Wants=kubelet.service 72 | [Install] 73 | WantedBy=multi-user.target 74 | 75 | - name: 10-weave.network 76 | runtime: false 77 | content: | 78 | [Match] 79 | Type=bridge 80 | Name=weave* 81 | [Network] 82 | 83 | - name: install-weave.service 84 | enable: true 85 | content: | 86 | [Unit] 87 | After=network-online.target 88 | After=docker.service 89 | Before=weave.service 90 | Description=Install Weave 91 | Documentation=http://docs.weave.works/ 92 | Requires=network-online.target 93 | [Service] 94 | EnvironmentFile=-/etc/weave.%H.env 95 | EnvironmentFile=-/etc/weave.env 96 | Type=oneshot 97 | RemainAfterExit=yes 98 | TimeoutStartSec=0 99 | ExecStartPre=/bin/mkdir -p /opt/bin/ 100 | ExecStartPre=/opt/bin/curl-retry.sh \ 101 | --silent \ 102 | --location \ 103 | git.io/weave \ 104 | --output /opt/bin/weave 105 | ExecStartPre=/usr/bin/chmod +x /opt/bin/weave 106 | ExecStart=/opt/bin/weave setup 107 | [Install] 108 | WantedBy=weave-network.target 109 | WantedBy=weave.service 110 | 111 | - name: weaveproxy.service 112 | enable: true 113 | content: | 114 | [Unit] 115 | After=install-weave.service 116 | After=docker.service 117 | Description=Weave proxy for Docker API 118 | Documentation=http://docs.weave.works/ 119 | Requires=docker.service 120 | Requires=install-weave.service 121 | [Service] 122 | EnvironmentFile=-/etc/weave.%H.env 123 | EnvironmentFile=-/etc/weave.env 124 | ExecStartPre=/opt/bin/weave launch-proxy --rewrite-inspect --without-dns 125 | ExecStart=/usr/bin/docker attach weaveproxy 126 | Restart=on-failure 127 | ExecStop=/opt/bin/weave stop-proxy 128 | [Install] 129 | WantedBy=weave-network.target 130 | 131 | - name: weave.service 132 | enable: true 133 | content: | 134 | [Unit] 135 | After=install-weave.service 136 | After=docker.service 137 | Description=Weave 
Network Router 138 | Documentation=http://docs.weave.works/ 139 | Requires=docker.service 140 | Requires=install-weave.service 141 | [Service] 142 | TimeoutStartSec=0 143 | EnvironmentFile=-/etc/weave.%H.env 144 | EnvironmentFile=-/etc/weave.env 145 | ExecStartPre=/opt/bin/weave launch-router $WEAVE_PEERS 146 | ExecStart=/usr/bin/docker attach weave 147 | Restart=on-failure 148 | ExecStop=/opt/bin/weave stop-router 149 | [Install] 150 | WantedBy=weave-network.target 151 | 152 | - name: weave-expose.service 153 | enable: true 154 | content: | 155 | [Unit] 156 | After=install-weave.service 157 | After=weave.service 158 | After=docker.service 159 | Documentation=http://docs.weave.works/ 160 | Requires=docker.service 161 | Requires=install-weave.service 162 | Requires=weave.service 163 | [Service] 164 | Type=oneshot 165 | RemainAfterExit=yes 166 | TimeoutStartSec=0 167 | EnvironmentFile=-/etc/weave.%H.env 168 | EnvironmentFile=-/etc/weave.env 169 | ExecStart=/opt/bin/weave expose 170 | ExecStop=/opt/bin/weave hide 171 | [Install] 172 | WantedBy=weave-network.target 173 | 174 | - name: install-kubernetes.service 175 | enable: true 176 | content: | 177 | [Unit] 178 | After=network-online.target 179 | Before=kube-apiserver.service 180 | Before=kube-controller-manager.service 181 | Before=kubelet.service 182 | Before=kube-proxy.service 183 | Description=Download Kubernetes Binaries 184 | Documentation=http://kubernetes.io/ 185 | Requires=network-online.target 186 | [Service] 187 | Environment=KUBE_RELEASE_TARBALL=https://github.com/kubernetes/kubernetes/releases/download/v1.2.1/kubernetes.tar.gz 188 | ExecStartPre=/bin/mkdir -p /opt/ 189 | ExecStart=/opt/bin/curl-retry.sh --silent --location $KUBE_RELEASE_TARBALL --output /tmp/kubernetes.tgz 190 | ExecStart=/bin/tar xzvf /tmp/kubernetes.tgz -C /var/tmp/ 191 | ExecStart=/bin/tar xzvf /var/tmp/kubernetes/server/kubernetes-server-linux-amd64.tar.gz -C /opt 192 | ExecStartPost=/bin/chmod o+rx -R /opt/kubernetes 193 | ExecStartPost=/bin/ln -s /opt/kubernetes/server/bin/kubectl /opt/bin/ 194 | ExecStartPost=/bin/mv /var/tmp/kubernetes/examples/guestbook /home/core/guestbook-example 195 | ExecStartPost=/bin/chown core. 
-R /home/core/guestbook-example 196 | ExecStartPost=/bin/rm -f /tmp/kubernetes.tgz 197 | ExecStartPost=/bin/rm -rf /var/tmp/kubernetes 198 | ExecStartPost=/bin/sed 's/# type: LoadBalancer/type: NodePort/' -i /home/core/guestbook-example/frontend-service.yaml 199 | RemainAfterExit=yes 200 | Type=oneshot 201 | [Install] 202 | WantedBy=kubernetes-master.target 203 | WantedBy=kubernetes-node.target 204 | 205 | - name: kube-apiserver.service 206 | enable: true 207 | content: | 208 | [Unit] 209 | After=install-kubernetes.service 210 | Before=kube-controller-manager.service 211 | Before=kube-scheduler.service 212 | ConditionFileIsExecutable=/opt/kubernetes/server/bin/kube-apiserver 213 | Description=Kubernetes API Server 214 | Documentation=http://kubernetes.io/ 215 | Wants=install-kubernetes.service 216 | ConditionHost=kube-00 217 | [Service] 218 | ExecStart=/opt/kubernetes/server/bin/kube-apiserver \ 219 | --insecure-bind-address=0.0.0.0 \ 220 | --advertise-address=$public_ipv4 \ 221 | --port=8080 \ 222 | $ETCD_SERVERS \ 223 | --service-cluster-ip-range=10.16.0.0/12 \ 224 | --allow-privileged=true \ 225 | --logtostderr=true 226 | Restart=always 227 | RestartSec=10 228 | [Install] 229 | WantedBy=kubernetes-master.target 230 | 231 | - name: kube-scheduler.service 232 | enable: true 233 | content: | 234 | [Unit] 235 | After=kube-apiserver.service 236 | After=install-kubernetes.service 237 | ConditionFileIsExecutable=/opt/kubernetes/server/bin/kube-scheduler 238 | Description=Kubernetes Scheduler 239 | Documentation=http://kubernetes.io/ 240 | Wants=kube-apiserver.service 241 | ConditionHost=kube-00 242 | [Service] 243 | ExecStart=/opt/kubernetes/server/bin/kube-scheduler \ 244 | --logtostderr=true \ 245 | --master=127.0.0.1:8080 246 | Restart=always 247 | RestartSec=10 248 | [Install] 249 | WantedBy=kubernetes-master.target 250 | 251 | - name: kube-controller-manager.service 252 | enable: true 253 | content: | 254 | [Unit] 255 | After=install-kubernetes.service 256 | After=kube-apiserver.service 257 | ConditionFileIsExecutable=/opt/kubernetes/server/bin/kube-controller-manager 258 | Description=Kubernetes Controller Manager 259 | Documentation=http://kubernetes.io/ 260 | Wants=kube-apiserver.service 261 | Wants=install-kubernetes.service 262 | ConditionHost=kube-00 263 | [Service] 264 | ExecStart=/opt/kubernetes/server/bin/kube-controller-manager \ 265 | --master=127.0.0.1:8080 \ 266 | --logtostderr=true 267 | Restart=always 268 | RestartSec=10 269 | [Install] 270 | WantedBy=kubernetes-master.target 271 | 272 | - name: kubelet.service 273 | enable: true 274 | content: | 275 | [Unit] 276 | After=install-kubernetes.service 277 | ConditionFileIsExecutable=/opt/kubernetes/server/bin/kubelet 278 | Description=Kubernetes Kubelet 279 | Documentation=http://kubernetes.io/ 280 | Wants=install-kubernetes.service 281 | ConditionHost=!kube-00 282 | [Service] 283 | ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests/ 284 | ExecStart=/opt/kubernetes/server/bin/kubelet \ 285 | --docker-endpoint=unix:///var/run/weave/weave.sock \ 286 | --address=0.0.0.0 \ 287 | --port=10250 \ 288 | --hostname-override=%H \ 289 | --api-servers=http://kube-00:8080 \ 290 | --logtostderr=true \ 291 | --cluster-dns=10.16.0.3 \ 292 | --cluster-domain=kube.local \ 293 | --config=/etc/kubernetes/manifests/ 294 | Restart=always 295 | RestartSec=10 296 | [Install] 297 | WantedBy=kubernetes-node.target 298 | 299 | - name: kube-proxy.service 300 | enable: true 301 | content: | 302 | [Unit] 303 | After=install-kubernetes.service 304 | 
ConditionFileIsExecutable=/opt/kubernetes/server/bin/kube-proxy 305 | Description=Kubernetes Proxy 306 | Documentation=http://kubernetes.io/ 307 | Wants=install-kubernetes.service 308 | [Service] 309 | ExecStart=/opt/kubernetes/server/bin/kube-proxy \ 310 | --master=http://kube-00:8080 \ 311 | --proxy-mode=userspace \ 312 | --logtostderr=true 313 | Restart=always 314 | RestartSec=10 315 | [Install] 316 | WantedBy=kubernetes-master.target 317 | WantedBy=kubernetes-node.target 318 | 319 | - name: kube-create-addons.service 320 | enable: true 321 | content: | 322 | [Unit] 323 | After=install-kubernetes.service 324 | ConditionFileIsExecutable=/opt/kubernetes/server/bin/kubectl 325 | ConditionPathIsDirectory=/etc/kubernetes/addons/ 326 | ConditionHost=kube-00 327 | Description=Kubernetes Addons 328 | Documentation=http://kubernetes.io/ 329 | Wants=install-kubernetes.service 330 | Wants=kube-apiserver.service 331 | [Service] 332 | Type=oneshot 333 | RemainAfterExit=no 334 | ExecStart=/bin/bash -c 'until /opt/kubernetes/server/bin/kubectl create -f /etc/kubernetes/addons/; do sleep 2; done' 335 | SuccessExitStatus=1 336 | [Install] 337 | WantedBy=kubernetes-master.target 338 | -------------------------------------------------------------------------------- /create-kubernetes-cluster.js: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | var azure = require('./lib/azure_wrapper.js'); 4 | var kube = require('./lib/deployment_logic/kubernetes.js'); 5 | 6 | azure.create_config('kube', { 'etcd': 3, 'kube': 3 }); 7 | 8 | var coreos_update_channel = process.env['AZ_VM_COREOS_CHANNEL'] || 'stable'; 9 | 10 | azure.run_task_queue([ 11 | azure.queue_default_network(), 12 | azure.queue_storage_if_needed(), 13 | azure.queue_machines('etcd', coreos_update_channel, 14 | kube.create_etcd_cloud_config), 15 | azure.queue_machines('kube', coreos_update_channel, 16 | kube.create_node_cloud_config), 17 | ]); 18 | -------------------------------------------------------------------------------- /destroy-cluster.js: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | var azure = require('./lib/azure_wrapper.js'); 4 | 5 | azure.destroy_cluster(process.argv[2]); 6 | 7 | console.log('The cluster had been destroyed, you can delete the state file now.'); 8 | -------------------------------------------------------------------------------- /expose_guestbook_app_port.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Copyright 2014 The Kubernetes Authors All rights reserved. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | set -e 18 | 19 | [ ! 
-z $1 ] || (echo Usage: $0 ssh_conf; exit 1) 20 | 21 | fe_port=$(ssh -F $1 kube-00 \ 22 | "/opt/bin/kubectl get -o template --template='{{(index .spec.ports 0).nodePort}}' services frontend -L name=frontend" \ 23 | ) 24 | 25 | echo "Guestbook app is on port $fe_port, will map it to port 80 on kube-00" 26 | 27 | ./node_modules/.bin/azure vm endpoint create kube-00 80 $fe_port 28 | 29 | ./node_modules/.bin/azure vm endpoint show kube-00 tcp-80-${fe_port} 30 | -------------------------------------------------------------------------------- /external_access.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weaveworks-guides/weave-kubernetes-coreos-azure/ad1601c9920b06e8f1cc70b72cbab48210c9a2e7/external_access.png -------------------------------------------------------------------------------- /initial_cluster.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weaveworks-guides/weave-kubernetes-coreos-azure/ad1601c9920b06e8f1cc70b72cbab48210c9a2e7/initial_cluster.png -------------------------------------------------------------------------------- /lib/azure_wrapper.js: -------------------------------------------------------------------------------- 1 | var _ = require('underscore'); 2 | 3 | var fs = require('fs'); 4 | var cp = require('child_process'); 5 | 6 | var yaml = require('js-yaml'); 7 | 8 | var openssl = require('openssl-wrapper'); 9 | 10 | var clr = require('colors'); 11 | var inspect = require('util').inspect; 12 | 13 | var util = require('./util.js'); 14 | 15 | var coreos_image_ids = { 16 | 'stable': '2b171e93f07c4903bcad35bda10acf22__CoreOS-Stable-899.13.0', 17 | 'beta': '2b171e93f07c4903bcad35bda10acf22__CoreOS-Beta-991.2.0', // untested 18 | 'alpha': '2b171e93f07c4903bcad35bda10acf22__CoreOS-Alpha-1000.0.0' // untested 19 | }; 20 | 21 | var conf = {}; 22 | 23 | var hosts = { 24 | collection: [], 25 | ssh_port_counter: 2200, 26 | }; 27 | 28 | var task_queue = []; 29 | 30 | exports.run_task_queue = function (dummy) { 31 | var tasks = { 32 | todo: task_queue, 33 | done: [], 34 | }; 35 | 36 | var pop_task = function() { 37 | console.log(clr.yellow('azure_wrapper/task:'), clr.grey(inspect(tasks))); 38 | var ret = {}; 39 | var cur = tasks.todo.shift(); 40 | if (cur !== undefined && cur instanceof Array) { 41 | ret.current = cur.filter(function(x) { return (x !== undefined) ; }); 42 | } else { 43 | ret.current = cur; 44 | } 45 | ret.remaining = tasks.todo.length; 46 | return ret; 47 | }; 48 | 49 | (function iter (task) { 50 | if (task.current === undefined) { 51 | if (conf.destroying === undefined) { 52 | create_ssh_conf(); 53 | save_state(); 54 | } 55 | return; 56 | } else { 57 | if (task.current.length !== 0) { 58 | console.log(clr.yellow('azure_wrapper/exec:'), clr.blue(inspect(task.current))); 59 | cp.fork('node_modules/azure-cli/bin/azure', task.current) 60 | .on('exit', function (code, signal) { 61 | tasks.done.push({ 62 | code: code, 63 | signal: signal, 64 | what: task.current.join(' '), 65 | remaining: task.remaining, 66 | }); 67 | if (code !== 0 && conf.destroying === undefined) { 68 | console.log(clr.red('azure_wrapper/fail: Exiting due to an error.')); 69 | save_state(); 70 | console.log(clr.cyan('azure_wrapper/info: You probably want to destroy and re-run.')); 71 | process.abort(); 72 | } else { 73 | iter(pop_task()); 74 | } 75 | }); 76 | } else { 77 | iter(pop_task()); 78 | } 79 | } 80 | })(pop_task()); 81 | }; 82 | 83 | var save_state = 
function () { 84 | var file_name = util.join_output_file_path(conf.name, 'deployment.yml'); 85 | try { 86 | conf.hosts = hosts.collection; 87 | fs.writeFileSync(file_name, yaml.safeDump(conf)); 88 | console.log(clr.yellow('azure_wrapper/info: Saved state into `%s`'), file_name); 89 | } catch (e) { 90 | console.log(clr.red(e)); 91 | } 92 | }; 93 | 94 | var load_state = function (file_name) { 95 | try { 96 | conf = yaml.safeLoad(fs.readFileSync(file_name, 'utf8')); 97 | console.log(clr.yellow('azure_wrapper/info: Loaded state from `%s`'), file_name); 98 | return conf; 99 | } catch (e) { 100 | console.log(clr.red(e)); 101 | } 102 | }; 103 | 104 | var create_ssh_key = function (prefix) { 105 | var opts = { 106 | x509: true, 107 | nodes: true, 108 | newkey: 'rsa:2048', 109 | subj: '/O=Weaveworks, Inc./L=London/C=GB/CN=weave.works', 110 | keyout: util.join_output_file_path(prefix, 'ssh.key'), 111 | out: util.join_output_file_path(prefix, 'ssh.pem'), 112 | }; 113 | openssl.exec('req', opts, function (err, buffer) { 114 | if (err) console.log(clr.red(err)); 115 | openssl.exec('rsa', { in: opts.keyout, out: opts.keyout }, function (err, buffer) { 116 | if (err) console.log(clr.red(err)); 117 | fs.chmod(opts.keyout, '0600', function (err) { 118 | if (err) console.log(clr.red(err)); 119 | }); 120 | }); 121 | }); 122 | return { 123 | key: opts.keyout, 124 | pem: opts.out, 125 | } 126 | } 127 | 128 | var create_ssh_conf = function () { 129 | var file_name = util.join_output_file_path(conf.name, 'ssh_conf'); 130 | var ssh_conf_head = [ 131 | "Host *", 132 | "\tHostname " + conf.resources['service'] + ".cloudapp.net", 133 | "\tUser core", 134 | "\tCompression yes", 135 | "\tLogLevel FATAL", 136 | "\tStrictHostKeyChecking no", 137 | "\tUserKnownHostsFile /dev/null", 138 | "\tIdentitiesOnly yes", 139 | "\tIdentityFile " + conf.resources['ssh_key']['key'], 140 | "\n", 141 | ]; 142 | 143 | fs.writeFileSync(file_name, ssh_conf_head.concat(_.map(hosts.collection, function (host) { 144 | return _.template("Host <%= name %>\n\tPort <%= port %>\n")(host); 145 | })).join('\n')); 146 | console.log(clr.yellow('azure_wrapper/info:'), clr.green('Saved SSH config, you can use it like so: `ssh -F ', file_name, '`')); 147 | console.log(clr.yellow('azure_wrapper/info:'), clr.green('The hosts in this deployment are:\n'), _.map(hosts.collection, function (host) { return host.name; })); 148 | }; 149 | 150 | var get_location = function () { 151 | if (process.env['AZ_AFFINITY']) { 152 | return '--affinity-group=' + process.env['AZ_AFFINITY']; 153 | } else if (process.env['AZ_LOCATION']) { 154 | return '--location=' + process.env['AZ_LOCATION']; 155 | } else { 156 | return '--location=West Europe'; 157 | } 158 | } 159 | var get_vm_size = function () { 160 | if (process.env['AZ_VM_SIZE']) { 161 | return '--vm-size=' + process.env['AZ_VM_SIZE']; 162 | } else { 163 | return '--vm-size=Small'; 164 | } 165 | } 166 | 167 | var get_subscription = function () { 168 | if (process.env['AZ_SUBSCRIPTION']) { 169 | return '--subscription=' + process.env['AZ_SUBSCRIPTION']; 170 | } 171 | } 172 | 173 | exports.queue_default_network = function () { 174 | task_queue.push([ 175 | 'network', 'vnet', 'create', 176 | get_location(), 177 | '--address-space=172.16.0.0', 178 | get_subscription(), 179 | conf.resources['vnet'], 180 | ]); 181 | } 182 | 183 | exports.queue_storage_if_needed = function() { 184 | if (!process.env['AZURE_STORAGE_ACCOUNT']) { 185 | conf.resources['storage_account'] = util.rand_suffix; 186 | task_queue.push([ 187 | 'storage', 
'account', 'create', 188 | '--type=LRS', 189 | get_location(), 190 | get_subscription(), 191 | conf.resources['storage_account'], 192 | ]); 193 | process.env['AZURE_STORAGE_ACCOUNT'] = conf.resources['storage_account']; 194 | } else { 195 | // Preserve it for resizing, so we don't create a new one by accident, 196 | // when the environment variable is unset 197 | conf.resources['storage_account'] = process.env['AZURE_STORAGE_ACCOUNT']; 198 | } 199 | }; 200 | 201 | exports.queue_machines = function (name_prefix, coreos_update_channel, cloud_config_creator) { 202 | var x = conf.nodes[name_prefix]; 203 | var vm_create_base_args = [ 204 | 'vm', 'create', 205 | get_location(), 206 | get_vm_size(), 207 | '--connect=' + conf.resources['service'], 208 | '--virtual-network-name=' + conf.resources['vnet'], 209 | '--no-ssh-password', 210 | '--ssh-cert=' + conf.resources['ssh_key']['pem'], 211 | get_subscription(), 212 | ]; 213 | 214 | var cloud_config = cloud_config_creator(x, conf); 215 | 216 | var next_host = function (n) { 217 | hosts.ssh_port_counter += 1; 218 | var host = { name: util.hostname(n, name_prefix), port: hosts.ssh_port_counter }; 219 | if (cloud_config instanceof Array) { 220 | host.cloud_config_file = cloud_config[n]; 221 | } else { 222 | host.cloud_config_file = cloud_config; 223 | } 224 | hosts.collection.push(host); 225 | return _.map([ 226 | "--vm-name=<%= name %>", 227 | "--ssh=<%= port %>", 228 | "--custom-data=<%= cloud_config_file %>", 229 | ], function (arg) { return _.template(arg)(host); }); 230 | }; 231 | 232 | task_queue = task_queue.concat(_(x).times(function (n) { 233 | if (conf.resizing && n < conf.old_size) { 234 | return []; 235 | } else { 236 | return vm_create_base_args.concat(next_host(n), [ 237 | coreos_image_ids[coreos_update_channel], 'core', 238 | ]); 239 | } 240 | })); 241 | }; 242 | 243 | exports.create_config = function (name, nodes) { 244 | conf = { 245 | name: name, 246 | nodes: nodes, 247 | weave_salt: util.rand_string(), 248 | resources: { 249 | vnet: [name, 'internal-vnet', util.rand_suffix].join('-'), 250 | service: [name, util.rand_suffix].join('-'), 251 | ssh_key: create_ssh_key(name), 252 | } 253 | }; 254 | 255 | }; 256 | 257 | exports.destroy_cluster = function (state_file) { 258 | load_state(state_file); 259 | if (conf.hosts === undefined) { 260 | console.log(clr.red('azure_wrapper/fail: Nothing to delete.')); 261 | process.abort(); 262 | } 263 | 264 | conf.destroying = true; 265 | task_queue = _.map(conf.hosts, function (host) { 266 | return ['vm', 'delete', '--quiet', '--blob-delete', host.name, get_subscription()]; 267 | }); 268 | 269 | task_queue.push(['network', 'vnet', 'delete', '--quiet', conf.resources['vnet'], get_subscription()]); 270 | task_queue.push(['storage', 'account', 'delete', '--quiet', conf.resources['storage_account'], get_subscription()]); 271 | 272 | exports.run_task_queue(); 273 | }; 274 | 275 | exports.load_state_for_resizing = function (state_file, node_type, new_nodes) { 276 | load_state(state_file); 277 | if (conf.hosts === undefined) { 278 | console.log(clr.red('azure_wrapper/fail: Nothing to look at.')); 279 | process.abort(); 280 | } 281 | conf.resizing = true; 282 | conf.old_size = conf.nodes[node_type]; 283 | conf.old_state_file = state_file; 284 | conf.nodes[node_type] += new_nodes; 285 | hosts.collection = conf.hosts; 286 | hosts.ssh_port_counter += conf.hosts.length; 287 | process.env['AZURE_STORAGE_ACCOUNT'] = conf.resources['storage_account']; 288 | } 289 | 
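}

// How resizing works: scale-kubernetes-cluster.js calls load_state_for_resizing()
// before queue_machines(); it bumps conf.nodes[node_type] and records conf.old_size
// and conf.resizing, so that queue_machines() skips the first old_size hosts and
// only queues `azure vm create` tasks for the newly added ones.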
-------------------------------------------------------------------------------- /lib/cloud_config.js: -------------------------------------------------------------------------------- 1 | var _ = require('underscore'); 2 | var fs = require('fs'); 3 | var yaml = require('js-yaml'); 4 | var colors = require('colors/safe'); 5 | 6 | var write_cloud_config_from_object = function (data, output_file) { 7 | try { 8 | fs.writeFileSync(output_file, [ 9 | '#cloud-config', 10 | yaml.safeDump(data), 11 | ].join("\n")); 12 | return output_file; 13 | } catch (e) { 14 | console.log(colors.red(e)); 15 | } 16 | }; 17 | 18 | exports.generate_environment_file_entry_from_object = function (hostname, environ) { 19 | var data = { 20 | hostname: hostname, 21 | environ_array: _.map(environ, function (value, key) { 22 | return [key.toUpperCase(), JSON.stringify(value.toString())].join('='); 23 | }), 24 | }; 25 | 26 | return { 27 | permissions: '0600', 28 | owner: 'root', 29 | content: _.template("<%= environ_array.join('\\n') %>\n")(data), 30 | path: _.template("/etc/weave.<%= hostname %>.env")(data), 31 | }; 32 | }; 33 | 34 | exports.process_template = function (input_file, output_file, processor) { 35 | var data = {}; 36 | try { 37 | data = yaml.safeLoad(fs.readFileSync(input_file, 'utf8')); 38 | } catch (e) { 39 | console.log(colors.red(e)); 40 | } 41 | return write_cloud_config_from_object(processor(_.clone(data)), output_file); 42 | }; 43 | 44 | exports.write_files_from = function (local_dir, remote_dir) { 45 | try { 46 | return _.map(fs.readdirSync(local_dir), function (fn) { 47 | return { 48 | path: [remote_dir, fn].join('/'), 49 | owner: 'root', 50 | permissions: '0640', 51 | encoding: 'base64', 52 | content: fs.readFileSync([local_dir, fn].join('/')).toString('base64'), 53 | }; 54 | }); 55 | } catch (e) { 56 | console.log(colors.red(e)); 57 | } 58 | }; 59 | -------------------------------------------------------------------------------- /lib/deployment_logic/kubernetes.js: -------------------------------------------------------------------------------- 1 | var _ = require('underscore'); 2 | _.mixin(require('underscore.string').exports()); 3 | 4 | var util = require('../util.js'); 5 | var cloud_config = require('../cloud_config.js'); 6 | 7 | 8 | etcd_initial_cluster_conf_self = function (conf) { 9 | var port = '2380'; 10 | 11 | var data = { 12 | nodes: _(conf.nodes.etcd).times(function (n) { 13 | var host = util.hostname(n, 'etcd'); 14 | return [host, [host, port].join(':')].join('=http://'); 15 | }), 16 | }; 17 | 18 | return { 19 | 'name': 'etcd2.service', 20 | 'drop-ins': [{ 21 | 'name': '50-etcd-initial-cluster.conf', 22 | 'content': _.template("[Service]\nEnvironment=ETCD_INITIAL_CLUSTER=<%= nodes.join(',') %>\n")(data), 23 | }], 24 | }; 25 | }; 26 | 27 | etcd_initial_cluster_conf_kube = function (conf) { 28 | var port = '4001'; 29 | 30 | var data = { 31 | nodes: _(conf.nodes.etcd).times(function (n) { 32 | var host = util.hostname(n, 'etcd'); 33 | return 'http://' + [host, port].join(':'); 34 | }), 35 | }; 36 | 37 | return { 38 | 'name': 'kube-apiserver.service', 39 | 'drop-ins': [{ 40 | 'name': '50-etcd-initial-cluster.conf', 41 | 'content': _.template("[Service]\nEnvironment=ETCD_SERVERS=--etcd-servers=<%= nodes.join(',') %>\n")(data), 42 | }], 43 | }; 44 | }; 45 | 46 | exports.create_etcd_cloud_config = function (node_count, conf) { 47 | var input_file = './cloud_config_templates/kubernetes-cluster-etcd-node-template.yml'; 48 | var output_file = 
util.join_output_file_path('kubernetes-cluster-etcd-nodes', 'generated.yml'); 49 | 50 | return cloud_config.process_template(input_file, output_file, function(data) { 51 | data.coreos.units.push(etcd_initial_cluster_conf_self(conf)); 52 | return data; 53 | }); 54 | }; 55 | 56 | exports.create_node_cloud_config = function (node_count, conf) { 57 | var elected_node = 0; 58 | 59 | var input_file = './cloud_config_templates/kubernetes-cluster-main-nodes-template.yml'; 60 | var output_file = util.join_output_file_path('kubernetes-cluster-main-nodes', 'generated.yml'); 61 | 62 | var make_node_config = function (n) { 63 | return cloud_config.generate_environment_file_entry_from_object(util.hostname(n, 'kube'), { 64 | weave_password: conf.weave_salt, 65 | weave_peers: n === elected_node ? "" : util.hostname(elected_node, 'kube'), 66 | breakout_route: util.ipv4([10, 2, 0, 0], 16), 67 | bridge_address_cidr: util.ipv4([10, 2, n, 1], 24), 68 | }); 69 | }; 70 | 71 | var write_files_extra = cloud_config.write_files_from('addons', '/etc/kubernetes/addons'); 72 | return cloud_config.process_template(input_file, output_file, function(data) { 73 | data.write_files = data.write_files.concat(_(node_count).times(make_node_config), write_files_extra); 74 | data.coreos.units.push(etcd_initial_cluster_conf_kube(conf)); 75 | return data; 76 | }); 77 | }; 78 | -------------------------------------------------------------------------------- /lib/util.js: -------------------------------------------------------------------------------- 1 | var _ = require('underscore'); 2 | _.mixin(require('underscore.string').exports()); 3 | 4 | exports.ipv4 = function (ocets, prefix) { 5 | return { 6 | ocets: ocets, 7 | prefix: prefix, 8 | toString: function () { 9 | return [ocets.join('.'), prefix].join('/'); 10 | } 11 | } 12 | }; 13 | 14 | exports.hostname = function hostname (n, prefix) { 15 | return _.template("<%= pre %>-<%= seq %>")({ 16 | pre: prefix || 'core', 17 | seq: _.pad(n, 2, '0'), 18 | }); 19 | }; 20 | 21 | exports.rand_string = function () { 22 | var crypto = require('crypto'); 23 | var shasum = crypto.createHash('sha256'); 24 | shasum.update(crypto.randomBytes(256)); 25 | return shasum.digest('hex'); 26 | }; 27 | 28 | 29 | exports.rand_suffix = exports.rand_string().substring(50); 30 | 31 | exports.join_output_file_path = function(prefix, suffix) { 32 | return './output/' + [prefix, exports.rand_suffix, suffix].join('_'); 33 | }; 34 | -------------------------------------------------------------------------------- /output/.gitignore: -------------------------------------------------------------------------------- 1 | * 2 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "coreos-azure-weave", 3 | "version": "1.0.0", 4 | "description": "Small utility to bring up a woven CoreOS cluster", 5 | "main": "index.js", 6 | "scripts": { 7 | "test": "echo \"Error: no test specified\" && exit 1" 8 | }, 9 | "author": "Ilya Dmitrichenko ", 10 | "license": "Apache 2.0", 11 | "dependencies": { 12 | "azure-cli": "^0.9.19", 13 | "colors": "^1.1.2", 14 | "js-yaml": "^3.5.5", 15 | "openssl-wrapper": "^0.2.3", 16 | "underscore": "^1.8.3", 17 | "underscore.string": "^3.3.4" 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /scale-kubernetes-cluster.js: -------------------------------------------------------------------------------- 1 | 
#!/usr/bin/env node 2 | 3 | var azure = require('./lib/azure_wrapper.js'); 4 | var kube = require('./lib/deployment_logic/kubernetes.js'); 5 | 6 | azure.load_state_for_resizing(process.argv[2], 'kube', parseInt(process.argv[3] || 1)); 7 | 8 | var coreos_update_channel = process.env['AZ_VM_COREOS_CHANNEL'] || 'stable'; 9 | 10 | azure.run_task_queue([ 11 | azure.queue_machines('kube', coreos_update_channel, kube.create_node_cloud_config), 12 | ]); 13 | --------------------------------------------------------------------------------