├── .gitignore
├── .travis.yml
├── LICENSE.txt
├── README.md
├── README_cn.md
├── RELEASE.md
├── bsroot.sh
├── bsroot_test.bats
├── doc
├── centos-design.md
└── sextant-design.md
├── docker
├── .gitignore
├── Dockerfile
├── dhcp.sh
└── entrypoint.sh
├── fabric
├── README_CN.md
├── get_mac_ip_host.py
├── gpu_driver.py
├── hosts.template.yaml
├── k8s.py
├── set_hosts.py
├── upgrade_kernel.py
└── upgrade_kernel.sh
├── golang
├── addons
│ ├── README.md
│ ├── addons.go
│ ├── addons_test.go
│ └── template
│ │ ├── dashboard-controller.template
│ │ ├── dashboard-service.yaml
│ │ ├── default-backend-svc.yaml
│ │ ├── default-backend.template
│ │ ├── dnsmasq.conf.template
│ │ ├── grafana-service.yaml
│ │ ├── heapster-controller.template
│ │ ├── heapster-service.yaml
│ │ ├── influxdb-grafana-controller.template
│ │ ├── influxdb-service.yaml
│ │ ├── ingress.template
│ │ ├── kubedns-controller.template
│ │ └── kubedns-svc.template
├── certgen
│ ├── certgen.go
│ ├── certgen_test.go
│ ├── cmd.go
│ └── cmd_test.go
├── cloud-config-server
│ ├── README.md
│ ├── build.sh
│ ├── server.go
│ └── server_test.go
├── clusterdesc
│ ├── README.md
│ ├── config.go
│ ├── config_test.go
│ ├── etcd.go
│ ├── etcd_test.go
│ ├── linux_distro.go
│ ├── master.go
│ └── master_test.go
├── template
│ ├── cluster-desc.sample.yaml
│ ├── template.go
│ ├── template_test.go
│ └── templatefiles
│ │ ├── cc-centos-post.template
│ │ ├── cc-centos.template
│ │ ├── cc-common.template
│ │ ├── cc-coreos.template
│ │ └── cloud-config.template
└── validate-yaml
│ ├── validate-yaml.go
│ └── validate-yaml_test.go
├── install-ceph
├── README.md
├── install-mon.sh
└── install-osd.sh
├── logo
└── Sextant.png
├── scripts
├── centos.sh
├── centos
│ └── gpu
│ │ ├── build_centos_gpu_drivers.sh
│ │ └── nvidia-gpu-mkdev.sh
├── common.sh
├── common
│ └── addons.sh
├── coreos.sh
├── coreos
│ └── install.sh
├── coreos_gpu
│ ├── Dockerfile
│ ├── README.md
│ ├── _container_build.sh
│ ├── _export.sh
│ ├── build.sh
│ ├── check.sh
│ └── setup_gpu.sh
├── load_yaml.sh
└── log.sh
├── setup-kubectl.bash
├── start_bootstrapper_container.sh
├── testdata
└── example.yaml
└── vm-cluster
├── README.md
├── Vagrantfile
├── cluster-desc.yml.template
├── prepare_install_bootstrapper.sh
└── provision_bootstrapper_vm.sh
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | *~
3 | *.test
4 | vm
5 | bsroot
6 | vm-cluster/.vagrant
7 | cluster-desc.yml
8 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: go
2 |
3 | go:
4 | - 1.6
5 |
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
1 | Copyright 2016 The Sextant Authors. All rights reserved.
2 |
3 | Apache License
4 | Version 2.0, January 2004
5 | http://www.apache.org/licenses/
6 |
7 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
8 |
9 | 1. Definitions.
10 |
11 | "License" shall mean the terms and conditions for use, reproduction,
12 | and distribution as defined by Sections 1 through 9 of this document.
13 |
14 | "Licensor" shall mean the copyright owner or entity authorized by
15 | the copyright owner that is granting the License.
16 |
17 | "Legal Entity" shall mean the union of the acting entity and all
18 | other entities that control, are controlled by, or are under common
19 | control with that entity. For the purposes of this definition,
20 | "control" means (i) the power, direct or indirect, to cause the
21 | direction or management of such entity, whether by contract or
22 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
23 | outstanding shares, or (iii) beneficial ownership of such entity.
24 |
25 | "You" (or "Your") shall mean an individual or Legal Entity
26 | exercising permissions granted by this License.
27 |
28 | "Source" form shall mean the preferred form for making modifications,
29 | including but not limited to software source code, documentation
30 | source, and configuration files.
31 |
32 | "Object" form shall mean any form resulting from mechanical
33 | transformation or translation of a Source form, including but
34 | not limited to compiled object code, generated documentation,
35 | and conversions to other media types.
36 |
37 | "Work" shall mean the work of authorship, whether in Source or
38 | Object form, made available under the License, as indicated by a
39 | copyright notice that is included in or attached to the work
40 | (an example is provided in the Appendix below).
41 |
42 | "Derivative Works" shall mean any work, whether in Source or Object
43 | form, that is based on (or derived from) the Work and for which the
44 | editorial revisions, annotations, elaborations, or other modifications
45 | represent, as a whole, an original work of authorship. For the purposes
46 | of this License, Derivative Works shall not include works that remain
47 | separable from, or merely link (or bind by name) to the interfaces of,
48 | the Work and Derivative Works thereof.
49 |
50 | "Contribution" shall mean any work of authorship, including
51 | the original version of the Work and any modifications or additions
52 | to that Work or Derivative Works thereof, that is intentionally
53 | submitted to Licensor for inclusion in the Work by the copyright owner
54 | or by an individual or Legal Entity authorized to submit on behalf of
55 | the copyright owner. For the purposes of this definition, "submitted"
56 | means any form of electronic, verbal, or written communication sent
57 | to the Licensor or its representatives, including but not limited to
58 | communication on electronic mailing lists, source code control systems,
59 | and issue tracking systems that are managed by, or on behalf of, the
60 | Licensor for the purpose of discussing and improving the Work, but
61 | excluding communication that is conspicuously marked or otherwise
62 | designated in writing by the copyright owner as "Not a Contribution."
63 |
64 | "Contributor" shall mean Licensor and any individual or Legal Entity
65 | on behalf of whom a Contribution has been received by Licensor and
66 | subsequently incorporated within the Work.
67 |
68 | 2. Grant of Copyright License. Subject to the terms and conditions of
69 | this License, each Contributor hereby grants to You a perpetual,
70 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
71 | copyright license to reproduce, prepare Derivative Works of,
72 | publicly display, publicly perform, sublicense, and distribute the
73 | Work and such Derivative Works in Source or Object form.
74 |
75 | 3. Grant of Patent License. Subject to the terms and conditions of
76 | this License, each Contributor hereby grants to You a perpetual,
77 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
78 | (except as stated in this section) patent license to make, have made,
79 | use, offer to sell, sell, import, and otherwise transfer the Work,
80 | where such license applies only to those patent claims licensable
81 | by such Contributor that are necessarily infringed by their
82 | Contribution(s) alone or by combination of their Contribution(s)
83 | with the Work to which such Contribution(s) was submitted. If You
84 | institute patent litigation against any entity (including a
85 | cross-claim or counterclaim in a lawsuit) alleging that the Work
86 | or a Contribution incorporated within the Work constitutes direct
87 | or contributory patent infringement, then any patent licenses
88 | granted to You under this License for that Work shall terminate
89 | as of the date such litigation is filed.
90 |
91 | 4. Redistribution. You may reproduce and distribute copies of the
92 | Work or Derivative Works thereof in any medium, with or without
93 | modifications, and in Source or Object form, provided that You
94 | meet the following conditions:
95 |
96 | (a) You must give any other recipients of the Work or
97 | Derivative Works a copy of this License; and
98 |
99 | (b) You must cause any modified files to carry prominent notices
100 | stating that You changed the files; and
101 |
102 | (c) You must retain, in the Source form of any Derivative Works
103 | that You distribute, all copyright, patent, trademark, and
104 | attribution notices from the Source form of the Work,
105 | excluding those notices that do not pertain to any part of
106 | the Derivative Works; and
107 |
108 | (d) If the Work includes a "NOTICE" text file as part of its
109 | distribution, then any Derivative Works that You distribute must
110 | include a readable copy of the attribution notices contained
111 | within such NOTICE file, excluding those notices that do not
112 | pertain to any part of the Derivative Works, in at least one
113 | of the following places: within a NOTICE text file distributed
114 | as part of the Derivative Works; within the Source form or
115 | documentation, if provided along with the Derivative Works; or,
116 | within a display generated by the Derivative Works, if and
117 | wherever such third-party notices normally appear. The contents
118 | of the NOTICE file are for informational purposes only and
119 | do not modify the License. You may add Your own attribution
120 | notices within Derivative Works that You distribute, alongside
121 | or as an addendum to the NOTICE text from the Work, provided
122 | that such additional attribution notices cannot be construed
123 | as modifying the License.
124 |
125 | You may add Your own copyright statement to Your modifications and
126 | may provide additional or different license terms and conditions
127 | for use, reproduction, or distribution of Your modifications, or
128 | for any such Derivative Works as a whole, provided Your use,
129 | reproduction, and distribution of the Work otherwise complies with
130 | the conditions stated in this License.
131 |
132 | 5. Submission of Contributions. Unless You explicitly state otherwise,
133 | any Contribution intentionally submitted for inclusion in the Work
134 | by You to the Licensor shall be under the terms and conditions of
135 | this License, without any additional terms or conditions.
136 | Notwithstanding the above, nothing herein shall supersede or modify
137 | the terms of any separate license agreement you may have executed
138 | with Licensor regarding such Contributions.
139 |
140 | 6. Trademarks. This License does not grant permission to use the trade
141 | names, trademarks, service marks, or product names of the Licensor,
142 | except as required for reasonable and customary use in describing the
143 | origin of the Work and reproducing the content of the NOTICE file.
144 |
145 | 7. Disclaimer of Warranty. Unless required by applicable law or
146 | agreed to in writing, Licensor provides the Work (and each
147 | Contributor provides its Contributions) on an "AS IS" BASIS,
148 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
149 | implied, including, without limitation, any warranties or conditions
150 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
151 | PARTICULAR PURPOSE. You are solely responsible for determining the
152 | appropriateness of using or redistributing the Work and assume any
153 | risks associated with Your exercise of permissions under this License.
154 |
155 | 8. Limitation of Liability. In no event and under no legal theory,
156 | whether in tort (including negligence), contract, or otherwise,
157 | unless required by applicable law (such as deliberate and grossly
158 | negligent acts) or agreed to in writing, shall any Contributor be
159 | liable to You for damages, including any direct, indirect, special,
160 | incidental, or consequential damages of any character arising as a
161 | result of this License or out of the use or inability to use the
162 | Work (including but not limited to damages for loss of goodwill,
163 | work stoppage, computer failure or malfunction, or any and all
164 | other commercial damages or losses), even if such Contributor
165 | has been advised of the possibility of such damages.
166 |
167 | 9. Accepting Warranty or Additional Liability. While redistributing
168 | the Work or Derivative Works thereof, You may choose to offer,
169 | and charge a fee for, acceptance of support, warranty, indemnity,
170 | or other liability obligations and/or rights consistent with this
171 | License. However, in accepting such obligations, You may act only
172 | on Your own behalf and on Your sole responsibility, not on behalf
173 | of any other Contributor, and only if You agree to indemnify,
174 | defend, and hold each Contributor harmless for any liability
175 | incurred by, or claims asserted against, such Contributor by reason
176 | of your accepting any such warranty or additional liability.
177 |
178 | END OF TERMS AND CONDITIONS
179 |
180 | APPENDIX: How to apply the Apache License to your work.
181 |
182 | To apply the Apache License to your work, attach the following
183 | boilerplate notice, with the fields enclosed by brackets "[]"
184 | replaced with your own identifying information. (Don't include
185 | the brackets!) The text should be enclosed in the appropriate
186 | comment syntax for the file format. We also recommend that a
187 | file or class name and description of purpose be included on the
188 | same "printed page" as the copyright notice for easier
189 | identification within third-party archives.
190 |
191 | Copyright 2016 The Sextant Authors.
192 |
193 | Licensed under the Apache License, Version 2.0 (the "License");
194 | you may not use this file except in compliance with the License.
195 | You may obtain a copy of the License at
196 |
197 | http://www.apache.org/licenses/LICENSE-2.0
198 |
199 | Unless required by applicable law or agreed to in writing, software
200 | distributed under the License is distributed on an "AS IS" BASIS,
201 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
202 | See the License for the specific language governing permissions and
203 | limitations under the License.
204 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [![Build Status](https://travis-ci.org/k8sp/sextant.svg?branch=master)](https://travis-ci.org/k8sp/sextant)
2 |
3 | # Sextant
4 |
5 |
6 | Sextant initializes a cluster installed with CoreOS and Kubernetes using PXE.
7 |
8 | # Environment setup
9 | Bootstrapper will be running on a machine (AKA: the bootstrapper server), which needs to meet the following requirements:
10 |
11 | 1. The kubernetes machines waiting for install need to be connected with bootstrapper server.
12 | 2. Bootstrapper server is a linux server with docker daemon(1.11 or later) installed.
13 | 3. Have root access of the bootstrapper server.
14 |
15 | # Configurations and download image files that bootstrapper needs.
16 |
17 | ***The following steps will prepare the environment, generate configurations and build docker images.***
18 | * If there's no internet access on the bootstrapper server, you can copy the pre-downloaded `/bsroot` directory to it.
19 |
20 | After getting the sextant code, you need to plan the cluster installation details by editing `golang/template/cluster-desc.sample.yaml`. Then build bootstrapper to the `./bsroot` directory.
21 |
22 | ```
23 | go get -u -d github.com/k8sp/sextant/...
24 | cd $GOPATH/src/github.com/k8sp/sextant
25 | vim golang/template/cluster-desc.sample.yaml
26 | ./bsroot.sh golang/template/cluster-desc.sample.yaml
27 | ```
28 |
29 | # Uploaded to the bootstrapper server
30 |
31 | If the above steps were done on the bootstrapper server, you can skip this step.
32 |
33 | 1. Pack the directory `./bsroot`: `tar czvf bsroot.tar.gz ./bsroot`
34 | 2. Upload `bsroot.tar.gz` to the bootstrapper server.(using tools such as SCP or FTP)
35 | 3. Extract `bsroot.tar.gz` to `/` directory on bootstrapper server.
36 |
37 | # Start bootstrapper
38 |
39 | ```
40 | ssh root@bootstrapper
41 | cd /bsroot
42 | ./start_bootstrapper_container.sh /bsroot
43 | ```
44 |
45 | # Setup kubernetes cluster using the bootstrapper
46 |
47 | Just set the kubernetes nodes to boot through PXE and reboot the machines; they will then complete the Kubernetes and Ceph installation automatically.
48 |
49 | # Using kubernetes cluster
50 |
51 | ## Configure the kubectl client
52 |
53 | ```
54 | scp root@bootstrapper:/bsroot/setup-kubectl.bash ./
55 | ./setup-kubectl.bash
56 | ```
57 |
58 | ## Verify kubectl configuration and connection
59 |
60 | Execute the following command and verify whether the client has been properly configured according to the returned result.
61 |
62 | ```
63 | bootstrapper ~ # ./kubectl get nodes
64 | NAME STATUS AGE
65 | 08-00-27-4a-2d-a1 Ready,SchedulingDisabled 1m
66 | ```
67 |
68 | ## Using Ceph cluster
69 |
70 | After the cluster installation is complete, you can use the following command to obtain admin keyring for the later use.
71 |
72 | ```
73 | etcdctl --endpoints http://08-00-27-ef-d2-12:2379 get /ceph-config/ceph/adminKeyring
74 | ```
75 |
76 | For example, mount a directory with CephFS.
77 |
78 | ```
79 | mount -t ceph 192.168.8.112:/ /ceph -o name=admin,secret=[your secret]
80 | ```
81 |
82 | # Cluster maintenance
83 |
84 | ## How to update the certificates after the cluster has been running for some time
85 |
86 | 1. Edit the configuration `openssl.cnf` in `certgen.go`.
87 |
88 | ```
89 | [req]
90 | req_extensions = v3_req
91 | distinguished_name = req_distinguished_name
92 | [req_distinguished_name]
93 | [ v3_req ]
94 | basicConstraints = CA:FALSE
95 | keyUsage = nonRepudiation, digitalSignature, keyEncipherment
96 | subjectAltName = @alt_names
97 | [alt_names]
98 | DNS.1 = kubernetes
99 | DNS.2 = kubernetes.default
100 | DNS.3 = kubernetes.default.svc
101 | DNS.4 = kubernetes.default.svc.cluster.local
102 | DNS.5 = 10.10.10.201
103 | IP.1 = 10.100.0.1
104 | ```
105 | 2. Regenerate api-server.pem and other files according to the openssl.cnf: https://coreos.com/kubernetes/docs/latest/openssl.html
106 | 3. Restart master processes, including api-server,controller-manager,scheduler,kube-proxy
107 | 4. Delete the default secret under the kube-system/default namespace using kubectl delete secret
108 | 5. Resubmit failed service.
109 |
--------------------------------------------------------------------------------
/README_cn.md:
--------------------------------------------------------------------------------
1 | [![Build Status](https://travis-ci.org/k8sp/sextant.svg?branch=master)](https://travis-ci.org/k8sp/sextant)
2 |
3 | # sextant
4 |
5 |
6 | sextant 提供了可以通过PXE全自动化安装初始化一个CoreOS+kubernetes集群。
7 |
8 | ## 环境准备
9 | bootstrapper需要运行在一台服务器上(以下称bootstrapper server),满足以下的几个要求:
10 |
11 | 1. 待初始化的kubernetes机器需要和bootstrapper server保持网络连通
12 | 1. bootstrapper server是一台安装有docker daemon(***1.11以上版本***)的Linux服务器
13 | 1. 拥有bootstrapper server的root权限
14 | 1. 配置bootstrapper server的/etc/hosts文件,增加hostname的解析:```127.0.0.1 bootstrapper```
15 |
16 | ## 初始化配置和准备bootstrapper需要的镜像文件
17 | ***在能访问互联网的一台机器上完成下面的准备环境,配置,创建Docker镜像的步骤***
18 | * 注:如果bootstrapper机器没有互联网访问,可以事先准备好/bsroot目录然后上传到bootstrapper server
19 |
20 | 获取sextant代码后,根据要初始化的整体集群规划,
21 | 编辑cloud-config-server/template/cluster-desc.sample.yaml文件完成配置
22 | 然后下载bootstrapper用到的文件到/bsroot目录下
23 | ```
24 | go get -u -d github.com/k8sp/sextant/...
25 | cd $GOPATH/src/github.com/k8sp/sextant
26 | vim cloud-config-server/template/cluster-desc.sample.yaml
27 | ./bsroot.sh cloud-config-server/template/cluster-desc.sample.yaml
28 | ```
29 |
30 | ## 上传到集群内部的bootstrapper机器
31 | 如果上述步骤是在bootstrapper服务器上完成的,则可以跳过此步骤。
32 |
33 | 1. 手动打包./bsroot目录:```tar czf bsroot.tar.gz ./bsroot```
34 | 1. 将bsroot.tar.gz上传到你的bootstrapper机器上(使用scp或ftp等工具)
35 | 1. 在bootstrapper机器上解压bsroot.tar.gz到/目录
36 |
37 | ## 启动bootstrapper
38 | ```
39 | ssh root@bootstrapper
40 | cd /bsroot
41 | ./start_bootstrapper_container.sh /bsroot
42 | ```
43 |
44 | ## 通过bootstrapper来初始化您的kubernetes集群
45 | ***只需要设置kubernetes节点通过PXE网络引导,并开机(和bootstrapper网络联通),就可以自动完成kubernetes和ceph安装***
46 |
47 | ## 使用集群
48 |
49 | ### 配置kubectl客户端
50 | ```
51 | scp root@bootstrapper:/bsroot/setup-kubectl.bash ./
52 | ./setup-kubectl.bash
53 | ```
54 |
55 | ### 测试kubectl客户端可用
56 | 执行下面的命令,观察返回结果是否正常,判断是否已经成客户端的正确配置:
57 | ```
58 | bootstrapper ~ # ./kubectl get nodes
59 | NAME STATUS AGE
60 | 08-00-27-4a-2d-a1 Ready,SchedulingDisabled 1m
61 | ```
62 |
63 | ### 使用ceph集群
64 | 在集群安装完成之后,可以使用下面的命令获得admin keyring作为后续使用
65 | ```
66 | etcdctl --endpoints http://08-00-27-ef-d2-12:2379 get /ceph-config/ceph/adminKeyring
67 | ```
68 | 比如,需要使用cephFS mount目录:
69 | ```
70 | mount -t ceph 192.168.8.112:/ /ceph -o name=admin,secret=[your secret]
71 | ```
72 |
73 | ## 维护集群
74 |
75 | ### 集群初始化完成后如何更新master节点的证书
76 |
77 | 1.修改certgen.go中openssl.cnf的配置
78 | ```
79 | [req]
80 | req_extensions = v3_req
81 | distinguished_name = req_distinguished_name
82 | [req_distinguished_name]
83 | [ v3_req ]
84 | basicConstraints = CA:FALSE
85 | keyUsage = nonRepudiation, digitalSignature, keyEncipherment
86 | subjectAltName = @alt_names
87 | [alt_names]
88 | DNS.1 = kubernetes
89 | DNS.2 = kubernetes.default
90 | DNS.3 = kubernetes.default.svc
91 | DNS.4 = kubernetes.default.svc.cluster.local
92 | DNS.5 = 10.10.10.201
93 | IP.1 = 10.100.0.1
94 | ```
95 | 1. 根据openssl.cnf,重新生成api-server.pem等文件:https://coreos.com/kubernetes/docs/latest/openssl.html
96 | 1. 重启master的相关进程,包括api-server, controller-manager, scheduler, kube-proxy
97 | 1. 使用kubectl delete secret删除kube-system/default namespace下的default secret
98 | 1. 重新提交失败的service
99 |
--------------------------------------------------------------------------------
/RELEASE.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/k8sp/sextant/0f4fec9ae68aa5eba689aeb1b7584977033ab907/RELEASE.md
--------------------------------------------------------------------------------
/bsroot.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# bsroot.sh prepares everything needed to run the sextant bootstrapper.
#
# Usage: bsroot.sh [$SEXTANT_DIR/bsroot]
#
# It does the following:
#  1. Creates a "bsroot" directory and downloads the contents needed:
#     1) PXE images
#     2) Linux images, currently CoreOS and CentOS7
#     3) docker images needed to deploy kubernetes and ceph
#     4) NVIDIA gpu drivers
#  2. Compiles cloud-config-server binaries in a docker container.
#  3. Generates configurations according to 'cluster-desc.yaml'.
#  4. Generates the root CA and api-server certs under 'ssl/'.
#  5. Packages the bootstrapper as a docker image, bundling dnsmasq, the
#     docker registry, cloud-config-server, and the scripts needed.
#
# ***** Important *****
# bsroot.sh considers the situation of an "offline cluster" that cannot
# connect to the internet directly, so all images/files are prepared under
# the generated "./bsroot" directory.  Copy this directory to the "real"
# bootstrap server when that server is offline, or run bsroot.sh directly
# on the bootstrap server.

# Defensive no-op: neutralize a distro-provided interactive `cp -i` alias.
alias cp='cp'

SEXTANT_ROOT=${PWD}
source $SEXTANT_ROOT/scripts/log.sh
source $SEXTANT_ROOT/scripts/common.sh

# The rest of this script refers to $SEXTANT_DIR, which this file never set;
# it is presumably exported by scripts/common.sh -- TODO(review): confirm.
# Default it to $SEXTANT_ROOT so the script also works standalone.
SEXTANT_DIR=${SEXTANT_DIR:-$SEXTANT_ROOT}

# Validate the runtime environment and the cluster description file passed
# on the command line (both helpers come from scripts/common.sh).
check_prerequisites
check_cluster_desc_file

echo "Install OS: ${cluster_desc_os_name}"
if [[ $cluster_desc_os_name == "CentOS" ]]; then
    # Prepare CentOS images, PXE/kickstart configs, and the rpm repo.
    source $SEXTANT_DIR/scripts/centos.sh
    download_centos_images
    generate_pxe_centos_config
    generate_kickstart_config
    generate_post_cloudinit_script
    generate_rpmrepo_config
    if [[ $cluster_desc_set_gpu == "y" ]]; then
        download_centos_gpu_drivers
    fi
elif [[ $cluster_desc_os_name == "CoreOS" ]]; then
    # Prepare CoreOS PXE images, the install script, and updated images.
    source $SEXTANT_DIR/scripts/coreos.sh
    check_coreos_version
    download_pxe_images
    generate_pxe_config
    update_coreos_images
    generate_install_script
    if [[ $cluster_desc_set_gpu == "y" ]]; then
        build_coreos_nvidia_gpu_drivers
    fi
else
    echo "Unsupported OS: ${cluster_desc_os_name}"
    # Exit statuses are unsigned 8-bit; `exit -1` would wrap to 255.
    exit 1
fi

# OS-independent preparation steps (defined by the sourced scripts).
generate_registry_config
generate_ceph_install_scripts
download_k8s_images
build_bootstrapper_image
generate_tls_assets
prepare_setup_kubectl
generate_addons_config
log info "bsroot done!"
68 |
--------------------------------------------------------------------------------
/bsroot_test.bats:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bats
2 |
3 | if [[ ! -f ./scripts/load_yaml.sh ]]; then
4 | echo "Please run bsroot_test.bats from the sextant directory, "
5 | echo "otherwise, bats would prevents us from finding bsroot_lib.bash"
6 | fi
7 | SEXTANT_DIR=$PWD
8 | source $SEXTANT_DIR/scripts/load_yaml.sh
9 |
10 | setup() {
11 | echo "setup ${BATS_TEST_NAME} ..." >> /tmp/bsroot_test_bats.log
12 | }
13 |
14 | teardown() {
15 | echo "teardown ${BATS_TEST_NAME} ..." >> /tmp/bsroot_test_bats.log
16 | }
17 |
18 | @test "check prerequisites" {
19 | check_prerequisites
20 | }
21 |
22 | @test "check load_yaml" {
23 | load_yaml testdata/example.yaml "bsroot_test_"
24 | [[ $bsroot_test_animal == "cat" ]]
25 | }
26 |
--------------------------------------------------------------------------------
/doc/centos-design.md:
--------------------------------------------------------------------------------
1 | # CentOS 自动安装与配置
2 |
3 | ## 目的
4 |
5 | 实现基于 CentOS 的 Kubernetes 集群及其附属组件和服务的自动安装与配置,在实现的过程中,尽量复用基于 CoreOS 安装配置的已有的脚本和功能模块。
6 |
7 | ## 方案
8 |
9 | ### 设计 Bootstrapper server 的功能
10 | 为了让机群中的机器可以全自动地安装 CentOS 和Kubernetes,并且加入Kubernete机群,我们需要将机群中一台有 static IP 的机器上,称为bootstrapper server。Bootstrapper server 具有以下功能:
11 | * 自动安装 CentOS
12 | * 自动配置 CentOS
13 | * 自动安装 Kubernetes
14 | * 为集群提供 DHCP + DNS 服务
15 | * 自动安装 GPU 驱动 (可选)
16 | * 提供 Ceph 存储服务 (可选)
17 |
18 | ### 实现 Bootstrapper server 的功能
19 |
20 | 为了方便地实现 Bootstrapper server 上述功能,我们将其封装在一个 Docker image 里,称为*bootstrapper image*。这个image在执行的时候,会挂载 Bootstrapper server 上本地文件系统里的一个目录,里面有上述各服务的配置文件,以及它们需要依赖的其他文件。为了方便,我们称这个目录为`bsroot`目录。
21 |
22 | 为了生成bsroot目录的内容,我们要运行 `bsroot_centos.sh`。这个脚本读取一个机群描述文件 `cluster-desc.yml`,分析其中信息,生成配置文件。下面分别介绍 `bsroot_centos.sh` 如何实现 Bootstrapper server 各个功能。
23 |
24 | * 自动安装 CentOS
25 |
26 | 此功能通过 *bootstrapper image* 里的 dnsmasq 服务实现
27 |
--------------------------------------------------------------------------------
/doc/sextant-design.md:
--------------------------------------------------------------------------------
1 | # Sextant
2 |
3 | Sextant是一套软件系统,简化Kubernetes机群的自动部署。Sextant之于Kubernetes类似RedHat之于Linux。
4 |
5 | ## 设计思路
6 |
7 | 为了让机群中的机器可以全自动地安装CoreOS和Kubernetes,并且加入Kubernete机群,我们需要将机群中一台有static IP的机器上运行PXE服务(DHCP+TFTP)。这台机器我们称为bootstrapper server。我们希望顺便利用这台bootstrapper server做机群中机器的IP和域名映射管理,所以需要DNS服务。以上都可以通过运行dnsmasq软件实现。此外,还需要运行我们自己开发的cloud-config-server来为机群中各个CoreOS服务器提供定制化的cloud-config文件,以及其他需要通过HTTP协议提供给机群的信息,包括CoreOS的安装镜像。
8 |
9 | 为了方便地部署上述服务,我们将其封装在一个Docker image里,称为*bootstrapper image*。这个image在执行的时候,会挂载bootstrapper server上本地文件系统里的一个目录,里面有上述各服务的配置文件,以及它们需要依赖的其他文件。为了方便,我们称这个目录为`bsroot`目录。
10 |
11 | 为了生成bsroot目录,我们要运行bsroot.sh。这个脚本读取一个机群描述文件 `cluster-desc.yml`,分析其中信息,生成配置文件。
12 |
13 | 因为bsroot目录里的很多文件需要预先下载,而下载需要翻墙,所以我们假设bsroot.sh是在一台“笔记本电脑”上执行的,这样我们可以抱着这台笔记本跑去“网吧”翻墙上网。随后,我们回到公司,把笔记本上生成的bsroot目录需要被拷贝到 bootstrapper server上,并且启动bootstrapper container。
14 |
15 | 实际上,Sextant提供一个regression test方案,我们称为 vm-cluster。这个方案利用Vagrant创建若干台虚拟机,包括bootstrapper VM和Kubernetes机群里的服务器。在这个方案里,host(开发机)就对应上述的“笔记本”了。
16 |
17 | ## 环境需求
18 |
19 | 1. “笔记本”
20 |
21 | 1. bash:用于执行 bsroot.sh
22 | 1. Go:用于编译Sextant
23 | 1. git:被 go get 命令调用获取Sextant及其依赖
24 | 1. docker:用于docker pull各种Kubernetes机群需要的images,比如pause。
25 | 1. wget:用于下载各种文件
26 | 1. ssh/scp:
27 |
28 | 1. bootstrapper server
29 |
30 | 1. 静态IP:dnsmasq运行PXE 和 DNS service的时候需要
31 | 1. docker:执行 bootstrapper docker container
32 | 1. root权限:bootstrapper container需要以特权模式运行,比如运行docker container
33 | 1. 计划要自动安装CoreOS和kubernetes的机群机器要和bootstrapper所在的机器网络连通(2层连通)。
34 |
35 | 1. Kubernetes client
36 |
37 | 1. 运行 Linux 或者 macOS 操作系统
38 | 1. 与 bootstrapper 和 Kubernetes master 网络连通。
39 |
40 | ## 使用方法
41 |
42 |
43 | 1. 在*笔记本*或者vm-cluster的*host*上的准备工作流程如下:
44 |
45 | 1. 配置 Go 环境
46 |
47 | ```
48 | mkdir -p ~/work
49 | export GOPATH=$HOME/work
50 | ```
51 |
52 | 1. 获取Sextant并且获取其中Go程序的依赖
53 |
54 | ```
55 | go get github.com/k8sp/sextant/...
56 | ```
57 |
58 | 请注意上面命令里的省略号不可以少。
59 |
60 | 1. 编辑 `~/cluster-desc.yml` 描述即将安装的机群
61 |
62 | 1. 下载相关文件,生成 `./bsroot` 目录
63 |
64 | ```
65 | $GOPATH/src/github.com/k8sp/sextant/bsroot.sh ~/cluster-desc.yml
66 | ```
67 |
68 | 1. 把准备好的内容上传到 bootstrapper server(或者bootstrapper VM):
69 |
70 | ```
71 | scp -r ./bsroot root@bootstrapper:/
72 | ```
73 |
74 | 1. 在 bootstrapper server(或者bootstrapper VM)上执行 [`start_bootstrapper_container.sh`](https://github.com/k8sp/sextant/blob/master/start_bootstrapper_container.sh):
75 |
76 | ```
77 | host $ ssh bootstrapper
78 | bootstrapper $ sudo /root/start_bootstrapper_container.sh
79 | ```
80 |
81 | 或者
82 |
83 | ```
84 | host $ ssh root@bootstrapper -c "nohup /root/start_bootstrapper_container.sh"
85 | ```
86 |
87 | `start_bootstrapper_container.sh` 会:
88 |
89 | 1. 启动 bootstrapper service:
90 |
91 | ```
92 | host $ ssh bootstrapper
93 | bootstrapper $ docker load /bsroot/bootstrapper.tar
94 | bootstrapper $ docker run -d bootstrapper
95 | ```
96 |
97 | 1. 为了让bootstrapper service中的Docker registry service能向
98 | Kubernetes机群提供服务,还需要向其中push一些必须的images。这些
99 | images都事先由 bsroot.sh下载好,并且放进bsroot目录里了。
100 |
101 | 1. 配置 Kubernetes client
102 |
103 | 1. 下载并配置 kubectl
104 |
105 | ```bash
106 | client $ mkdir ~/work && cd ~/work
107 | client $ scp -r root@bootstrapper:/bsroot/kubectl .
108 | client $ wget https://raw.githubusercontent.com/k8sp/sextant/master/setup-kubectl.bash
109 | client $ bash ./setup-kubectl.bash ./kubectl ./kubectl
110 | ```
111 |
112 | 1. 验证
113 |
114 | ```bash
115 | client $ ./kubectl/kubectl get nodes
116 | ```
117 |
118 | ## 设计细节
119 |
120 | 1. 规划机群,并且把规划描述成[ClusterDesc配置文件](https://raw.githubusercontent.com/k8sp/sextant/master/cloud-config-server/template/cluster-desc.sample.yaml),比如如哪个机器作为master,哪些机器作为etcd机群,哪些作为worker。每台机器通过MAC地址唯一标识。
121 |
122 | 1. 管理员在一台预先规划好的的机器上,下载/上传bootstrapper的docker image,并通过docker run启动bootstrapper。启动成功后,bootstrapper会提供DHCP, DNS(服务于物理节点), PXE, tftp, cloud-config HTTP服务, CoreOS镜像自动更新服务。
123 |
124 | 1. 将机群中的其他所有节点开机,并从网络引导安装。即可完成整个机群的初始化。
125 |
126 | 1. 每启动一台新的机器(网络引导),先从DHCP获取一个IP地址,DHCP server将启动引导指向PXE server,然后由PXE server提供启动镜像(保存在tftpserver),至此,新的机器可以完成内存中的CoreOS引导,为CoreOS操作系统安装提供环境。
127 |
128 | 1. 由于PXE server配置了initrd参数,指定了install.sh的cloud-config文件(网络引导cloud-config),PXE引导启动后,将使用HTTP访问cloud-config-server,获得到这个install.sh。install.sh执行coreos-install命令,把CoreOS系统安装到当前机器并reboot。安装命令coreos-install 也可以指定一个cloud-config文件(系统安装cloud-config),这个文件是cloud-config-server自动生成生成的,这个cloud-config文件将本机安装成为对应的kubernetes机群节点(由之前的ClusterDesc指定的角色)。
129 |
130 | 1. 机器重启后,由于已经安装了系统,磁盘上有MBR,则使用磁盘引导。磁盘上的CoreOS启动后,会根据之前coreos-install指定的cloud-config文件完成配置,此时kubernetes的相关组件也完成了启动并把本机的hostname汇报给kubernetes master(hostname用mac地址生成)。
131 |
132 | 1. 网络配置统一都使用了DHCP,由dnsmasq统一管理和分配。在IP地址租期之内,DHCP会分配给本机一个相对稳定的IP地址。如果超过了租期,物理节点就会获得一个不同的IP,但由于kubernetes worker是根据mac地址生成的hostname上报给master的,之前给这个node打的标签也不会丢失。***所以在配置的时候需要着重考虑租期的配置***
133 |
134 | 1. 机群里所有服务器的CoreOS更新都是通过访问 bootstrapper 上 cloud-config-server 提供的镜像来做的。不需要访问外网。因此,如果我们希望机群更新,则需要手工从外网下载新版本CoreOS镜像,并且上传到 bootstrapper server 上的 bsroot 目录里。
135 |
136 |
137 | ## 组件功能
138 |
139 | ### dnsmasq
140 |
141 | dnsmasq在机群中提供DHCP, DNS(物理机的DNS), PXE服务。使用docker启动dnsmasq的试验方法可以参考:https://github.com/k8sp/sextant/issues/102
142 |
143 | ### cloud-config-server
144 |
145 | cloud-config-server是使用Go语言开发的一个HTTP Server,将提供安装kubernetes组件用到的需要通过HTTP访问的所有资源。包括:
146 |
147 | * install.sh, 访问url如: http://bootstrapper/install.sh
148 | * CoreOS镜像, 访问url如: http://bootstrapper/stable/1010.5.0/coreos_production_image.bin.bz2
149 | * 根据模版自动生成的cloud-config文件, 访问url如: http://bootstrapper/cloud-config/08:00:36:a7:5e:9f.yaml
150 | * 自动生成的证书, ca.pem以及为api-server, worker, client生成的证书
151 |
152 | ### docker registry
153 |
154 | 在bootstrapper所在的机器上,启动一个docker registry,这样在kubernetes master/worker启动时需要的docker镜像(hyperkube, kubelet, pause, skydns, kube2sky等)就可以不需要翻墙即可完成启动。这样的好处是:
155 |
156 | 1. 在内网可以获得最快的镜像下载和启动速度,即使翻墙,下载镜像的速度也会很慢。
157 | 1. 不需要额外搭建翻墙环境
158 |
159 | 这样,bootstrapper在编译的时候就需要下载好docker registry的镜像,kubernetes需要的镜像。启动bootstrapper的时候,先把docker registry的镜像load到docker daemon中,然后再把kubernetes用到的镜像push到启动好的registry中,并打上对应的tag(cloud-config-server生成的cloud-config文件使用的镜像的tag)
160 |
--------------------------------------------------------------------------------
/docker/.gitignore:
--------------------------------------------------------------------------------
1 | cloud-config-server
2 | addons
3 | registry
4 |
--------------------------------------------------------------------------------
/docker/Dockerfile:
--------------------------------------------------------------------------------
# Sextant bootstrapper image: extends the Docker registry base image with
# dnsmasq (DHCP/DNS/PXE/TFTP) and the Sextant cloud-config HTTP server.
FROM distribution/registry

# Install required software packages.
# openssl is needed for generating node TLS certificates at runtime.
RUN set -ex && \
apk update && \
apk add dnsmasq openssl

# Upload Sextant Go programs and retrieve dependencies.
# NOTE(review): only cloud-config-server is copied here, yet entrypoint.sh
# also runs /go/bin/registry -- presumably the build script places it, or
# the base image's registry binary is expected; verify against bsroot.sh.
RUN mkdir -p /go/bin
COPY cloud-config-server /go/bin

# NOTICE: change install.sh HTTP server ip:port when running entrypoint.sh
COPY entrypoint.sh /
COPY dhcp.sh /
# Registry data lives here; mount a host volume to persist pushed images.
VOLUME ["/var/lib/registry"]
WORKDIR "/go"
ENTRYPOINT ["/entrypoint.sh"]
18 |
--------------------------------------------------------------------------------
/docker/dhcp.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env sh
# dnsmasq --dhcp-script hook (see dnsmasq.conf.template: dhcp-script=/dhcp.sh).
# Maintains one hosts file per MAC under /bsroot/dnsmasq/hosts.d so that
# dnsmasq (hostsdir=) can resolve cluster nodes by generated hostname.
# Invoked by dnsmasq as: dhcp.sh <add|old|del> <mac> <ip> [hostname]

op="${1:-op}"
mac="${2:-mac}"
ip="${3:-ip}"
hostname="${4}"

# MACs contain ':' which is awkward in file names; use '-' instead.
fileNameForMac () {
echo $1| tr ':' '-'
}
filename=$( fileNameForMac $mac )
# NOTE(review): "$(unknown)" looks like extraction damage -- this was
# presumably "${filename}"; confirm against the upstream repository.
filepath="/bsroot/dnsmasq/hosts.d/$(unknown)"

if [[ $op == "add" || $op == "old" ]]
then
if [ -f $filepath ]
then
rm -f $filepath
fi
# NOTE(review): the heredoc body that wrote the "ip hostname" entry appears
# to have been lost in extraction ("cat > $filepath <> $filepath" is not
# valid shell as written, and the line numbering jumps 20 -> 26 here).
# Compare with the upstream source before relying on this section.
cat > $filepath <> $filepath
fi
fi

if [[ $op == "del" ]]; then
rm -f $filepath
fi
32 |
--------------------------------------------------------------------------------
/docker/entrypoint.sh:
--------------------------------------------------------------------------------
#!/bin/sh
# Entrypoint for the Sextant bootstrapper container.
#
# Usage: entrypoint.sh <start_pxe>
#   start_pxe == "y"  -> also start dnsmasq (DHCP/DNS/PXE/TFTP services)
#
# The image is alpine/busybox based, so this script sticks to POSIX sh:
# the original used the bash-only [[ ]] construct under #!/bin/sh, which
# is not guaranteed to be available in busybox ash.

if [ "$#" -ne 1 ]; then
    echo "need to set start_pxe"
    exit 1
fi

if [ "$1" = "y" ]; then
    # start dnsmasq
    mkdir -p /bsroot/dnsmasq/hosts.d
    dnsmasq --log-facility=- --conf-file=/bsroot/config/dnsmasq.conf \
        --dhcp-leasefile=/bsroot/dnsmasq/dnsmasq.leases
fi

# start cloud-config-server
/go/bin/cloud-config-server -addr ":80" \
    -dir /bsroot/html/static \
    -cloud-config-dir /bsroot/config/templatefiles \
    -cluster-desc /bsroot/config/cluster-desc.yml \
    -ca-crt /bsroot/tls/ca.pem \
    -ca-key /bsroot/tls/ca-key.pem &

# start registry
/go/bin/registry serve /bsroot/config/registry.yml &
sleep 2

# Keep the container alive while the background services run.
wait
28 |
--------------------------------------------------------------------------------
/fabric/README_CN.md:
--------------------------------------------------------------------------------
1 | # 前言
2 | Sextant设计之初考虑的是在裸机集群中一键式的解决方案。实际使用的过程中,企业内部的集群一般都有了自己的初始化安装环境,如部署了DHCP服务器,有自己的DNS,机器也有自己的hostname,机器之间通过hostname相互也能ping通。这种情况下,同时运行两个DHCP服务器无疑是有冲突的,需要对Sextant做一些改动以便适应这种环境。
3 |
4 | 我们可以把Sextant PXE服务部分设置为可选项,保留资源cache服务部分。由于post_script不能通过kick start的方式启动,所以引入fabric作为集群管理者,方便安装、配置、检查、启动、关闭软件。我们写的如下的步骤,都是在考虑了企业一般的现实情况来做的。
5 |
6 | 首先,`cp hosts.template.yaml hosts.yaml`,然后修改之。
7 |
8 | ***注意:***
9 | - 符合要求的步骤可以略过
10 | - 需要已经安装centos7的基础操作系统
11 |
12 | # 步骤一:机器之间可以访问
13 | 我们需要机器都可以通过hostname来相互之间访问。如果企业的网络不支持,需要我们把静态解析写入各个节点`/etc/hosts`中(已经支持的可以忽略)。
14 |
15 | ```
16 | # get mac_ip_host
17 | fab -f get_mac_ip_host.py get_mac_addr
18 |
19 | # display all before set them
20 | fab -f set_hosts.py display
21 |
22 | # set hosts
23 | fab -f set_hosts.py set_mac_hosts
24 | ```
25 |
26 | # 步骤二:生成bsroot
27 | 注意设置cluster-desc.yaml中的`start_pxe: n`。
28 |
29 |
30 | # 步骤三:升级kernel
31 | ```
32 | fab -f upgrade_kernel.py prepare
33 | fab -f upgrade_kernel.py upgrade
34 | fab -f upgrade_kernel.py reboot
35 | ```
36 |
37 | # 步骤四:安装gpu driver
38 | ```
39 | fab -f gpu_driver.py prepare
40 | fab -f gpu_driver.py install
41 | fab -f gpu_driver.py check
42 | ```
43 |
44 | # 步骤五:安装k8s需要的软件
45 | ```
46 | fab -f k8s.py prepare
47 | fab -f k8s.py install
48 | ```
49 |
50 | # TODO: 启动etcd flannel kubelet等
--------------------------------------------------------------------------------
/fabric/get_mac_ip_host.py:
--------------------------------------------------------------------------------
1 | from __future__ import with_statement
2 | from fabric.api import *
3 | from fabric.contrib.console import confirm
4 | import fabric.operations as op
5 | import yaml
6 | import sys
7 | import re
8 |
def get_mac_addr():
    """Record this host's default-interface MAC, IP and hostname.

    Remotely writes "<mac> <ip> <hostname>" to /etc/mac_ip_host, then
    downloads that file to ./<host>/mac_ip_host locally.  The local copies
    are later consumed by set_hosts.py to build /etc/hosts entries.
    """
    src_path = "/etc/mac_ip_host"

    # Find the interface of the default route (destination 00000000 in
    # /proc/net/route), then read its link-layer (MAC) address via `ip`.
    # env.host_string is the node's IP taken from hosts.yaml.
    cmd = """ default_iface=$(awk '$2 == 00000000 { print $1 }' /proc/net/route | uniq) &&
default_iface=`echo ${default_iface} | awk '{ print $1 }'` &&
mac_addr=`ip addr show dev ${default_iface} | awk '$1 ~ /^link\// { print $2 }'` &&
echo $mac_addr %s $HOSTNAME > %s
""" % (env.host_string, src_path)
    run(cmd)

    # One local directory per host, matching what set_hosts.py expects.
    dst_path = env.host_string + "/mac_ip_host"
    get(src_path, dst_path)
21 |
# Load fabric connection settings from hosts.yaml at import time.
with open("hosts.yaml", 'r') as stream:
    try:
        # safe_load: yaml.load without an explicit Loader is deprecated and
        # can execute arbitrary Python tags from the config file.
        y = yaml.safe_load(stream)
        env.hosts = y["hosts"]
        env.user = y["user"]
        env.password = y["password"]
    except yaml.YAMLError as exc:
        print(exc)
        abort("load yaml error")
31 |
32 |
33 |
34 |
--------------------------------------------------------------------------------
/fabric/gpu_driver.py:
--------------------------------------------------------------------------------
1 | from __future__ import with_statement
2 | from fabric.api import *
3 | from fabric.contrib.console import confirm
4 | import fabric.operations as op
5 | import yaml
6 | import sys
7 |
# Populated from hosts.yaml at import time (see the bottom of this module).
driver_version=""  # NVIDIA driver version string, e.g. "375.26"
http_gpu_dir=""    # URL of the GPU driver directory on the bootstrapper
boot_strapper=""   # bootstrapper host (ip or name)
11 |
def prepare():
    """Disable SELinux, a prerequisite for installing the NVIDIA driver.

    The original command string started continuation lines with '&&';
    a shell line may not begin with '&&', so the command failed with a
    syntax error.  The operators now end each line, which makes the shell
    continue onto the next line.
    """
    cmd = """setenforce 0 &&
sed -i 's/^SELINUX=.*/SELINUX=disabled/g' /etc/selinux/config &&
cat /etc/selinux/config | grep SELINUX"""
    run(cmd)
17 |
def install():
    """Fetch and run the GPU driver build script from the bootstrapper."""
    # Important: the GPU driver must be installed after the new kernel.
    run("wget -P /root %s/build_centos_gpu_drivers.sh" % http_gpu_dir)
    run("bash -x /root/build_centos_gpu_drivers.sh %s %s" % (driver_version, http_gpu_dir))
23 |
#@parallel
def check():
    """Abort for this host unless nvidia-smi reports the expected version."""
    shell_check = "ret=`nvidia-smi | grep \"Driver Version\" | grep %s` ; if [[ -z $ret ]]; then exit 1; fi " % driver_version
    result = run(shell_check)
    if result.failed:
        abort(env.host_string + ": check failed")
30 |
# Load connection settings and GPU driver parameters from hosts.yaml.
with open("hosts.yaml", 'r') as stream:
    try:
        # safe_load: yaml.load without an explicit Loader is deprecated and
        # can execute arbitrary Python tags from the config file.
        y = yaml.safe_load(stream)
        env.hosts = y["hosts"]
        env.user = y["user"]
        env.password = y["password"]

        boot_strapper = y["boot_strapper"]
        driver_version = y["gpu"]["driver_version"]

        # Driver build scripts are served from the bootstrapper's static dir.
        http_gpu_dir="http://%s/static/CentOS7/gpu_drivers" % boot_strapper
    except yaml.YAMLError as exc:
        print(exc)
        abort("load yaml error")
45 |
46 |
47 |
--------------------------------------------------------------------------------
/fabric/hosts.template.yaml:
--------------------------------------------------------------------------------
1 | user: "root"
2 | password: "passwd"
3 | hosts:
4 | - 192.168.16.23
5 | - 192.168.16.24
6 | - 192.168.16.25
7 | - 192.168.16.26
8 | - 192.168.16.27
9 | - 192.168.16.28
10 | - 192.168.16.29
11 | kernel:
12 | old_version: "3.10.0-327.el7.x86_64"
13 | new_version: "4.4.79-1.el7.elrepo.x86_64"
14 | gpu:
15 | driver_version: 375.26
16 | boot_strapper: "192.168.16.23"
17 |
18 | # host, mac, all
19 | set_type: host
20 |
21 | # change hostname to mac?
22 | set_mac_hostname: n
23 |
24 | # set "" if not need to change default path
25 | docker_data_path: "/home/var/lib/docker"
26 | etcd_data_path: "/home/var/lib/etcd"
27 |
--------------------------------------------------------------------------------
/fabric/k8s.py:
--------------------------------------------------------------------------------
1 | from __future__ import with_statement
2 | from fabric.api import *
3 | from fabric.contrib.console import confirm
4 | import fabric.operations as op
5 | import yaml
6 | import sys
7 | import re
8 |
9 |
# Populated from hosts.yaml at import time (see the bottom of this module).
boot_strapper=""      # bootstrapper host (ip or name)
set_mac_hostname=""   # "y"/"n": rename each host to its MAC address?
docker_data_path=""   # docker data dir override ("" keeps the default)
etcd_data_path=""     # etcd data dir override ("" keeps the default)
14 |
def prepare():
    """Disable firewalld and download repo/provision scripts from bootstrapper."""
    run("systemctl stop firewalld && systemctl disable firewalld")
    run("wget -O /etc/yum.repos.d/Cloud-init.repo http://%s/static/CentOS7/repo/cloud-init.repo" % boot_strapper)
    run("wget -O /root/post-process.sh http://%s/centos/post-script/00-00-00-00-00-00" % boot_strapper)
    # -P (directory prefix) rather than -O: `wget -O /root <url>` fails with
    # "Is a directory", so the original never saved post_cloudinit_provision.sh.
    run("wget -P /root http://%s/static/CentOS7/post_cloudinit_provision.sh" % boot_strapper)
20 |
def install():
    """Install cloud-init/docker/etcd/flannel and run the provisioning scripts."""
    run("yum --enablerepo=Cloud-init install -y cloud-init docker-engine etcd flannel")
    # '&&' must end a line, not begin one: a shell line starting with '&&'
    # is a syntax error, so the original multi-line commands never ran.
    run("""cd /root &&
export set_mac_hostname=%s &&
export docker_data_path=%s &&
bash post-process.sh""" % (set_mac_hostname, docker_data_path))

    if len(etcd_data_path) > 0 :
        # Make sure the etcd user exists and owns its (relocated) data dir.
        run("id -u etcd &>/dev/null || useradd etcd")
        run("mkdir -p %s && chown etcd -R %s" % (etcd_data_path, etcd_data_path))

    run("""cd /root &&
export bootstrapper_ip=%s &&
export etcd_data_path=%s &&
bash post_cloudinit_provision.sh""" % (boot_strapper, etcd_data_path))
36 |
# NOTE(review): "clouinit" is a typo for "cloudinit"; the name is kept so
# existing `fab -f k8s.py rm_clouinit_cache` invocations keep working.
def rm_clouinit_cache():
    # Drop cloud-init's instance cache so it re-runs on the next boot.
    run("rm -rf /var/lib/cloud/instances/iid-local01")
39 |
def start_etcd():
    """(Re)start etcd and enable it at boot.

    '&&' must end a line, not begin one: a shell line starting with '&&'
    is a syntax error, so the original multi-line command never ran.
    """
    run("""systemctl daemon-reload &&
systemctl stop etcd &&
systemctl enable etcd &&
systemctl start etcd""")
45 |
# Load connection settings and provisioning parameters from hosts.yaml.
with open("hosts.yaml", 'r') as stream:
    try:
        # safe_load: yaml.load without an explicit Loader is deprecated and
        # can execute arbitrary Python tags from the config file.
        y = yaml.safe_load(stream)
        env.user = y["user"]
        env.password = y["password"]
        env.hosts = y["hosts"]
        boot_strapper = y["boot_strapper"]

        set_mac_hostname = y["set_mac_hostname"]
        docker_data_path = y["docker_data_path"]
        etcd_data_path = y["etcd_data_path"]
    except yaml.YAMLError as exc:
        print(exc)
        abort("load yaml error")
60 |
61 |
62 |
63 |
64 |
--------------------------------------------------------------------------------
/fabric/set_hosts.py:
--------------------------------------------------------------------------------
1 | from __future__ import with_statement
2 | from fabric.api import *
3 | from fabric.contrib.console import confirm
4 | import fabric.operations as op
5 | import yaml
6 | import sys
7 | import re
8 |
# Populated at import time from hosts.yaml and the per-host mac_ip_host
# files fetched by get_mac_ip_host.py (see the bottom of this module).
mac_ip={}    # "aa-bb-cc-..." MAC (':' replaced by '-') -> ip
host_ip={}   # hostname -> ip
set_type=""  # which mapping goes into /etc/hosts: "host", "mac", or "all"
12 |
def modify_mac_hosts(path, ips):
    """Rewrite the hosts file at `path` with the name->ip mapping `ips`.

    Existing entries whose name appears in `ips` get the new ip; names from
    `ips` not yet present are appended.  `ips` itself is not mutated (a
    deep copy is consumed instead).  Each line is written back as
    "ip name".  Note: comment and blank lines are dropped on rewrite.
    """
    import copy
    local = copy.deepcopy(ips)

    # Parse into [name, ip] pairs; a name may contain spaces (aliases),
    # so everything after the ip is kept together.
    hosts = []
    with open(path, "r") as fp:
        for line in fp.read().split('\n'):
            if len(re.sub('\s*', '', line)) and not line.startswith('#'):
                parts = re.split('\s+', line)
                ip = parts[0]
                host_name = " ".join(parts[1:])
                hosts.append([host_name, ip])
    # (the redundant fp.close()/fw.close() after the `with` blocks were
    # removed; the context managers already close the files)

    # Update known entries in place; blank them in `local` so they are
    # not appended a second time below.
    for n in hosts:
        if n[0] in local:
            n[1] = local[n[0]]
            local[n[0]] = ""

    with open(path, "w") as fw:
        for n in hosts:
            fw.write("%s %s\n" % (n[1], n[0]))
        for n in local:
            if len(local[n]) > 0:
                fw.write("%s %s\n" % (local[n], n))
40 |
def set_mac_hosts():
    """Fetch /etc/hosts from the remote host, patch it locally, upload it back."""
    src_path = "/etc/hosts"
    dst_path = env.host_string + "/hosts"
    # Download to the exact local path that is edited and uploaded below.
    # The original get(src_path) used fabric's default local-path pattern,
    # which does not produce dst_path, so modify_mac_hosts operated on a
    # missing file.
    get(src_path, dst_path)
    if set_type == "mac" or set_type == "all":
        modify_mac_hosts(dst_path, mac_ip)
    if set_type == "host" or set_type == "all":
        modify_mac_hosts(dst_path, host_ip)
    put(dst_path, src_path)
50 |
def display():
    """Print the parsed hostname->ip and mac->ip mappings for inspection."""
    # Parenthesized form: identical under Python 2 (single argument) and
    # forward-compatible with Python 3.
    print(host_ip)
    print(mac_ip)
54 |
# Load fabric connection settings and the set_type policy from hosts.yaml.
with open("hosts.yaml", 'r') as stream:
    try:
        # safe_load: yaml.load without an explicit Loader is deprecated and
        # can execute arbitrary Python tags from the config file.
        y = yaml.safe_load(stream)
        env.hosts = y["hosts"]
        env.user = y["user"]
        env.password = y["password"]

        set_type = y["set_type"]
    except yaml.YAMLError as exc:
        print(exc)
        abort("load yaml error")

# Build the mac->ip and hostname->ip tables from the per-host files
# produced by `fab -f get_mac_ip_host.py get_mac_addr`.
for h in env.hosts:
    dst_path = h + "/mac_ip_host"
    with open(dst_path, "r") as fp:
        for line in fp.read().split('\n'):
            if len(re.sub('\s*', '', line)) and not line.startswith('#'):
                parts = re.split('\s+', line)
                mac = parts[0].replace(":", "-")
                ip = parts[1]
                host_name = parts[2]

                mac_ip[mac] = ip
                host_ip[host_name] = ip
79 |
80 |
--------------------------------------------------------------------------------
/fabric/upgrade_kernel.py:
--------------------------------------------------------------------------------
1 | from __future__ import with_statement
2 | from fabric.api import *
3 | from fabric.contrib.console import confirm
4 | import fabric.operations as op
5 |
# Populated from hosts.yaml at import time (see the bottom of this module).
new_kernel_version=""  # kernel to install and boot, e.g. "4.4.79-1.el7.elrepo.x86_64"
old_kernel_version=""  # kernel to fall back to via the `reset` task
boot_strapper=""       # bootstrapper host serving the internal Cloud-init repo
9 |
def prepare():
    # Comment out any "exclude=..." line in yum.conf so kernel packages may
    # be installed.  NOTE(review): the sed address /exclude=*/ matches
    # "exclude" followed by zero-or-more "=" characters -- "exclude=.*" was
    # probably intended; it happens to match the relevant lines anyway.
    run("sed -i '/exclude=*/ s/^/#/' /etc/yum.conf")
12 |
def post():
    # Re-enable (un-comment) the kernel exclude line disabled by prepare().
    run("sed -i -e '/exclude=*kernel*/ s/^#//' /etc/yum.conf")
15 |
@parallel
def upgrade():
    """Install kernel-lt via upgrade_kernel.sh and make it the grub default."""
    put("./upgrade_kernel.sh", "/tmp/upgrade_kernel.sh")
    # Passing boot_strapper makes the script use the internal Cloud-init
    # repo; without an argument it falls back to the public elrepo repo.
    result = run("bash /tmp/upgrade_kernel.sh %s" % boot_strapper)
    if result.failed:
        abort("failed")
    run("grub2-set-default \"CentOS Linux (%s) 7 (Core)\"" % new_kernel_version)
23 |
def reset():
    """Roll back: make the previous kernel the default grub boot entry."""
    run("grub2-set-default \"CentOS Linux (%s) 7 (Core)\"" % old_kernel_version)
27 |
def check():
    """Verify the new kernel is installed and selected as the grub default."""
    # The kernel sources directory must exist for the new version.
    cmd = "if [[ ! -d /usr/src/kernels/%s ]]; then exit 1; fi" % new_kernel_version
    result = run(cmd)
    if result.failed:
        abort(env.host_string + " check failed")

    # grub2-editenv must report exactly the saved entry set by upgrade().
    start = "saved_entry=CentOS Linux (%s) 7 (Core)" % new_kernel_version
    cmd = "if [[ \"$(grub2-editenv list)\" != \"%s\" ]]; then exit 1; fi" % start
    result = run(cmd)
    if result.failed:
        abort(env.host_string + " check failed")
39 |
@parallel
def reboot():
    # Boot to the console (multi-user) target and restart into the new kernel.
    run("systemctl set-default multi-user.target && reboot")
43 |
def display():
    # Show the running kernel so the upgrade can be confirmed after reboot.
    run("uname -a")
46 |
import yaml

# Load kernel versions and fabric connection settings from hosts.yaml.
with open("hosts.yaml", 'r') as stream:
    try:
        # safe_load: yaml.load without an explicit Loader is deprecated and
        # can execute arbitrary Python tags from the config file.
        y = yaml.safe_load(stream)
        env.hosts = y["hosts"]
        env.user = y["user"]
        env.password = y["password"]

        new_kernel_version=y["kernel"]["new_version"]
        old_kernel_version=y["kernel"]["old_version"]
        boot_strapper = y["boot_strapper"]

        # Echo the grub command that upgrade() will run, for a sanity check.
        #print new_kernel_version, old_kernel_version
        print("grub2-set-default \"CentOS Linux (%s) 7 (Core)\"" % new_kernel_version)
    except yaml.YAMLError as exc:
        print(exc)
        abort("load yaml error")
65 |
66 |
--------------------------------------------------------------------------------
/fabric/upgrade_kernel.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Install the long-term-support kernel (kernel-lt): from the bootstrapper's
# internal Cloud-init repo when a host argument is given, otherwise from the
# public elrepo repository.  Finally make sure a grub2 config exists.
#export https_proxy=192.168.16.30:3128
#export http_proxy=192.168.16.30:3128

# -gt, not ">": inside [[ ]] the ">" operator is lexicographic string
# comparison, not numeric.
if [[ $# -gt 0 ]]; then
    wget -O /etc/yum.repos.d/Cloud-init.repo http://$1/static/CentOS7/repo/cloud-init.repo
    yum --enablerepo=Cloud-init -y -d1 install kernel-lt kernel-lt-devel
else
    rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
    rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-2.el7.elrepo.noarch.rpm
    yum --enablerepo=elrepo-kernel -y -d1 install kernel-lt kernel-lt-devel
fi

# Only generate a grub config if none exists yet (grub2-set-default needs one).
if [[ ! -f /boot/grub2/grub.cfg ]]; then
    grub2-mkconfig --output=/boot/grub2/grub.cfg
fi
17 |
--------------------------------------------------------------------------------
/golang/addons/README.md:
--------------------------------------------------------------------------------
1 | #### Sextant add-ons
2 | [Kubernetes add-ons](https://github.com/kubernetes/kubernetes/tree/master/cluster/addons) 是一组Replication Controllers或者Services,作为Kubernetes集群的一部分而存在的,例如 [skydns](https://github.com/kubernetes/kubernetes/tree/master/cluster/addons/dns),[ingress](https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx) 等都属于add-ons的一部分。
3 |
4 | Sextant addons模块会根据集群的 *cluster-desc.yaml* 配置文件以及相应add-on的配置模板,生成对应的add-on配置。
5 |
6 | ```
7 | addons \
8 |     --cluster-desc-file {cluster-desc.yaml} \
9 |     --template-file {add-on template file} \
10 |     --config-file {add-on config file}
11 | ```
12 |
--------------------------------------------------------------------------------
/golang/addons/addons.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "flag"
5 | "html/template"
6 | "io"
7 | "io/ioutil"
8 | "strings"
9 |
10 | "github.com/k8sp/sextant/golang/clusterdesc"
11 | "github.com/topicai/candy"
12 | yaml "gopkg.in/yaml.v2"
13 | )
14 |
// addonsConfig collects the fields of clusterdesc.Cluster that the add-on
// and dnsmasq templates reference; execute() fills it and passes it to
// tmpl.Execute as the template data.
type addonsConfig struct {
	Bootstrapper string // bootstrapper host (ip or name)
	DomainName string // cluster DNS domain (dnsmasq "domain=")
	IPLow string // DHCP range lower bound
	IPHigh string // DHCP range upper bound
	Netmask string // DHCP range netmask
	Routers []string // DHCP option 3
	NameServers []string // DHCP option 6
	UpstreamNameServers []string // dnsmasq "server=" upstream resolvers
	Broadcast string // DHCP option 28
	IngressReplicas int // replica count for the ingress Deployment
	Dockerdomain string // private registry domain (":5000" appended in templates)
	K8sClusterDNS string // clusterIP for the kube-dns Service
	EtcdEndpoint string // first endpoint of the etcd cluster
	Images map[string]string // addon name -> docker image reference
	IngressHostNetwork bool // run the ingress controller with hostNetwork
	MasterHostname string // kubernetes master hostname (used when hostNetwork)
	SetNTP bool // advertise the bootstrapper as NTP server via DHCP
	DNSMASQLease string // DHCP lease duration, e.g. "24h"
}
35 |
// execute renders the template in templateFile with values drawn from the
// cluster description and writes the result to w.  Any read/parse/execute
// error aborts the program via candy.Must.
//
// NOTE(review): the template package imported here is html/template, which
// HTML-escapes interpolated values; for YAML/conf output text/template
// would be the safer choice -- confirm no field value contains characters
// that get escaped (&, <, >, quotes).
func execute(templateFile string, config *clusterdesc.Cluster, w io.Writer) {
	d, e := ioutil.ReadFile(templateFile)
	candy.Must(e)

	tmpl := template.Must(template.New("").Parse(string(d)))

	// Copy only the template-visible fields out of the cluster description.
	ac := addonsConfig{
		Bootstrapper: config.Bootstrapper,
		DomainName: config.DomainName,
		IPLow: config.IPLow,
		IPHigh: config.IPHigh,
		Netmask: config.Netmask,
		Routers: config.Routers,
		NameServers: config.Nameservers,
		UpstreamNameServers: config.UpstreamNameServers,
		Broadcast: config.Broadcast,
		IngressReplicas: config.GetIngressReplicas(),
		Dockerdomain: config.Dockerdomain,
		K8sClusterDNS: config.K8sClusterDNS,
		// Only the first etcd endpoint is exposed to the templates.
		EtcdEndpoint: strings.Split(config.GetEtcdEndpoints(), ",")[0],
		Images: config.Images,
		IngressHostNetwork: config.IngressHostNetwork,
		MasterHostname: config.GetMasterHostname(),
		SetNTP: config.DNSMASQSetNTP,
		DNSMASQLease: config.DNSMASQLease,
	}

	candy.Must(tmpl.Execute(w, ac))
}
65 |
// main renders one add-on template into one config file:
//
//	addons -cluster-desc-file c.yaml -template-file t.template -config-file out.yaml
func main() {
	clusterDescFile := flag.String("cluster-desc-file", "./cluster-desc.yml", "Local copy of cluster description YAML file.")
	templateFile := flag.String("template-file", "./ingress.template", "config file template.")
	configFile := flag.String("config-file", "./ingress.yaml", "config file with yaml")
	flag.Parse()

	d, e := ioutil.ReadFile(*clusterDescFile)
	candy.Must(e)

	// Defaults applied before unmarshalling; values in the YAML file
	// override them.
	c := &clusterdesc.Cluster{
		DNSMASQSetNTP: false,
		DNSMASQLease: "24h",
	}
	candy.Must(yaml.Unmarshal(d, c))
	candy.WithCreated(*configFile, func(w io.Writer) { execute(*templateFile, c, w) })
}
82 |
--------------------------------------------------------------------------------
/golang/addons/addons_test.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "bytes"
5 | "io"
6 | "io/ioutil"
7 | "testing"
8 |
9 | tpcfg "github.com/k8sp/sextant/golang/clusterdesc"
10 | "github.com/stretchr/testify/assert"
11 | "github.com/topicai/candy"
12 | yaml "gopkg.in/yaml.v2"
13 | )
14 |
// TestExecute renders the ingress template against the sample cluster
// description and checks the generated Deployment's metadata.name.
func TestExecute(t *testing.T) {
	// Load the sample cluster description shipped with the template package.
	config := candy.WithOpened("../template/cluster-desc.sample.yaml", func(r io.Reader) interface{} {
		b, e := ioutil.ReadAll(r)
		candy.Must(e)

		c := &tpcfg.Cluster{}
		assert.Nil(t, yaml.Unmarshal(b, &c))
		return c
	}).(*tpcfg.Cluster)

	// Render the template and parse the output back as YAML to validate it.
	var ccTmpl bytes.Buffer
	execute("./template/ingress.template", config, &ccTmpl)
	yml := make(map[interface{}]interface{})
	candy.Must(yaml.Unmarshal(ccTmpl.Bytes(), yml))

	initialEtcdCluster := yml["metadata"].(map[interface{}]interface{})["name"]
	assert.Equal(t, initialEtcdCluster, "nginx-ingress-controller-v1")
}
33 |
--------------------------------------------------------------------------------
/golang/addons/template/dashboard-controller.template:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Deployment
3 | metadata:
4 | name: kubernetes-dashboard
5 | namespace: kube-system
6 | labels:
7 | k8s-app: kubernetes-dashboard
8 | kubernetes.io/cluster-service: "true"
9 | addonmanager.kubernetes.io/mode: Reconcile
10 | spec:
11 | selector:
12 | matchLabels:
13 | k8s-app: kubernetes-dashboard
14 | template:
15 | metadata:
16 | labels:
17 | k8s-app: kubernetes-dashboard
18 | annotations:
19 | scheduler.alpha.kubernetes.io/critical-pod: ''
20 | spec:
21 | containers:
22 | - name: kubernetes-dashboard
23 | image: {{ .Dockerdomain }}:5000/{{ .Images.dashboard }}
24 | resources:
25 | # keep request = limit to keep this container in guaranteed class
26 | limits:
27 | cpu: 100m
28 | memory: 50Mi
29 | requests:
30 | cpu: 100m
31 | memory: 50Mi
32 | ports:
33 | - containerPort: 9090
34 | livenessProbe:
35 | httpGet:
36 | path: /
37 | port: 9090
38 | initialDelaySeconds: 30
39 | timeoutSeconds: 30
40 | tolerations:
41 | - key: "CriticalAddonsOnly"
42 | operator: "Exists"
43 |
--------------------------------------------------------------------------------
/golang/addons/template/dashboard-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: kubernetes-dashboard
5 | namespace: kube-system
6 | labels:
7 | k8s-app: kubernetes-dashboard
8 | kubernetes.io/cluster-service: "true"
9 | addonmanager.kubernetes.io/mode: Reconcile
10 | spec:
11 | # In a production setup, we recommend accessing Grafana through an external Loadbalancer
12 | # or through a public IP.
13 | type: NodePort
14 | selector:
15 | k8s-app: kubernetes-dashboard
16 | ports:
17 | - port: 80
18 | targetPort: 9090
19 | nodePort: 32018
20 |
--------------------------------------------------------------------------------
/golang/addons/template/default-backend-svc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: default-http-backend
5 | labels:
6 | name: default-http-backend
7 | kubernetes.io/cluster-service: "true"
8 | namespace: kube-system
9 | spec:
10 | selector:
11 | app: default-http-backend
12 | ports:
13 | - port: 8080
14 | targetPort: 8080
15 | protocol: TCP
16 | name: default-http-backend
17 |
18 |
--------------------------------------------------------------------------------
/golang/addons/template/default-backend.template:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Deployment
3 | metadata:
4 | name: default-http-backend-v1
5 | namespace: kube-system
6 | labels:
7 | kubernetes.io/cluster-service: "true"
8 | app: default-http-backend
9 | spec:
10 | replicas: 1
11 | template:
12 | metadata:
13 | labels:
14 | app: default-http-backend
15 | spec:
16 | terminationGracePeriodSeconds: 60
17 | containers:
18 | - name: default-http-backend
19 | # Any image is permissable as long as:
20 | # 1. It serves a 404 page at /
21 | # 2. It serves 200 on a /healthz endpoint
22 | image: {{ .Dockerdomain }}:5000/yancey1989/defaultbackend:1.0
23 | livenessProbe:
24 | httpGet:
25 | path: /healthz
26 | port: 8080
27 | scheme: HTTP
28 | initialDelaySeconds: 30
29 | timeoutSeconds: 5
30 | ports:
31 | - containerPort: 8080
32 | resources:
33 | limits:
34 | cpu: 10m
35 | memory: 20Mi
36 | requests:
37 | cpu: 10m
38 | memory: 20Mi
39 |
--------------------------------------------------------------------------------
/golang/addons/template/dnsmasq.conf.template:
--------------------------------------------------------------------------------
1 | domain={{ .DomainName }}
2 | user=root
3 | dhcp-range={{ .IPLow }},{{ .IPHigh }},{{ .Netmask }},{{ .DNSMASQLease }}
4 | log-dhcp
5 |
6 | {{- if .SetNTP }}
7 | dhcp-option=option:ntp-server,{{ .Bootstrapper }}
8 | {{- end }}
9 |
10 | dhcp-option=28,{{ .Broadcast }}
11 | dhcp-option=3,{{range $index, $ele := .Routers}}{{if $index}},{{end}}{{$ele}}{{end}}
12 | dhcp-option=6,{{range $index, $ele := .NameServers}}{{if $index}},{{end}}{{$ele}}{{end}}
13 |
14 | no-hosts
15 | expand-hosts
16 | addn-hosts=/bsroot/config/dnsmasq.hosts
17 | no-resolv
18 | {{range $index, $ele := .UpstreamNameServers}}
19 | server={{$ele}}
20 | {{end}}
21 |
22 | local=/{{ .DomainName }}/
23 | domain-needed
24 |
25 |
26 | dhcp-boot=pxelinux.0
27 | pxe-prompt="Press F8 for menu.", 5
28 | pxe-service=x86PC, "Install CoreOS from network server", pxelinux
29 | enable-tftp
30 | tftp-root=/bsroot/tftpboot
31 | dhcp-script=/dhcp.sh
32 | hostsdir=/bsroot/dnsmasq/hosts.d
33 |
--------------------------------------------------------------------------------
/golang/addons/template/grafana-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | kubernetes.io/cluster-service: 'true'
6 | kubernetes.io/name: monitoring-grafana
7 | name: monitoring-grafana
8 | namespace: kube-system
9 | spec:
10 | # In a production setup, we recommend accessing Grafana through an external Loadbalancer
11 | # or through a public IP.
12 | type: NodePort
13 | ports:
14 | - port: 80
15 | targetPort: 3000
16 | nodePort: 32017
17 | selector:
18 | name: influxGrafana
19 |
--------------------------------------------------------------------------------
/golang/addons/template/heapster-controller.template:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ReplicationController
3 | metadata:
4 | labels:
5 | k8s-app: heapster
6 | name: heapster
7 | version: v6
8 | kubernetes.io/cluster-service: "true"
9 | name: heapster
10 | namespace: kube-system
11 | spec:
12 | replicas: 1
13 | selector:
14 | k8s-app: heapster
15 | version: v6
16 | template:
17 | metadata:
18 | labels:
19 | k8s-app: heapster
20 | version: v6
21 | spec:
22 | containers:
23 | - name: heapster
24 | image: {{ .Dockerdomain }}:5000/{{ .Images.heapster }}
25 | imagePullPolicy: Always
26 | command:
27 | - /heapster
28 | - --source=kubernetes:https://kubernetes.default
29 | - --sink=influxdb:http://monitoring-influxdb:8086
30 |
--------------------------------------------------------------------------------
/golang/addons/template/heapster-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | kubernetes.io/cluster-service: 'true'
6 | kubernetes.io/name: Heapster
7 | name: heapster
8 | namespace: kube-system
9 | spec:
10 | ports:
11 | - port: 80
12 | targetPort: 8082
13 | selector:
14 | k8s-app: heapster
15 |
--------------------------------------------------------------------------------
/golang/addons/template/influxdb-grafana-controller.template:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ReplicationController
3 | metadata:
4 | labels:
5 | name: influxGrafana
6 | kubernetes.io/cluster-service: "true"
7 | name: influxdb-grafana
8 | namespace: kube-system
9 | spec:
10 | replicas: 1
11 | selector:
12 | name: influxGrafana
13 | template:
14 | metadata:
15 | labels:
16 | name: influxGrafana
17 | spec:
18 | containers:
19 | - name: influxdb
20 | image: {{ .Dockerdomain }}:5000/{{ .Images.influxdb }}
21 | volumeMounts:
22 | - mountPath: /data
23 | name: influxdb-storage
24 | - name: grafana
25 | image: {{ .Dockerdomain }}:5000/{{ .Images.grafana }}
26 | env:
27 | - name: INFLUXDB_SERVICE_URL
28 | value: http://monitoring-influxdb:8086
29 | # The following env variables are required to make Grafana accessible via
30 | # the kubernetes api-server proxy. On production clusters, we recommend
31 | # removing these env variables, setup auth for grafana, and expose the grafana
32 | # service using a LoadBalancer or a public IP.
33 | - name: GF_AUTH_BASIC_ENABLED
34 | value: "false"
35 | - name: GF_AUTH_ANONYMOUS_ENABLED
36 | value: "true"
37 | - name: GF_AUTH_ANONYMOUS_ORG_ROLE
38 | value: Admin
39 | - name: GF_SERVER_ROOT_URL
40 | value: /
41 | volumeMounts:
42 | - mountPath: /var
43 | name: grafana-storage
44 | volumes:
45 | - name: influxdb-storage
46 | emptyDir: {}
47 | - name: grafana-storage
48 | emptyDir: {}
49 |
--------------------------------------------------------------------------------
/golang/addons/template/influxdb-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | kubernetes.io/cluster-service: "true"
6 | name: monitoring-influxdb
7 | namespace: kube-system
8 | spec:
9 | ports:
10 | - name: http
11 | port: 8083
12 | targetPort: 8083
13 | - name: api
14 | port: 8086
15 | targetPort: 8086
16 | selector:
17 | name: influxGrafana
18 |
--------------------------------------------------------------------------------
/golang/addons/template/ingress.template:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Deployment
3 | metadata:
4 | name: nginx-ingress-controller-v1
5 | namespace: kube-system
6 | labels:
7 | k8s-app: nginx-ingress-lb
8 | version: v1
9 | kubernetes.io/cluster-service: "true"
10 | spec:
11 | replicas: {{ .IngressReplicas }}
12 | template:
13 | metadata:
14 | labels:
15 | k8s-app: nginx-ingress-lb
16 | name: nginx-ingress-lb
17 | spec:
18 | terminationGracePeriodSeconds: 60
19 | {{- if .IngressHostNetwork }}
20 | hostNetwork: true
21 | {{- end }}
22 | containers:
23 | - image: {{ .Dockerdomain }}:5000/{{ .Images.ingress }}
24 | name: nginx-ingress-lb
25 | imagePullPolicy: Always
26 | livenessProbe:
27 | httpGet:
28 | path: /healthz
29 | port: 10254
30 | scheme: HTTP
31 | initialDelaySeconds: 30
32 | timeoutSeconds: 5
33 | # use downward API
34 | env:
35 | - name: POD_NAME
36 | valueFrom:
37 | fieldRef:
38 | fieldPath: metadata.name
39 | - name: POD_NAMESPACE
40 | valueFrom:
41 | fieldRef:
42 | fieldPath: metadata.namespace
43 | {{- if .IngressHostNetwork }}
44 | - name: KUBERNETES_SERVICE_HOST
45 | value: {{ .MasterHostname }}
46 | {{- end }}
47 | ports:
48 | - containerPort: 80
49 | hostPort: 80
50 | - containerPort: 443
51 | hostPort: 443
52 | args:
53 | - /nginx-ingress-controller
54 | - --default-backend-service=kube-system/default-http-backend
55 | nodeSelector:
56 | role: ingress
57 |
--------------------------------------------------------------------------------
/golang/addons/template/kubedns-controller.template:
--------------------------------------------------------------------------------
# Template for the kube-dns ReplicationController addon, rendered by
# golang/addons.  {{ .Dockerdomain }} and {{ .Images.* }} come from
# cluster-desc.yaml (see golang/clusterdesc/config.go).
apiVersion: v1
kind: ReplicationController
metadata:
  name: kube-dns-v20
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    version: v20
    kubernetes.io/cluster-service: "true"
spec:
  replicas: 1
  selector:
    k8s-app: kube-dns
    version: v20
  template:
    metadata:
      labels:
        k8s-app: kube-dns
        version: v20
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
    spec:
      containers:
      # kubedns answers cluster DNS queries on port 10053 (UDP and TCP).
      - name: kubedns
        image: {{ .Dockerdomain }}:5000/{{ .Images.kubedns }}
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        # Liveness is probed through the sidecar healthz container on 8080.
        livenessProbe:
          httpGet:
            path: /healthz-kubedns
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /readiness
            port: 8081
            scheme: HTTP
          initialDelaySeconds: 3
          timeoutSeconds: 5
        args:
        - --domain=cluster.local.
        - --dns-port=10053
        ports:
        - containerPort: 10053
          name: dns-local
          protocol: UDP
        - containerPort: 10053
          name: dns-tcp-local
          protocol: TCP
      # dnsmasq fronts kubedns on the standard DNS port 53 and caches answers.
      - name: dnsmasq
        image: {{ .Dockerdomain }}:5000/{{ .Images.kubednsmasq }}
        livenessProbe:
          httpGet:
            path: /healthz-dnsmasq
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - --cache-size=1000
        - --no-resolv
        - --server=127.0.0.1#10053
        - --log-facility=-
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
      # healthz exposes /healthz-dnsmasq and /healthz-kubedns on 8080 by
      # periodically running the nslookup commands below.
      - name: healthz
        image: {{ .Dockerdomain }}:5000/{{ .Images.healthz }}
        resources:
          limits:
            memory: 50Mi
          requests:
            cpu: 10m
            memory: 50Mi
        args:
        - --cmd=nslookup kubernetes.default.svc.cluster.local 127.0.0.1 >/dev/null
        - --url=/healthz-dnsmasq
        - --cmd=nslookup kubernetes.default.svc.cluster.local 127.0.0.1:10053 >/dev/null
        - --url=/healthz-kubedns
        - --port=8080
        - --quiet
        ports:
        - containerPort: 8080
          protocol: TCP
      dnsPolicy: Default
101 |
--------------------------------------------------------------------------------
/golang/addons/template/kubedns-svc.template:
--------------------------------------------------------------------------------
# Template for the kube-dns Service addon.  {{ .K8sClusterDNS }} comes
# from k8s_cluster_dns in cluster-desc.yaml (Cluster.K8sClusterDNS in
# golang/clusterdesc/config.go).
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "KubeDNS"
spec:
  selector:
    k8s-app: kube-dns
  # Fixed cluster IP so every node can be configured with the same DNS
  # server address.
  clusterIP: {{ .K8sClusterDNS }}
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
21 |
--------------------------------------------------------------------------------
/golang/certgen/certgen.go:
--------------------------------------------------------------------------------
1 | package certgen
2 |
3 | import (
4 | "html/template"
5 | "io"
6 | "io/ioutil"
7 | "log"
8 | "os"
9 | "path"
10 |
11 | "github.com/topicai/candy"
12 | )
13 |
const (
	// masterOpenSSLConfTmpl is the openssl.cnf used when signing the
	// kube-apiserver (master) certificate.  Besides the standard
	// in-cluster service names, it adds the node's hostname and every
	// entry of Execution.KubeMasterDNS / Execution.KubeMasterIP as
	// subjectAltName entries.
	// NOTE(review): these templates are rendered with html/template
	// (see the import list); text/template looks like the intended
	// package since the output is not HTML — confirm before changing.
	masterOpenSSLConfTmpl = `[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]
[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names
[alt_names]
DNS.1 = kubernetes
DNS.2 = kubernetes.default
DNS.3 = kubernetes.default.svc
DNS.4 = kubernetes.default.svc.cluster.local
DNS.5 = {{ .HostName }}
{{ range $index, $element := .KubeMasterDNS }}
DNS.{{ add $index 6 }} = {{ $element }}
{{ end }}
{{ range $index, $element := .KubeMasterIP }}
IP.{{ add $index 1}} = {{ $element }}
{{ end }}
`

	// workerOpenSSLConfTmpl is the openssl.cnf used for worker (node)
	// certificates; the node's own hostname is the only SAN.
	workerOpenSSLConfTmpl = `[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]
[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names
[alt_names]
DNS.1 = {{ .HostName }}
`
)
49 |
// funcMap exposes helper functions to the openssl.cnf templates; "add"
// lets a template compute consecutive DNS.<n> / IP.<n> indices.
var funcMap = template.FuncMap{
	"add": add,
}

// add returns the sum of a and b.
func add(a, b int) int {
	return a + b
}
57 |
// Execution holds the values substituted into the openssl.conf
// templates (masterOpenSSLConfTmpl / workerOpenSSLConfTmpl).
type Execution struct {
	HostName      string   // hostname of the node the cert is issued for
	KubeMasterIP  []string // extra IP SANs for the API-server cert
	KubeMasterDNS []string // extra DNS SANs for the API-server cert
}
64 |
// GenerateRootCA generates ca.key and ca.crt in the directory `out`
// and returns their paths (key first, then cert).
func GenerateRootCA(out string) (string, string) {
	caKey := path.Join(out, "ca.key")
	caCrt := path.Join(out, "ca.crt")
	// Shells out to openssl; Run panics on command failure.
	GenerateCA(caKey, caCrt)

	return caKey, caCrt
}
73 |
// GenerateCA writes a 2048-bit RSA key to caKey and a self-signed
// x509 certificate (CN=kube-ca, valid for 10000 days) to caCrt by
// shelling out to openssl.  It always returns nil: Run panics on
// command failure instead of reporting an error.
func GenerateCA(caKey string, caCrt string) error {
	Run("openssl", "genrsa", "-out", caKey, "2048")
	Run("openssl", "req", "-x509", "-new", "-nodes", "-key", caKey, "-days", "10000", "-out", caCrt, "-subj", "/CN=kube-ca")

	return nil
}
80 |
81 | func openSSLCnfTmpl(master bool) *template.Template {
82 | if master == true {
83 | return template.Must(template.New("").Funcs(funcMap).Parse(masterOpenSSLConfTmpl))
84 | }
85 | return template.Must(template.New("").Funcs(funcMap).Parse(workerOpenSSLConfTmpl))
86 | }
87 |
88 | // Gen generates and returns the TLS certse. It panics for errors.
89 | func Gen(master bool, hostname, caKey, caCrt string, kubeMasterIP, kubeMasterDNS []string) ([]byte, []byte) {
90 | out, e := ioutil.TempDir("", "")
91 | candy.Must(e)
92 | defer func() {
93 | if e = os.RemoveAll(out); e != nil {
94 | log.Printf("Generator.Gen failed deleting %s", out)
95 | }
96 | }()
97 |
98 | cnf := path.Join(out, "openssl.cnf")
99 | key := path.Join(out, "key.pem")
100 | csr := path.Join(out, "csr.pem")
101 | crt := path.Join(out, "crt.pem")
102 |
103 | ec := Execution{
104 | HostName: hostname,
105 | KubeMasterIP: kubeMasterIP,
106 | KubeMasterDNS: kubeMasterDNS,
107 | }
108 |
109 | candy.WithCreated(cnf, func(w io.Writer) {
110 | candy.Must(openSSLCnfTmpl(master).Execute(w, ec))
111 | })
112 | subj := "/CN=" + hostname
113 | if master == true {
114 | subj = "/CN=kube-apiserver"
115 | }
116 | d, _ := ioutil.ReadFile(cnf)
117 | log.Print(string(d))
118 | Run("openssl", "genrsa", "-out", key, "2048")
119 | Run("openssl", "req", "-new", "-key", key, "-out", csr, "-subj", subj, "-config", cnf)
120 | Run("openssl", "x509", "-req", "-in", csr, "-CA", caCrt, "-CAkey", caKey, "-CAcreateserial", "-out", crt, "-days", "365", "-extensions", "v3_req", "-extfile", cnf)
121 |
122 | k, e := ioutil.ReadFile(key)
123 | candy.Must(e)
124 | c, e := ioutil.ReadFile(crt)
125 | candy.Must(e)
126 | return k, c
127 | }
128 |
--------------------------------------------------------------------------------
/golang/certgen/certgen_test.go:
--------------------------------------------------------------------------------
1 | package certgen
2 |
3 | import (
4 | "io/ioutil"
5 | "log"
6 | "os"
7 | "strings"
8 | "testing"
9 |
10 | "github.com/stretchr/testify/assert"
11 | "github.com/topicai/candy"
12 | )
13 |
// TestGen generates a root CA in a temp directory, signs a master
// certificate for a sample host with extra IP/DNS SANs, and checks
// that the returned key and certificate are PEM encoded.
func TestGen(t *testing.T) {
	out, e := ioutil.TempDir("", "")
	candy.Must(e)
	defer func() {
		if e = os.RemoveAll(out); e != nil {
			log.Printf("Generator.Gen failed deleting %s", out)
		}
	}()
	caKey, caCrt := GenerateRootCA(out)

	kubeMasterIP := []string{"10.100.0.1", "192.168.100.1", "192.168.100.2"}
	kubeMasterDNS := []string{"aa-bb-cc-dd-ee", "xx-yy.abc.com"}

	key, crt := Gen(true, "10.10.10.201", caKey, caCrt, kubeMasterIP, kubeMasterDNS)

	// PEM framing is the only portable invariant to assert on.
	assert.True(t, strings.HasPrefix(string(key), "-----BEGIN RSA PRIVATE KEY-----"))
	assert.True(t, strings.HasSuffix(string(key), "-----END RSA PRIVATE KEY-----\n"))

	assert.True(t, strings.HasPrefix(string(crt), "-----BEGIN CERTIFICATE-----"))
	assert.True(t, strings.HasSuffix(string(crt), "-----END CERTIFICATE-----\n"))
}
35 |
--------------------------------------------------------------------------------
/golang/certgen/cmd.go:
--------------------------------------------------------------------------------
1 | package certgen
2 |
3 | import (
4 | "flag"
5 | "fmt"
6 | "log"
7 | "os"
8 | "os/exec"
9 | "strings"
10 | )
11 |
// Silent, when true, makes run capture a command's combined output and
// show it only on failure, instead of streaming to stdout/stderr.
var (
	Silent = flag.Bool("cmd.silent", false, "If cmd.Run displays outputs of commands")
)
15 |
// Run runs a command that doesn't need any input from the stdin.  It
// log.Panics with the stdout and stderr of the command, but only if
// the execution goes wrong.
func Run(name string, arg ...string) {
	run(true, nil, name, arg)
}
22 |
// Try works like Run, but doesn't panic if the command returns a
// non-zero value; the failure is only logged.
func Try(name string, arg ...string) {
	run(false, nil, name, arg)
}
28 |
// RunWithEnv works like Run, but allows the user to set environment
// variables for the child process, in addition to (and overriding)
// those inherited from the parent process.
func RunWithEnv(env map[string]string, name string, arg ...string) {
	run(true, env, name, arg)
}
34 |
// TryWithEnv works like Try, but allows the user to set environment
// variables for the child process, in addition to (and overriding)
// those inherited from the parent process.
func TryWithEnv(env map[string]string, name string, arg ...string) {
	run(false, env, name, arg)
}
40 |
41 | func run(panic bool, env map[string]string, name string, arg []string) {
42 | log.Printf("Running %s %s ...", name, strings.Join(arg, " "))
43 | cmd := exec.Command(name, arg...)
44 |
45 | // Inherit environ from the parent process. Note that, instead
46 | // of appending env to cmd.Env, we rewrite the value of an
47 | // environment varaible in cmd.Env if it is in env. This
48 | // prevents from cases like two GOPATH variables in cmd.Env.
49 | for _, en := range os.Environ() {
50 | kv := strings.Split(en, "=")
51 | if v := env[kv[0]]; v != "" {
52 | en = fmt.Sprintf("%s=%s", kv[0], v)
53 | delete(env, kv[0])
54 | }
55 | cmd.Env = append(cmd.Env, en)
56 | }
57 |
58 | for k, v := range env {
59 | cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", k, v))
60 | }
61 |
62 | p := log.Printf
63 | if panic {
64 | p = log.Panicf
65 | }
66 |
67 | if *Silent {
68 | b, e := cmd.CombinedOutput()
69 | if e != nil {
70 | p("Command \"%s %s\" error: %v\nwith output:\n%s", name, strings.Join(arg, " "), e, string(b))
71 | }
72 | } else {
73 | cmd.Stdout = os.Stdout
74 | cmd.Stderr = os.Stderr
75 | if e := cmd.Run(); e != nil {
76 | p("Command \"%s %s\" error: %v", name, strings.Join(arg, " "), e)
77 | }
78 | }
79 | }
80 |
--------------------------------------------------------------------------------
/golang/certgen/cmd_test.go:
--------------------------------------------------------------------------------
1 | package certgen
2 |
3 | import (
4 | "fmt"
5 | "io/ioutil"
6 | "path"
7 | "testing"
8 |
9 | "github.com/stretchr/testify/assert"
10 | )
11 |
// TestRunAndTry checks that Run panics on a missing binary while Try
// merely logs the failure, and that a successful Run does not panic.
func TestRunAndTry(t *testing.T) {
	*Silent = true
	assert.NotPanics(t, func() { Run("ls", "/") })
	assert.Panics(t, func() { Run("something-not-exists") })
	assert.NotPanics(t, func() { Try("something-not-exists") })
}
18 |
19 | func TestRunWithEnv(t *testing.T) {
20 | tmpdir, _ := ioutil.TempDir("", "")
21 | tmpfile := path.Join(tmpdir, "TestRunWithEnv")
22 |
23 | RunWithEnv(map[string]string{"GOPATH": "/tmp"},
24 | "awk",
25 | fmt.Sprintf("BEGIN{print ENVIRON[\"GOPATH\"] > \"%s\";}", tmpfile))
26 |
27 | b, _ := ioutil.ReadFile(tmpfile)
28 | assert.Equal(t, "/tmp\n", string(b))
29 | }
30 |
--------------------------------------------------------------------------------
/golang/cloud-config-server/README.md:
--------------------------------------------------------------------------------
1 | # Cloud-Config Template Server (CCTS)
2 |
3 | 我们的[自动安装CoreOS](https://github.com/k8sp/bare-metal-coreos)和
4 | [自动部署Kubernetes](https://github.com/k8sp/k8s-coreos-bare-metal)的
5 | 过程需要为机群中每一台机器提供一个 cloud-config 文件。这些机器的
6 | cloud-config 文件大同小异,所以适合写一个 template,然后带入和每台机器
7 | (以及机群)相关的具体信息。cloud-config template server (CCTS) 是一
8 | 个 HTTP server,就是负责 template execution,并且为安装过程提供
9 | cloud-config 文件的。
10 |
11 | ## 配置信息的更新
12 |
13 | 为了方便合作编辑,我们选择用Github来维护cloud-config模板文件
14 | `cloud-config.template` 和配置信息文件 `build-config.yml` 。 通常我们
15 | 会把这两个文件放在一个私有 Github repo里,这样可以通过输入用户名和密码
16 | 访问,或者通过绑定一个 private SSH key 来访问。
17 |
18 | 当CoreOS安装脚本向 CCTS 请求一个特定mac地址的机器的 cloud-config 文件
19 | 的时候,CCTS 访问 Github 获取模板和配置信息,并且执行 template
execution 把配置信息带入模板,并返回 cloud-config。
21 |
22 | 具体地说,每当我们将一台新的(没有操作系统的)机器加入到机群里并且接通
23 | 电源启动机器,这台机器就会通过预先配置好的PXE server引导 CoreOS 来执行
24 | CoreOS/Kubernetes 安装脚本。此时安装脚本向 CCTS 请求 cloud-config。此
25 | 时 CCTS 会按照新机器的mac的地址寻找其配置信息,带入模板,生成和返回
26 | cloud-config。
27 |
28 | 也就是说,新机器启动后安装CoreOS 和 Kubernetes 的时候,会使用最新的模
29 | 板和配置信息。**而配置信息更新之后,只有重装服务器的操作系统时,才会更
30 | 新服务器上的 cloud-config**。
31 |
32 | ## 配置信息的缓存
33 |
34 | 上述过程中有一个潜在问题:如果往机群里加入新机器的时候,恰好不能访问
Github,就没法返回合理的 cloud-config 了。为此我们需要设立一个缓存,
36 | CCTS 每隔一段时间试着访问 Github 看是否有更新,如果有,则下载下来并且
37 | 替换缓存中的内容。
38 |
最简单的缓存机制是 CCTS 在内存中维护,但是如果 CCTS 被重启,则缓存信息
40 | 就丢失了。目前的做法是,将这些配置信息写入本地文件。
41 |
42 | ## 相关算法
43 |
44 | 1. 处理 HTTP request 的伪代码如下
45 |
46 | ```
47 | func HttpHandler(mac_addr) cloud_config {
48 | template, config, timeout := RetriveFromGithub(timeout = 1s)
49 | if !timeout {
50 | WriteToFile(template, config)
51 | } else {
52 | template, config, ok := ReadFromFile()
53 | if !ok {
54 | return error
55 | }
56 | }
57 | return Execute(template, config[mac])
58 | }
59 | ```
60 |
61 | 2. 周期性访问Github并缓存信息至etcd的伪代码如下
62 |
63 | ```
64 | go func() {
65 | for {
66 | Sleep(10m)
67 | template, config := RetriveFromGithub(timeout = infinite)
68 | WriteToFile(template, config)
69 | }
70 | }
71 | ```
72 |
73 | ## Go环境配置
74 |
75 | ```
76 | cd ~
77 | wget https://storage.googleapis.com/golang/go1.6.3.linux-amd64.tar.gz
78 | sudo tar -C /usr/local -xzf go1.6.3.linux-amd64.tar.gz
79 | export PATH=$PATH:/usr/local/go/bin
80 | export GOPATH=
81 | ```
82 |
83 | ## 获取并编译Go代码
84 |
85 | 下面的命令中需要使用到[Github personal access token](https://github.com/k8sp/sextant/issues/29),请根据[这篇文档](https://github.com/k8sp/sextant/issues/29)事先生成。
86 |
87 | ```
88 | git config --global url."https://:x-oauth-basic@github.com/".insteadOf "https://github.com/"
89 | go get github.com/k8sp/sextant/golang
90 |
91 | ```
92 |
93 | ## 配置为系统服务(system unit file)
94 |
95 | ```
96 | sudo vim /etc/systemd/system/cloud-config.service
97 | ```
98 |
99 | **cloud-config.service 内容如下:**
100 |
101 | ```
102 | [Unit]
103 | Description=Cloud config server
104 | After=network.target
105 | Wants=network-online.target
106 |
107 | [Service]
108 | User=root
109 | Group=root
110 | ExecStart=/work/golang/bin/cloud-config-server
111 | RestartSec=5s
112 | Restart=always
113 |
114 | [Install]
115 | WantedBy=multi-user.target
116 | ```
117 | **设置开机启动,并启动服务**
118 | ```
119 | sudo systemctl enable cloud-config.service; sudo systemctl start cloud-config.service;
120 | ```
121 |
122 | (END)
123 |
--------------------------------------------------------------------------------
/golang/cloud-config-server/build.sh:
--------------------------------------------------------------------------------
1 | CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build
2 |
--------------------------------------------------------------------------------
/golang/cloud-config-server/server.go:
--------------------------------------------------------------------------------
1 | // cloud-config-server starts an HTTP server, which can be accessed
2 | // via URLs in the form of
3 | //
4 | // http://?mac=aa:bb:cc:dd:ee:ff
5 | //
// and returns the cloud-config YAML file specifically tailored for
7 | // the node whose primary NIC's MAC address matches that specified in
8 | // above URL.
9 | package main
10 |
11 | import (
12 | "errors"
13 | "flag"
14 | "fmt"
15 | "log"
16 | "net"
17 | "net/http"
18 | "os"
19 |
20 | "github.com/golang/glog"
21 |
22 | "github.com/gorilla/mux"
23 | "github.com/k8sp/sextant/golang/certgen"
24 | cctemplate "github.com/k8sp/sextant/golang/template"
25 | "github.com/topicai/candy"
26 | )
27 |
28 | func main() {
29 | clusterDesc := flag.String("cluster-desc", "./cluster-desc.yml", "Configurations for a k8s cluster.")
30 | ccTemplateDir := flag.String("cloud-config-dir", "./cloud-config.template", "cloud-config file template.")
31 | caCrt := flag.String("ca-crt", "", "CA certificate file, in PEM format")
32 | caKey := flag.String("ca-key", "", "CA private key file, in PEM format")
33 | addr := flag.String("addr", ":8080", "Listening address")
34 | staticDir := flag.String("dir", "./static/", "The directory to serve files from. Default is ./static/")
35 | flag.Parse()
36 |
37 | if len(*caCrt) == 0 || len(*caKey) == 0 {
38 | glog.Info("No ca.pem or ca-key.pem provided, generating now...")
39 | *caKey, *caCrt = certgen.GenerateRootCA("./")
40 | }
41 | // valid caKey and caCrt file is ready
42 | if err := fileExist(*caCrt); err != nil {
43 | glog.Error("No cert of ca.pem has been generated!")
44 | log.Panic(err)
45 | }
46 | if err := fileExist(*caKey); err != nil {
47 | glog.Error("No cert of ca-key.pem has been generated!")
48 | log.Panic(err)
49 | }
50 |
51 | glog.Info("Cloud-config server start Listenning...")
52 | l, e := net.Listen("tcp", *addr)
53 | candy.Must(e)
54 |
55 | // start and run the HTTP server
56 | router := mux.NewRouter().StrictSlash(true)
57 | router.HandleFunc("/cloud-config/{mac}", makeCloudConfigHandler(*clusterDesc, *ccTemplateDir, *caKey, *caCrt))
58 | router.HandleFunc("/centos/post-script/{mac}", makeCentOSPostScriptHandler(*clusterDesc, *ccTemplateDir, *caKey, *caCrt))
59 | router.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(*staticDir))))
60 |
61 | glog.Fatal(http.Serve(l, router))
62 | }
63 |
// makeCloudConfigHandler generates an HTTP handler that serves the
// cloud-config file for the node whose MAC address appears as the
// {mac} path variable.  A malformed MAC or a template failure panics
// via candy.Must; makeSafeHandler turns the panic into an HTTP 500.
func makeCloudConfigHandler(clusterDescFile string, ccTemplateDir string, caKey, caCrt string) http.HandlerFunc {
	return makeSafeHandler(func(w http.ResponseWriter, r *http.Request) {
		hwAddr, err := net.ParseMAC(mux.Vars(r)["mac"])
		candy.Must(err)
		candy.Must(cctemplate.Execute(w, hwAddr.String(), "cc-template", ccTemplateDir, clusterDescFile, caKey, caCrt))
	})
}
73 |
// makeCentOSPostScriptHandler is the CentOS sibling of
// makeCloudConfigHandler: it serves the post-install script for the
// node whose MAC address appears as the {mac} path variable, using the
// "centos-post-script" template.
func makeCentOSPostScriptHandler(clusterDescFile string, ccTemplateDir string, caKey, caCrt string) http.HandlerFunc {
	return makeSafeHandler(func(w http.ResponseWriter, r *http.Request) {
		hwAddr, err := net.ParseMAC(mux.Vars(r)["mac"])
		candy.Must(err)
		candy.Must(cctemplate.Execute(w, hwAddr.String(), "centos-post-script", ccTemplateDir, clusterDescFile, caKey, caCrt))
	})
}
81 |
82 | func makeSafeHandler(h http.HandlerFunc) http.HandlerFunc {
83 | return func(w http.ResponseWriter, r *http.Request) {
84 | defer func() {
85 | if err := recover(); err != nil {
86 | http.Error(w, fmt.Sprint(err), http.StatusInternalServerError)
87 | }
88 | }()
89 | h(w, r)
90 | }
91 | }
92 |
93 | func fileExist(fn string) error {
94 | _, err := os.Stat(fn)
95 | if err != nil || os.IsNotExist(err) {
96 | return errors.New("file " + fn + " is not ready.")
97 | }
98 | return nil
99 | }
100 |
--------------------------------------------------------------------------------
/golang/cloud-config-server/server_test.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "io"
6 | "io/ioutil"
7 | "log"
8 | "net/http"
9 | "net/http/httptest"
10 | "os"
11 | "testing"
12 | "time"
13 |
14 | "github.com/gorilla/mux"
15 | "github.com/k8sp/sextant/golang/certgen"
16 | "github.com/k8sp/sextant/golang/clusterdesc"
17 | "github.com/stretchr/testify/assert"
18 | "github.com/topicai/candy"
19 | "gopkg.in/yaml.v2"
20 | )
21 |
const (
	// Shared fixtures: the cloud-config templates and the sample
	// cluster description exercised by the handler under test.
	templateDir            = "../template/templatefiles"
	clusterDescExampleFile = "../template/cluster-desc.sample.yaml"
	// NOTE(review): loadTimeout is not referenced in this file's visible
	// tests — verify it is used before removing.
	loadTimeout = 15 * time.Second
)
27 |
// TestCloudConfigHandler serves /cloud-config/{mac} through the real
// mux router and checks the response: HTTP 200, a non-empty YAML body,
// and OS-specific content (the etcd2 initial-cluster for CoreOS, a
// write_files path for CentOS).
func TestCloudConfigHandler(t *testing.T) {
	// generate temp ca files for unitest and delete it when exit
	out, e := ioutil.TempDir("", "")
	candy.Must(e)
	defer func() {
		if e = os.RemoveAll(out); e != nil {
			log.Printf("Generator.Gen failed deleting %s", out)
		}
	}()
	caKey, caCrt := certgen.GenerateRootCA(out)
	// load ClusterDesc so expectations below can be derived from the
	// same sample file the handler renders
	config := candy.WithOpened(clusterDescExampleFile, func(r io.Reader) interface{} {
		b, e := ioutil.ReadAll(r)
		candy.Must(e)

		c := &clusterdesc.Cluster{}
		assert.Nil(t, yaml.Unmarshal(b, &c))
		return c
	}).(*clusterdesc.Cluster)
	// test HTTP handler directly
	rr := httptest.NewRecorder()
	req, err := http.NewRequest("GET", "/cloud-config/00:25:90:c0:f7:80", nil)
	if err != nil {
		t.Fatal(err)
	}
	// use mux router.ServeHTTP to test handlers
	// TODO: put route setups in a common function
	router := mux.NewRouter().StrictSlash(true)
	router.HandleFunc("/cloud-config/{mac}",
		makeCloudConfigHandler(clusterDescExampleFile, templateDir, caKey, caCrt))
	router.ServeHTTP(rr, req)

	if status := rr.Code; status != http.StatusOK {
		t.Errorf("handler returned wrong status code: got %v want %v",
			status, http.StatusOK)
	}

	fmt.Println(rr.Body.String())

	if rr.Body.String() != "" {
		// Compare only a small fraction -- the etcd2 initial cluster -- for testing.
		yml := make(map[interface{}]interface{})
		candy.Must(yaml.Unmarshal(rr.Body.Bytes(), yml))
		switch i := config.OSName; i {
		case "CoreOS":
			initialEtcdCluster := yml["coreos"].(map[interface{}]interface{})["etcd2"].(map[interface{}]interface{})["initial-cluster"]
			assert.Equal(t, config.InitialEtcdCluster(), initialEtcdCluster)
		case "CentOS":
			// Only checks that the unit path, when present, is well formed.
			for _, fileinfo := range yml["write_files"].([]interface{}) {
				m := fileinfo.(map[interface{}]interface{})["path"]
				if m == "/etc/systemd/system/setup-network-environment.service" {
					assert.Equal(t, m, "/etc/systemd/system/setup-network-environment.service")
				}
			}
		}
	} else {
		t.Errorf("cloud-config empty.")
	}
}
87 |
--------------------------------------------------------------------------------
/golang/clusterdesc/README.md:
--------------------------------------------------------------------------------
1 | # `clusterdesc`
2 |
3 | The Go package `clusterdesc` defines the schema of
`cluster-desc.yaml`, the file that describes the Kubernetes cluster and
is used as the input to `bsroot.sh`, `cloud-config-server` and `addons`.
6 |
--------------------------------------------------------------------------------
/golang/clusterdesc/config.go:
--------------------------------------------------------------------------------
1 | // Package clusterdesc defines Go structs that configure a Kubernetes
2 | // cluster. The configuration is often encoded and saved as a YAML
3 | // file, which is used by config-bootstrapper and cloud-config-server.
4 | package clusterdesc
5 |
6 | import (
7 | "net"
8 | "strings"
9 |
10 | "github.com/topicai/candy"
11 | )
12 |
// Cluster configures a cluster, which includes: (1) a
// bootstrapper machine, (2) the Kubernetes cluster.
type Cluster struct {
	// Bootstrapper is the IP of the PXE server (DHCP + TFTP,
	// https://github.com/k8sp/bare-metal-coreos), which is also
	// an Nginx server and SkyDNS server
	// (https://github.com/k8sp/sextant/tree/master/dns).
	Bootstrapper string

	// The following are for configuring the DHCP service on the
	// PXE server. For any node, if its MAC address and IP
	// address are enlisted in Node.MAC and Node.IP, the generated
	// /etc/dnsmasq/dnsmasq.conf will bind the IP address to the MAC
	// address; otherwise the node will be assigned an IP from
	// within the range of [IPLow, IPHigh]. In practice, nodes
	// running etcd members require fixed IP addresses.
	Subnet              string
	Netmask             string
	Routers             []string
	Broadcast           string
	Nameservers         []string
	UpstreamNameServers []string
	DomainName          string `yaml:"domainname"`
	IPLow, IPHigh       string // The IP address range of worker nodes.
	Nodes               []Node // Enlist nodes that run Kubernetes/etcd/Ceph masters.

	CoreOSChannel string `yaml:"coreos_channel"` // e.g. stable, alpha, beta

	NginxRootDir string `yaml:"nginx_root_dir"`

	SSHAuthorizedKeys string `yaml:"ssh_authorized_keys"` // So maintainers can SSH to all nodes.
	Dockerdomain      string // hostname of the private Docker registry (port 5000 in the addon templates)

	K8sClusterDNS            string `yaml:"k8s_cluster_dns"` // cluster IP of the kube-dns service
	K8sServiceClusterIPRange string `yaml:"k8s_service_cluster_ip_range"`
	Ceph                     Ceph
	Images                   map[string]string // addon image names keyed by component
	FlannelBackend           string            `yaml:"flannel_backend"`
	IngressHostNetwork       bool              `yaml:"ingress_hostnetwork"`
	CoreOS                   CoreOS
	CoreOSVersion            string `yaml:"coreos_version"`
	GPUDriversVersion        string `yaml:"gpu_drivers_version"`
	OSName                   string `yaml:"os_name"` // "CoreOS" or "CentOS" (see server_test.go)
	// Extra SANs for the API-server certificate (see golang/certgen).
	KubeMasterIP  []string `yaml:"kube_master_ip"`
	KubeMasterDNS []string `yaml:"kube_master_dns"`
	DNSMASQSetNTP bool     `yaml:"set_ntp"`
	DNSMASQLease  string   `yaml:"lease"`
	CentOSYumRepo string   `yaml:"set_yum_repo"`
	StartPXE      bool     `yaml:"start_pxe"`
}
62 |
// CoreOS defines the system related operations, such as: system updates.
type CoreOS struct {
	RebootStrategy string `yaml:"reboot_strategy"`
	StartTime      string `yaml:"start_time"`   // start of the update window
	TimeLength     string `yaml:"time_length"`  // length of the update window
}
69 |
// Ceph consists of configs for ceph deployment (see install-ceph/).
type Ceph struct {
	ZapAndStartOSD bool `yaml:"zap_and_start_osd"`
	OSDJournalSize int  `yaml:"osd_journal_size"`
}
75 |
// Node defines properties of some nodes in the cluster. For example,
// for those nodes on which we install etcd members, we prefer that
// the DHCP server assigns them fixed IPs. This can be done by
// specifying Node.IP. Also, some of the nodes can have a Kubernetes
// master or Ceph monitor installed as well. NOTE: for nodes with IP
// specified in Node.IP, these IPs should not be in the range of
// Cluster.IPLow and Cluster.IPHigh.
// NOTE(review): the comment above refers to a Node.IP field that does
// not exist in this struct — verify where the fixed-IP binding lives.
type Node struct {
	MAC          string // primary NIC MAC address; normalized by Mac()
	CurHostName  string `yaml:"cur_host_name"` // overrides the MAC-derived hostname
	IngressLabel bool   // counted by Cluster.GetIngressReplicas
	CephMonitor  bool   `yaml:"ceph_monitor"`
	KubeMaster   bool   `yaml:"kube_master"`
	EtcdMember   bool   `yaml:"etcd_member"`
	FlannelIface string `yaml:"flannel_iface"`
}
92 |
// Join is defined as a method of Cluster, so it can be called in
// templates to render a []string as "a, b, c".  For more details,
// refer to const tmplDHCPConf.
func (c Cluster) Join(s []string) string {
	return strings.Join(s, ", ")
}
98 |
99 | // GetIngressReplicas return replica number of the ingress node
100 | func (c Cluster) GetIngressReplicas() int {
101 | var cnt = 0
102 | for _, n := range c.Nodes {
103 | if n.IngressLabel {
104 | cnt++
105 | }
106 | }
107 | return cnt
108 | }
109 |
// Hostname is defined as a method of Node, so it can be called in a
// template.  It returns the configured cur_host_name when present;
// otherwise a name derived from the normalized MAC address with ':'
// replaced by '-' (e.g. "aa-bb-cc-dd-ee-ff").  For more details,
// refer to const tmplDHCPConf.
func (n Node) Hostname() string {
	if n.CurHostName != "" {
		return n.CurHostName
	}
	return strings.ToLower(strings.Replace(n.Mac(), ":", "-", -1))
}
118 |
// Mac is defined as a method of Node, so it can be called in a
// template.  It normalizes Node.MAC via net.ParseMAC and panics
// (candy.Must) on a malformed address.  For more details, refer to
// const tmplDHCPConf.
func (n Node) Mac() string {
	hwAddr, err := net.ParseMAC(n.MAC)
	candy.Must(err)
	return hwAddr.String()
}
126 |
--------------------------------------------------------------------------------
/golang/clusterdesc/config_test.go:
--------------------------------------------------------------------------------
1 | package clusterdesc
2 |
3 | import (
4 | "io/ioutil"
5 | "path"
6 | "testing"
7 |
8 | "github.com/topicai/candy"
9 | "gopkg.in/yaml.v2"
10 | )
11 |
// TestDefaultValues is a smoke test: it only checks that the sample
// cluster description unmarshals into Cluster without error.
// NOTE(review): no field values are asserted — consider checking the
// expected defaults explicitly.
func TestDefaultValues(t *testing.T) {
	c := &Cluster{}
	clusterDescExample, e := ioutil.ReadFile(path.Join(candy.GoPath(), clusterDescExampleFile))
	candy.Must(e)
	candy.Must(yaml.Unmarshal([]byte(clusterDescExample), c))

}
19 |
--------------------------------------------------------------------------------
/golang/clusterdesc/etcd.go:
--------------------------------------------------------------------------------
1 | package clusterdesc
2 |
3 | import (
4 | "fmt"
5 | "strings"
6 | )
7 |
8 | // SelectNodes input node role condition,
9 | // ouptut hostname range
10 | func (c *Cluster) SelectNodes(f func(n *Node) string) string {
11 | var ret []string
12 | for i := range c.Nodes {
13 | t := f(&(c.Nodes[i]))
14 | if len(t) > 0 {
15 | ret = append(ret, t)
16 | }
17 | }
18 | return strings.Join(ret, ",")
19 | }
20 |
21 | // InitialEtcdCluster derives the value of command line parameter
22 | // --initial_cluster of etcd from Cluter.Nodes and Node.EtcdMember.
23 | // NOTE: Every node in the cluster will have a etcd daemon running --
24 | // either as a member or as a proxy.
25 | func (c *Cluster) InitialEtcdCluster() string {
26 | return c.SelectNodes(func(n *Node) string {
27 | if n.EtcdMember {
28 | name := n.Hostname()
29 | addr := n.Hostname()
30 | return fmt.Sprintf("%s=http://%s:2380", name, addr)
31 | }
32 | return ""
33 | })
34 | }
35 |
36 | // GetEtcdEndpoints fetch etcd cluster endpoints
37 | func (c *Cluster) GetEtcdEndpoints() string {
38 | return c.SelectNodes(func(n *Node) string {
39 | if n.EtcdMember {
40 | addr := n.Hostname()
41 | return fmt.Sprintf("http://%s:4001", addr)
42 | }
43 | return ""
44 | })
45 | }
46 |
47 | // GetEtcdMachines return the etcd members
48 | func (c *Cluster) GetEtcdMachines() string {
49 | return c.SelectNodes(func(n *Node) string {
50 | if n.EtcdMember {
51 | if len(n.Hostname()) > 0 {
52 | return fmt.Sprintf("http://%s:2379", n.Hostname())
53 | }
54 | }
55 | return ""
56 | })
57 | }
58 |
--------------------------------------------------------------------------------
/golang/clusterdesc/etcd_test.go:
--------------------------------------------------------------------------------
1 | package clusterdesc
2 |
import (
	"io/ioutil"
	"path"
	"strings"
	"testing"

	"github.com/topicai/candy"
	"gopkg.in/yaml.v2"
)
11 |
const (
	// clusterDescExampleFile is resolved relative to $GOPATH (via
	// candy.GoPath) and is shared by the other tests in this package.
	clusterDescExampleFile = "src/github.com/k8sp/sextant/golang/template/cluster-desc.sample.yaml"
)
15 |
16 | func TestInitialEtcdCluster(t *testing.T) {
17 | c := &Cluster{}
18 | clusterDescExample, e := ioutil.ReadFile(path.Join(candy.GoPath(), clusterDescExampleFile))
19 | candy.Must(e)
20 | candy.Must(yaml.Unmarshal([]byte(clusterDescExample), c))
21 | }
22 |
23 | func TestGetEtcdMachines(t *testing.T) {
24 | c := &Cluster{}
25 | clusterDescExample, e := ioutil.ReadFile(path.Join(candy.GoPath(), clusterDescExampleFile))
26 | candy.Must(e)
27 | candy.Must(yaml.Unmarshal([]byte(clusterDescExample), c))
28 | }
29 |
--------------------------------------------------------------------------------
/golang/clusterdesc/linux_distro.go:
--------------------------------------------------------------------------------
1 | package clusterdesc
2 |
3 | import (
4 | "log"
5 | "runtime"
6 | "strings"
7 |
8 | "github.com/wangkuiyi/sh"
9 | )
10 |
// LinuxDistro returns known distribution names, including centos,
// coreos, and ubuntu, if the current system is Linux, or panics
// otherwise.
func LinuxDistro() string {
	if runtime.GOOS != "linux" {
		log.Panicf("Not Linux, but %s", runtime.GOOS)
	}

	// Reads the first line of /etc/os-release; assumes it names the
	// distribution (e.g. NAME=...) — TODO confirm on all targets.
	line := strings.ToLower(<-sh.Head(sh.Cat("/etc/os-release"), 1))

	if strings.Contains(line, "centos") {
		return "centos"
	} else if strings.Contains(line, "ubuntu") {
		return "ubuntu"
	} else if strings.Contains(line, "coreos") {
		return "coreos"
	}
	log.Panicf("Unknown OS %s", line)
	return "" // dummy return; log.Panicf above never returns
}
31 |
--------------------------------------------------------------------------------
/golang/clusterdesc/master.go:
--------------------------------------------------------------------------------
1 | package clusterdesc
2 |
3 | import "fmt"
4 |
// GetMasterHostname fetches the hostnames of all nodes marked
// kube_master, joined with commas by SelectNodes (usually a single
// name).
func (c *Cluster) GetMasterHostname() string {
	return c.SelectNodes(func(n *Node) string {
		if n.KubeMaster {
			addr := n.Hostname()
			// NOTE(review): fmt.Sprintf("%s", addr) is a no-op around a
			// string; kept as-is so the file's fmt import stays used.
			return fmt.Sprintf("%s", addr)
		}
		return ""
	})
}
15 |
--------------------------------------------------------------------------------
/golang/clusterdesc/master_test.go:
--------------------------------------------------------------------------------
1 | package clusterdesc
2 |
3 | import (
4 | "io/ioutil"
5 | "path"
6 | "testing"
7 |
8 | "github.com/topicai/candy"
9 | "gopkg.in/yaml.v2"
10 | )
11 |
12 | func TestGetMasterIP(t *testing.T) {
13 | c := &Cluster{}
14 |
15 | clusterDescExample, e := ioutil.ReadFile(path.Join(candy.GoPath(), clusterDescExampleFile))
16 | candy.Must(e)
17 | candy.Must(yaml.Unmarshal([]byte(clusterDescExample), c))
18 |
19 | }
20 |
--------------------------------------------------------------------------------
/golang/template/cluster-desc.sample.yaml:
--------------------------------------------------------------------------------
1 | # NOTICE:
# 1. If you change the version of a component such as kube or centos, it is
#    better to delete the bsroot directory, because existing files are not
#    updated in place.
5 | # 2. Please check that $mirror_site/$centos_version/(isos|os) exist
6 |
7 | bootstrapper: 10.10.14.253
8 | subnet: 10.10.14.0
9 | netmask: 255.255.255.0
10 | iplow: 10.10.14.1
11 | iphigh: 10.10.14.127
12 | routers: [10.10.14.254]
13 | broadcast: 10.10.14.255
14 | nameservers: [10.10.14.253]
15 | upstreamnameservers: [8.8.8.8, 8.8.4.4]
16 | domainname: "ail.unisound.com"
17 | dockerdomain: "bootstrapper"
18 | k8s_service_cluster_ip_range: 10.100.0.0/24
19 | k8s_cluster_dns: 10.100.0.10
20 |
21 | #start pxe?
22 | start_pxe: n
23 |
24 | # Flannel backend only support "host-gw", "vxlan" and "udp" for now.
25 | flannel_backend: "host-gw"
26 |
27 | # coreos_channel can be configured as stable, alpha, beta.
28 | coreos_channel: "stable"
29 |
30 | # coreos_version can be configured as the special version num or "current"
31 | coreos_version: "1122.2.0"
32 |
33 | # centos version
34 | download_kernel: y
# Configure mirror_site, such as:
36 | # http://mirrors.163.com/centos
37 | mirror_site: http://archive.kernel.org/centos-vault
38 | # Configure the centos version for isos and rpms
39 | centos_version: "7.2.1511"
40 |
41 | # gpu drivers version
42 | set_gpu: y
43 | gpu_drivers_version: "375.26"
44 |
45 | ingress_hostnetwork: true
46 |
47 | # Ntpserver set_ntp option for the cluster configuration.
48 | set_ntp: y
49 | # Set DNSMASQ DHCP least time
50 | lease: "infinite"
51 |
52 | #OS type: CentOS or CoreOS
53 | os_name: "CentOS"
54 |
# CentOS repository: the bootstrapper is the default repository.
# To use a different repository, set the option below.
# Currently only adding the 163 mirror is supported,
# for example:
# set_yum_repo: "mirrors.163.com"
60 | set_yum_repo: "mirrors.aliyun.com"
61 |
62 | # kube master ip, there should be cluster ip
63 | kube_master_ip:
64 | - "10.100.0.1"
65 | - "192.168.61.73"
66 | - "192.168.48.23"
67 | kube_master_dns:
68 | - "aa-bb-cc-dd"
69 |
70 | coreos:
71 | reboot_strategy: "etcd-lock"
72 | start_time: "03:00"
73 | time_length: "3h"
74 |
75 | ceph:
76 | zap_and_start_osd: n
77 | osd_journal_size: 5000
78 |
79 | images:
80 | hyperkube: "gcr.io/google_containers/hyperkube-amd64:v1.6.2"
81 | pause: "gcr.io/google_containers/pause-amd64:3.0"
82 | flannel: "quay.io/coreos/flannel:v0.7.1-amd64"
83 | ingress: "gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.3"
84 | default_backend: "gcr.io/google_containers/defaultbackend:1.0"
85 | kubedns: "gcr.io/google_containers/kubedns-amd64:1.9"
86 | kubednsmasq: "gcr.io/google_containers/kube-dnsmasq-amd64:1.4"
87 | healthz: "gcr.io/google_containers/exechealthz-amd64:1.2"
88 | addon_manager: "gcr.io/google_containers/kube-addon-manager-amd64:v6.4-beta.1"
89 | ceph: "typhoon1986/ceph-daemon:tag-build-master-jewel-ubuntu-14.04-fix370"
90 | ntp: "redaphid/docker-ntp-server"
91 | heapster: "kubernetes/heapster:canary"
92 | grafana: "lupan/heapster_grafana:v2.6.0-2"
93 | influxdb: "lupan/heapster_influxdb:v0.5"
94 | dashboard: "gcr.io/google_containers/kubernetes-dashboard-amd64:v1.6.0"
95 |
96 |
# If you want to use the MAC address as the hostname, please delete
# cur_host_name or set it to ""
99 | nodes:
100 | - mac: "00:25:90:c0:f7:80"
101 | cur_host_name: "node1"
102 | ceph_monitor: n
103 | kube_master: y
104 | etcd_member: y
105 | ingress_label: n
106 | - mac: "0c:c4:7a:82:c5:bc"
107 | cur_host_name: "node2"
108 | ceph_monitor: n
109 | kube_master: n
110 | etcd_member: y
111 | ingress_label: n
112 | - mac: "0c:c4:7a:82:c5:b8"
113 | cur_host_name: "node3"
114 | ceph_monitor: n
115 | kube_master: n
116 | etcd_member: y
117 | ingress_label: y
118 | - mac: "00:25:90:c0:f6:ee"
119 | cur_host_name: "node4"
120 | ceph_monitor: n
121 | kube_master: n
122 | etcd_member: n
123 | ingress_label: n
124 | - mac: "00:25:90:c0:f6:d6"
125 | cur_host_name: "node5"
126 | ceph_monitor: n
127 | kube_master: n
128 | etcd_member: n
129 | ingress_label: n
130 |
131 | ssh_authorized_keys: |1+
132 | - "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAzAy8KEKxDMmjd55RMKLFs8bhNGHgC+pvjbC7BOp4gibozfZAr84nWsfZPs44h1jMq0pX2qzGOpzGEN9RH/ALFCe/OixWkh+INnVTIr8scZr6M+3NzN+chBVGvmIAebUfhXrrP7pUXwK06T2MyT7HaDumfUiHF+n3vNIQTpsxnJA7lmx2IJvz6EujK9le75vJM19MsbUZDk61wuiqhbUZMwQEAKrWsvt9CPhqyHD2Ueul0cG/0fHqOXS/fw7Ikg29rUwdzRuYnvw6izuvBoaHF6nNxR+qSiVi3uyJdNox0/nd87OVvd0fE5xEz+xZ8aFwGyAZabo/KWgcMxk6WN0O1Q== lipeng@Megatron"
133 | - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDVwfLAgA8DICHp0//xfBTgfU34fVOtKpxgrkceC605HGQ6GIPsBHKw6CYeGziwZBDNtMZxTeyQ7+79sqA2VUR2I5nrhlxw/Wc80yTsjbRmcIbr3mUNCd3+cOqnOAsWEucZCHHcNYwUQ3wIOoyP0cBLKI4b25ucgtawxCmB7PJ1Cme+vIf1cVffeQqedu7hmlpQf/DnQc7O1iBRhEAqKgy1Y+hb0Ryc7StAe0nDHCj+2b08vHlNXaS2sJKrXUE0HhCZZP46APaLmZPmmHeoJKx31M0IERWYaZRvLe0Pl7Pp6DueOSJvvNwR5YbNe5aQ2pO3xiv3wCj6n66dlqAhpmmD vien.lee@localhost"
134 | - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCrYpsQVHBRUA/sZfxgK+9jZBGZfoulXXe0faPCGC0b3L6z/qYzJnNFf1d4gj6hQaGyHGvVlr6Kd/6y+0Eour51R2H+8FO+9Y7BaomuluHzm/jcgruAmbVrXZ8vKDDPDx4Lf1tnU1SqPpKFRgdro+BUcj/0LZ45tzsblpA2JOiMJkpqtx17WPKIzc9q5OZKVcV+zh/O+JuKLW/bDIndGiQRVJBGa87ZkCf+fzO5ME4nl7MsG/YY+9J/UkwDbZQd3wFTRqmHncrSupNhu1R2DttP9eWSHQsJIaEXmqKv4p7p4byztix3A/2hBUILZa3iDwxlCZq7OBrQCc/xOI45VMR7 liangjiameng@liangjiameng-Ubuntu"
135 | - "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAr5WIU6wES7WLrWTd3Y+vykAKERYdCzUne3xtlzk9tkcVTQ1IZ5I/cd+x7yw1BM69iRGkGWGRR4Z7k7CzQEbQ8udvK4KEOdZ+JWQfqm8XSlG4CA/cxevu55Trnp7kL4Kb5AtYxnIDhxS6NkrNrte5S4HBpQTA92DXtRW+nplyZ5TAk/qfOMcLoY1tdlTzGdPjWksvb13vvsBv8WkzqIXnBo+2ZJ9ZdieWLJlU0ExPqCH+kdPfv54kf7d8VY8+5jPXZ4IKGOMwi5929iVmkSzrKjvWdMT0aYSAzysohdchLbZcsm4iyQcAwU/J7kkZBbfvOcKr7EGQOif+F1Ag2LtNsQ== liuqs@Megatron"
136 | - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDJ+IorvkvsgiUYzu10DyQl6lIaZWolDLpatZMc+yHJv5YM/j4//NeviAOZAIRS5YjoTbRrr5rGvY6FDX+I1Z+4fXMKYW21HvSSgZBwkZpxSlnkz4s0/osJB6B30EX1FG2bMPXHcMKvVAZCc8InNQoMZd0a0QEHVNw7o2v721IVZQ/DvUk+1zAGn5fjLP8G0sHM1H8y+D8DIuB+8+eoDp1KJ8fl0etkVRLQon94w/EwS9Qwpt2PVYq8W2FK5vs1PSHiLCFpllenQS56dIoFoSt1cZSAy/Uvfdip+/Bb856YIqL896BjpwxkZcJDKZEKGi7wxrqyIyLuR7tp2j/b6WWl Liang@JiaendeMacBook-Air.local"
137 | - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDHHyuMYt9Q4v16EEQt/sDebbg8bM3W2sHDgoAzu0L38L2Ac7fiCo/3yr7r3qu4KAw6BQ5JbiBGEiXfwbsp/mqsQ6lKwGNmUiLUFqrQ7XwAp0388I8j/KF3PXDViKknjCM27rep0+7Hqu7QoQBmeEnNBajyxMESx06muS/1SzqvNMlfd0jSqJh+uaFzokSvOF9Zfe99b+Pj2aEXvu3hB+aWDjNyPrenQ7xOhpDshmkOH/bdqCmCVG+8JDWk9XQ8zdm2eSqyiGamYxmlvp5Dn6N+6o74D/6+i1vzdt2psb4mq74UN8arakVgGqwdmlcM6iSvFO8Ee3y986/+IR2hW4Sp xuerq@bogon"
138 | - "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAllVLACpyGPH3tMLMLIS6YrobXqSbLcCuIcUxQmRjnKb+7sCW6/3LLKZDdCjNdeUM2BCqbSZROh5ojmmd+nkaJDx8wB/mDXI91nnNesDar5agO564WL7h5hhLCW11PuLjgHaw9LWiOHrFOuun1O6O1kJZTsm/Kkfkb/lveWefJN14UBdhf2bk39FdnjMJR9BVLmPFHfHDLAtB++4b0pG3a2EY7erqI7XuzLxHzmvaJbGklE1aj6KreHYGLpzPa6b+s1Q/20gx0jQBSfjFwF64wFdUrWJBJ1LzY3CD7HdWCefcMWcdQmzpQNhAO5qKaIxC2s6skwF3CuXJBnbZ6Q7KKQ== liyijie@Megatron"
139 |
--------------------------------------------------------------------------------
/golang/template/template.go:
--------------------------------------------------------------------------------
1 | package template
2 |
3 | import (
4 | "io"
5 | "io/ioutil"
6 | "strings"
7 | "text/template"
8 |
9 | "github.com/k8sp/sextant/golang/certgen"
10 | "github.com/k8sp/sextant/golang/clusterdesc"
11 | "github.com/topicai/candy"
12 | "gopkg.in/yaml.v2"
13 | )
14 |
// ExecutionConfig is the data object handed to a cloud-config template:
// it aggregates per-node role flags and cluster-wide settings used to
// render the provisioning file for one node (CoreOS or CentOS) of the
// k8s cluster.
type ExecutionConfig struct {
	// Per-node identity and role flags, taken from the node's entry in
	// the cluster description.
	Hostname     string
	IP           string
	CephMonitor  bool
	KubeMaster   bool
	EtcdMember   bool
	IngressLabel bool
	FlannelIface string
	// etcd/cluster topology rendered from the full node list.
	InitialCluster    string
	SSHAuthorizedKeys string
	EtcdEndpoints     string
	MasterIP          string
	MasterHostname    string
	BootstrapperIP    string
	CentOSYumRepo     string
	// TLS material: root CA plus this node's certificate and key,
	// re-indented for embedding into YAML (see GetConfigDataByMac).
	CaCrt string
	Crt   string
	Key   string
	// Cluster-wide settings copied straight from the cluster description.
	Dockerdomain             string
	K8sClusterDNS            string
	K8sServiceClusterIPRange string
	ZapAndStartOSD           bool
	Images                   map[string]string
	FlannelBackend           string
	RebootStrategy           string
	StartTime                string
	TimeLength               string
	CoreOSVersion            string
	GPUDriversVersion        string
	OSName                   string
	StartPXE                 bool
}
48 |
49 | // Execute load template files from "ccTemplateDir", parse clusterDescFile to
50 | // "clusterdesc.Cluster" struct and then run the templateName
51 | func Execute(w io.Writer, mac, templateName, ccTemplateDir, clusterDescFile, caKey, caCrt string) error {
52 | // Load data from file every time, no need to read from remote url
53 | t, parseErr := template.ParseGlob(ccTemplateDir + "/*")
54 | if parseErr != nil {
55 | return parseErr
56 | }
57 | clusterDescBuff, readErr := ioutil.ReadFile(clusterDescFile)
58 | if readErr != nil {
59 | return readErr
60 | }
61 | c := &clusterdesc.Cluster{}
62 | candy.Must(yaml.Unmarshal(clusterDescBuff, c))
63 | confData := GetConfigDataByMac(mac, c, caKey, caCrt)
64 | return t.ExecuteTemplate(w, templateName, *confData)
65 | }
66 |
67 | // GetConfigDataByMac returns data struct for cloud-config template to execute
68 | func GetConfigDataByMac(mac string, clusterdesc *clusterdesc.Cluster, caKey, caCrt string) *ExecutionConfig {
69 | node := getNodeByMAC(clusterdesc, mac)
70 | ca, e := ioutil.ReadFile(caCrt)
71 | var k, c []byte
72 | if e == nil {
73 | k, c = certgen.Gen(false, node.Hostname(), caKey, caCrt, clusterdesc.KubeMasterIP, clusterdesc.KubeMasterDNS)
74 | if node.KubeMaster == true {
75 | k, c = certgen.Gen(true, node.Hostname(), caKey, caCrt, clusterdesc.KubeMasterIP, clusterdesc.KubeMasterDNS)
76 | }
77 | }
78 |
79 | return &ExecutionConfig{
80 | Hostname: node.Hostname(),
81 | CephMonitor: node.CephMonitor,
82 | KubeMaster: node.KubeMaster,
83 | EtcdMember: node.EtcdMember,
84 | IngressLabel: node.IngressLabel,
85 | FlannelIface: node.FlannelIface,
86 | InitialCluster: clusterdesc.InitialEtcdCluster(),
87 | SSHAuthorizedKeys: clusterdesc.SSHAuthorizedKeys,
88 | MasterHostname: clusterdesc.GetMasterHostname(),
89 | EtcdEndpoints: clusterdesc.GetEtcdEndpoints(),
90 | BootstrapperIP: clusterdesc.Bootstrapper,
91 | CentOSYumRepo: clusterdesc.CentOSYumRepo,
92 | Dockerdomain: clusterdesc.Dockerdomain,
93 | K8sClusterDNS: clusterdesc.K8sClusterDNS,
94 | K8sServiceClusterIPRange: clusterdesc.K8sServiceClusterIPRange,
95 | ZapAndStartOSD: clusterdesc.Ceph.ZapAndStartOSD,
96 | Images: clusterdesc.Images,
97 | // Mulit-line context in yaml should keep the indent,
98 | // there is no good idea for templaet package to auto keep the indent so far,
99 | // so insert 6*whitespace at the begging of every line
100 | CaCrt: strings.Join(strings.Split(string(ca), "\n"), "\n "),
101 | Crt: strings.Join(strings.Split(string(c), "\n"), "\n "),
102 | Key: strings.Join(strings.Split(string(k), "\n"), "\n "),
103 | FlannelBackend: clusterdesc.FlannelBackend,
104 | RebootStrategy: clusterdesc.CoreOS.RebootStrategy,
105 | StartTime: clusterdesc.CoreOS.StartTime,
106 | TimeLength: clusterdesc.CoreOS.TimeLength,
107 | CoreOSVersion: clusterdesc.CoreOSVersion,
108 | GPUDriversVersion: clusterdesc.GPUDriversVersion,
109 | OSName: clusterdesc.OSName,
110 | StartPXE: clusterdesc.StartPXE,
111 | }
112 | }
113 |
114 | func getNodeByMAC(c *clusterdesc.Cluster, mac string) clusterdesc.Node {
115 | for _, n := range c.Nodes {
116 | if n.Mac() == mac {
117 | return n
118 | }
119 | }
120 | return clusterdesc.Node{MAC: mac, CephMonitor: false, KubeMaster: false, EtcdMember: false}
121 | }
122 |
--------------------------------------------------------------------------------
/golang/template/template_test.go:
--------------------------------------------------------------------------------
1 | package template
2 |
3 | import (
4 | "bytes"
5 | "io"
6 | "io/ioutil"
7 | "log"
8 | "os"
9 | "testing"
10 | "text/template"
11 |
12 | "github.com/k8sp/sextant/golang/certgen"
13 | "github.com/k8sp/sextant/golang/clusterdesc"
14 | "github.com/stretchr/testify/assert"
15 | "github.com/topicai/candy"
16 | "gopkg.in/yaml.v2"
17 | )
18 |
// TestExecute renders the sample cluster description through the
// cloud-config templates for a known MAC address and sanity-checks the
// generated output.
func TestExecute(t *testing.T) {
	// Throwaway directory holding the generated root CA.
	out, err := ioutil.TempDir("", "")
	candy.Must(err)
	defer func() {
		if e := os.RemoveAll(out); e != nil {
			log.Printf("Generator.Gen failed deleting %s", out)
		}
	}()
	caKey, caCrt := certgen.GenerateRootCA(out)

	// Parse the sample cluster description shipped next to this test.
	config := candy.WithOpened("./cluster-desc.sample.yaml", func(r io.Reader) interface{} {
		b, e := ioutil.ReadAll(r)
		candy.Must(e)

		c := &clusterdesc.Cluster{}
		assert.Nil(t, yaml.Unmarshal(b, &c))
		return c
	}).(*clusterdesc.Cluster)

	tmpl, e := template.ParseGlob("./templatefiles/*")
	candy.Must(e)
	var ccTmpl bytes.Buffer
	// 00:25:90:c0:f7:80 is node1 in the sample file (kube_master: y).
	confData := GetConfigDataByMac("00:25:90:c0:f7:80", config, caKey, caCrt)
	candy.Must(tmpl.ExecuteTemplate(&ccTmpl, "cc-template", *confData))
	// The rendered cloud-config must itself be valid YAML.
	yml := make(map[interface{}]interface{})
	candy.Must(yaml.Unmarshal(ccTmpl.Bytes(), yml))
	switch i := config.OSName; i {
	case "CoreOS":
		// CoreOS output: the etcd2 initial-cluster token must survive rendering.
		initialEtcdCluster := yml["coreos"].(map[interface{}]interface{})["etcd2"].(map[interface{}]interface{})["initial-cluster-token"]
		assert.Equal(t, initialEtcdCluster, "etcd-cluster-1")
	case "CentOS":
		// CentOS output: the expected systemd unit must appear in write_files.
		// NOTE(review): the assertion only fires when the path is found, so a
		// missing unit is not detected — confirm this is intentional.
		for _, fileinfo := range yml["write_files"].([]interface{}) {
			m := fileinfo.(map[interface{}]interface{})["path"]
			if m == "/etc/systemd/system/setup-network-environment.service" {
				assert.Equal(t, m, "/etc/systemd/system/setup-network-environment.service")
			}
		}
	}

}
59 |
--------------------------------------------------------------------------------
/golang/template/templatefiles/cc-centos-post.template:
--------------------------------------------------------------------------------
1 | {{ define "centos-post-script" }}#!/usr/bin/env bash
2 |
3 | set_hostname() {
default_iface=$(awk '$2 == "00000000" { print $1 }' /proc/net/route | uniq)
5 |
6 | printf "Default interface: ${default_iface}\n"
7 | default_iface=`echo ${default_iface} | awk '{ print $1 }'`
8 | mac_addr=`ip addr show dev ${default_iface} | awk '$1 ~ /^link\// { print $2 }'`
9 |
10 | printf "Interface: ${default_iface} MAC address: ${mac_addr}\n"
11 |
12 | hostname_str=${mac_addr//:/-}
13 | echo ${hostname_str} >/etc/hostname
14 | }
15 |
16 | set_docker() {
17 | # For install multi-kernel, set the first line kernel in grub list as default to boot
18 | grub2-set-default 0
19 | # load overlay for docker storage driver
20 | echo "overlay" > /etc/modules-load.d/overlay.conf
# set overlay as docker storage driver instead of devicemapper (the default one on centos)
22 | sed -i -e '/^ExecStart=/ s/$/ --storage-driver=overlay/' /usr/lib/systemd/system/docker.service
23 |
24 | # set iptables to false for docker version >=1.13, to solve the iptables FORWARD drop
25 | sed -i -e '/^ExecStart=/ s/$/ --iptables=false/' /usr/lib/systemd/system/docker.service
26 |
27 | # set path
28 | if [[ ! -z "${docker_data_path}" ]]; then
29 | sed -i -e "/^ExecStart=/ s@\$@ -g ${docker_data_path} @" /usr/lib/systemd/system/docker.service
30 | fi
31 |
32 | # Explicit Docker option
33 | sed -i -e '/^ExecStart=/ s/$/ $DOCKER_OPT_BIP $DOCKER_OPT_IPMASQ $DOCKER_OPT_MTU $DOCKER_NETWORK_OPTIONS/' /usr/lib/systemd/system/docker.service
34 | }
35 |
36 | set_ssh_config() {
37 | mkdir -p /root/.ssh
38 | chmod 700 /root/.ssh
39 | cat > /root/.ssh/config </etc/yum.repos.d/Local.repo << EOF
55 | [LocalRepo]
56 | name=Local Repository
57 | baseurl=http://{{ .BootstrapperIP }}/static/CentOS7/dvd_content/
58 | enabled=1
59 | gpgcheck=0
60 |
61 | EOF
62 |
63 | else
64 |
65 | cp /etc/yum.repos.d/CentOS-Base.repo{,.bak}
66 | sed -i -e 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Base.repo
67 | sed -i -e 's@#baseurl=http://[^/]*@baseurl=http://{{ .CentOSYumRepo }}@g' /etc/yum.repos.d/CentOS-Base.repo
68 |
69 | fi
70 |
71 | #yum clean all
72 | #yum makecache
73 |
74 | }
75 |
76 | if [[ "${set_mac_hostname}" != " n" ]]; then
77 | set_hostname
78 | fi
79 |
80 | set_docker
81 | set_ssh_config
82 | set_yum_repo
83 |
84 | {{ end }}
85 |
--------------------------------------------------------------------------------
/golang/template/templatefiles/cc-centos.template:
--------------------------------------------------------------------------------
1 | {{ define "centos" }}
2 | - path: /usr/lib/systemd/system/etcd.service
3 | owner: root
4 | permissions: 0644
5 | content: |
6 | [Unit]
7 | Description=etcd
8 | After=network.target
9 | After=network-online.target
10 | Wants=network-online.target
11 |
12 | [Service]
13 | Type=notify
14 | User=etcd
15 | Environment=ETCD_NAME=%H
16 | Environment=ETCD_DATA_DIR=/var/lib/etcd
17 | Environment=ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379,http://0.0.0.0:4001
18 | {{- if .EtcdMember }}
19 | Environment=ETCD_INITIAL_CLUSTER_TOKEN=etcd-cluster-1
20 | Environment=ETCD_INITIAL_ADVERTISE_PEER_URLS=http://{{ .Hostname }}:2380
21 | Environment=ETCD_LISTEN_PEER_URLS=http://{{ .Hostname }}:2380,http://{{ .Hostname }}:7001
22 | Environment=ETCD_ADVERTISE_CLIENT_URLS=http://{{ .Hostname }}:2379
23 | Environment=ETCD_INITIAL_CLUSTER_STATE=new
24 | {{- else }}
25 | Environment=ETCD_PROXY=on
26 | {{- end }}
27 | Environment=ETCD_INITIAL_CLUSTER={{ .InitialCluster }}
28 | ExecStart=/usr/bin/etcd
29 | Restart=always
30 | RestartSec=10s
31 | LimitNOFILE=40000
32 | TimeoutStartSec=0
33 |
34 | [Install]
35 | WantedBy=multi-user.target
36 | - path: /usr/lib/systemd/system/flanneld.service
37 | owner: root
38 | permissions: 0644
39 | content: |
40 | # /usr/lib/systemd/system/flanneld.service
41 | [Unit]
42 | Description=Flanneld overlay address etcd agent
43 | After=network.target
44 | After=network-online.target
45 | Wants=network-online.target
46 | After=etcd.service
47 | Before=docker.service
48 |
49 | [Service]
50 | Type=notify
51 | RestartSec=5
52 | EnvironmentFile=/etc/sysconfig/flanneld
53 | EnvironmentFile=-/etc/sysconfig/docker-network
54 | {{- if .KubeMaster }}
55 | ExecStartPre=/usr/bin/etcdctl set /flannel/network/config '{ "Network": "10.1.0.0/16", "Backend": {"Type": "{{ .FlannelBackend }}"}}'
56 | {{- end }}
57 |
58 | {{- if .FlannelIface }}
59 | ExecStart=/usr/bin/flanneld -etcd-endpoints=http://{{ .Hostname }}:2379 -etcd-prefix=/flannel/network -iface={{ .FlannelIface }}
60 | {{- else }}
61 | ExecStart=/usr/bin/flanneld -etcd-endpoints=http://{{ .Hostname }}:2379 -etcd-prefix=/flannel/network $FLANNEL_OPTIONS
62 | {{- end }}
63 | ExecStartPost=/usr/libexec/flannel/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/docker
64 | Restart=always
65 |
66 | [Install]
67 | WantedBy=multi-user.target
68 | RequiredBy=docker.service
69 | - path: /etc/systemd/system/settimezone.service
70 | owner: root
71 | permissions: 0644
72 | content: |
73 | [Unit]
74 | Description=Set the time zone
75 |
76 | [Service]
77 | ExecStart=/usr/bin/timedatectl set-timezone Asia/Shanghai
78 | RemainAfterExit=no
79 | Type=oneshot
80 | [Install]
81 | WantedBy=multi-user.target
82 |
83 | - path: /etc/systemd/system/setup-network-environment.service
84 | owner: root
85 | permissions: 0644
86 | content: |
87 | [Unit]
88 | Description=Setup Network Environment
89 | Documentation=https://github.com/kelseyhightower/setup-network-environment
90 | Requires=network-online.target
91 | After=network-online.target
92 | [Service]
93 | ExecStartPre=-/usr/bin/mkdir -p /opt/bin
94 | ExecStartPre=-/usr/bin/wget --quiet -O /opt/bin/setup-network-environment http://{{ .BootstrapperIP }}/static/setup-network-environment-1.0.1
95 | ExecStartPre=-/usr/bin/chmod +x /opt/bin/setup-network-environment
96 | ExecStart=/opt/bin/setup-network-environment
97 | RemainAfterExit=yes
98 | Type=oneshot
99 | [Install]
100 | WantedBy=multi-user.target
101 | {{- if .CephMonitor }}
102 | - path: /etc/systemd/system/ceph-mon.service
103 | owner: root
104 | permissions: 0644
105 | content: |
106 | [Unit]
107 | Description=Install ceph mon services
108 | Requires=etcd.service
109 | After=etcd.service
110 | Requires=network.target
111 | After=network.target
112 |
113 | [Service]
114 | ExecStartPre=/usr/bin/mkdir -p /opt/bin
ExecStart=/bin/bash -c 'while ! etcdctl cluster-health >/dev/null 2>&1 ; do sleep 5; done'
116 | ExecStart=/usr/bin/wget --quiet -O /opt/bin/install-mon.sh http://{{ .BootstrapperIP }}/static/ceph/install-mon.sh
117 | ExecStart=/bin/bash /opt/bin/install-mon.sh {{ .Dockerdomain }}:5000
118 | RemainAfterExit=no
119 | Type=oneshot
120 |
121 | [Install]
122 | WantedBy=multi-user.target
123 | {{- end }}
124 |
125 | {{- if .ZapAndStartOSD }}
126 | - path: /etc/systemd/system/ceph-osd.service
127 | owner: root
128 | permissions: 0644
129 | content: |
130 | [Unit]
131 | Description=Install ceph osd service
132 | Requires=etcd.service
133 | After=etcd.service
134 | Requires=network.target
135 | After=network.target
136 |
137 | [Service]
138 | ExecStartPre=/usr/bin/mkdir -p /opt/bin
ExecStart=/bin/bash -c 'while ! etcdctl cluster-health >/dev/null 2>&1 ; do sleep 5; done'
140 | ExecStart=/usr/bin/wget -O /opt/bin/install-osd.sh http://{{ .BootstrapperIP }}/static/ceph/install-osd.sh
141 | ExecStart=/bin/bash /opt/bin/install-osd.sh {{ .Dockerdomain }}:5000
142 | RemainAfterExit=no
143 | Type=oneshot
144 |
145 | [Install]
146 | WantedBy=multi-user.target
147 | {{- end}}
148 | {{- if .KubeMaster }}
149 | - path: /etc/systemd/system/kube-addons.service
150 | owner: root
151 | permissions: 0644
152 | content: |
153 | [Unit]
154 | Description=Install Kubernetes addons
155 | After=kubelet.service
156 | Requires=kubelet.service
157 | [Service]
158 | ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/addons
159 | ExecStartPre=/usr/bin/wget -P /etc/kubernetes/addons/ http://{{ .BootstrapperIP }}/static/addons-config/*.yaml
160 | ExecStart=/usr/bin/docker run --rm --net=host \
161 | -e KUBECTL_OPTS=--server=http://{{ .MasterHostname }}:8080 \
162 | -v /etc/kubernetes/addons/:/etc/kubernetes/addons/ \
163 | {{ .Dockerdomain }}:5000/{{ .Images.addon_manager }}
164 | [Install]
165 | WantedBy=multi-user.target
166 |
167 | - path: /etc/systemd/system/kubelet.service
168 | owner: root
169 | permissions: 0644
170 | content: |
171 | [Unit]
172 | Description=Kubernetes Kubelet
173 | Documentation=https://github.com/kubernetes/kubernetes
174 | Requires=docker.service
175 | After=docker.service
176 | [Service]
177 | Environment=KUBELET_VERSION=v1.2.4_coreos.1
178 | EnvironmentFile=/etc/network-environment
179 | ExecStartPre=/bin/wget --quiet -O /opt/bin/kubelet http://{{ .BootstrapperIP }}/static/kubelet
180 | ExecStartPre=/usr/bin/chmod +x /opt/bin/kubelet
181 | ExecStart=/opt/bin/kubelet \
182 | --pod_infra_container_image={{ .Dockerdomain }}:5000/{{ .Images.pause }} \
183 | --register-node=true \
184 | --api-servers=http://{{ .MasterHostname }}:8080 \
185 | --network-plugin-dir=/etc/kubernetes/cni/net.d \
186 | --network-plugin=${NETWORK_PLUGIN} \
187 | --register-schedulable=false \
188 | --allow-privileged=true \
189 | --pod-manifest-path=/etc/kubernetes/manifests \
190 | --hostname-override={{ .MasterHostname }} \
191 | --cluster-dns={{ .K8sClusterDNS }} \
192 | --cluster-domain=cluster.local \
193 | --cgroup-driver=systemd \
194 | --feature-gates=Accelerators=true
195 | Restart=always
196 | RestartSec=10
197 | [Install]
198 | WantedBy=multi-user.target
199 | {{- else }}
200 | - path: /etc/systemd/system/kubelet.service
201 | owner: root
202 | permissions: 0644
203 | content: |
204 | [Unit]
205 | Description=Kubernetes Kubelet
206 | Documentation=https://github.com/kubernetes/kubernetes
207 | After=docker.service
208 | Requires=docker.service
209 | [Service]
210 | EnvironmentFile=/etc/network-environment
211 | Environment=KUBELET_VERSION=v1.2.4_coreos.1
212 | ExecStartPre=/bin/wget --quiet -O /opt/bin/kubelet http://{{ .BootstrapperIP }}/static/kubelet
213 | ExecStartPre=/usr/bin/chmod +x /opt/bin/kubelet
214 | ExecStart=/opt/bin/kubelet \
215 | --pod_infra_container_image={{ .Dockerdomain }}:5000/{{ .Images.pause }} \
216 | --address=0.0.0.0 \
217 | --allow-privileged=true \
218 | --cluster-dns={{ .K8sClusterDNS }} \
219 | --cluster-domain=cluster.local \
220 | --pod-manifest-path=/etc/kubernetes/manifests \
221 | --hostname-override={{ .Hostname }} \
222 | --api-servers=https://{{ .MasterHostname }}:443 \
223 | --kubeconfig=/etc/kubernetes/worker-kubeconfig.yaml \
224 | --tls-private-key-file=/etc/kubernetes/ssl/worker-key.pem \
225 | --tls-cert-file=/etc/kubernetes/ssl/worker.pem \
226 | --feature-gates=Accelerators=true \
227 | {{- if .IngressLabel }}
228 | --logtostderr=true \
229 | --node-labels=role=ingress \
230 | {{- else }}
231 | --logtostderr=true \
232 | {{- end }}
233 | --network-plugin= \
234 | --network-plugin-dir=/etc/cni/net.d
235 | Restart=always
236 | RestartSec=10
237 | [Install]
238 | WantedBy=multi-user.target
239 | {{- end}}
240 | ssh_authorized_keys:
241 | {{ .SSHAuthorizedKeys }}
242 | runcmd:
243 | - systemctl daemon-reload
244 | {{- if .CephMonitor }}
245 | - systemctl enable ceph-mon.service
246 | {{- end}}
247 | {{- if .ZapAndStartOSD }}
248 | - systemctl enable ceph-osd.service
249 | {{- end}}
250 | {{- if .KubeMaster }}
251 | - systemctl enable etcd.service flanneld.service kubelet.service setup-network-environment.service kube-addons.service settimezone.service
252 | {{- else }}
253 | - systemctl enable etcd.service flanneld.service kubelet.service setup-network-environment.service settimezone.service
254 | {{- end }}
255 | - reboot
256 | {{ end }}
257 |
--------------------------------------------------------------------------------
/golang/template/templatefiles/cc-common.template:
--------------------------------------------------------------------------------
1 | {{ define "common" }}
2 | - path: /etc/modules-load.d/rbd.conf
3 | content: rbd
4 | - path: /etc/kubernetes/ssl/ca.pem
5 | owner: root
6 | permissions: 0600
7 | content: |
8 | {{ .CaCrt }}
9 | - path: /etc/docker/certs.d/{{ .Dockerdomain }}:5000/ca.crt
10 | owner: root
11 | permissions: 0600
12 | content: |
13 | {{ .CaCrt }}
14 | {{- if .StartPXE }}
15 | - path: /etc/hosts
16 | owner: root
17 | content: |
18 | 127.0.0.1 localhost
19 | {{ .BootstrapperIP }} {{ .Dockerdomain }}
20 | {{- end}}
21 | {{/* ********************************************************* */}}
22 | {{- if .KubeMaster }}
23 | - path: /etc/kubernetes/ssl/apiserver.pem
24 | owner: root
25 | permissions: 0600
26 | content: |
27 | {{ .Crt }}
28 | - path: /etc/kubernetes/ssl/apiserver-key.pem
29 | owner: root
30 | permissions: 0600
31 | content: |
32 | {{ .Key }}
33 | - path: /etc/kubernetes/manifests/kubernetes_master.manifest
34 | owner: root
35 | permissions: 0644
36 | content: |
37 | apiVersion: v1
38 | kind: Pod
39 | metadata:
40 | name: kube-controller
41 | spec:
42 | hostNetwork: true
43 | volumes:
44 | - name: "etc-kubernetes"
45 | hostPath:
46 | path: "/etc/kubernetes"
47 | - name: ssl-certs-kubernetes
48 | hostPath:
49 | path: /etc/kubernetes/ssl
50 | - name: "ssl-certs-host"
51 | hostPath:
52 | path: "/usr/share/ca-certificates"
53 | - name: "var-run-kubernetes"
54 | hostPath:
55 | path: "/var/run/kubernetes"
56 | - name: "etcd-datadir"
57 | hostPath:
58 | path: "/var/lib/etcd"
59 | - name: "usr"
60 | hostPath:
61 | path: "/usr"
62 | - name: "lib64"
63 | hostPath:
64 | path: "/lib64"
65 | containers:
66 | - name: kube-apiserver
67 | image: {{ .Dockerdomain }}:5000/{{ .Images.hyperkube }}
68 | command:
69 | - /hyperkube
70 | - apiserver
71 | - --allow-privileged=true
72 | - --bind-address=0.0.0.0
73 | - --insecure-bind-address=0.0.0.0
74 | - --secure-port=443
75 | - --etcd-servers=http://{{ .MasterHostname }}:4001
76 | - --service-cluster-ip-range={{ .K8sServiceClusterIPRange }}
77 | - --admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota
78 | - --service-account-key-file=/etc/kubernetes/ssl/apiserver-key.pem
79 | - --tls-private-key-file=/etc/kubernetes/ssl/apiserver-key.pem
80 | - --tls-cert-file=/etc/kubernetes/ssl/apiserver.pem
81 | - --client-ca-file=/etc/kubernetes/ssl/ca.pem
82 | - --logtostderr=true
83 | ports:
84 | - containerPort: 443
85 | hostPort: 443
86 | name: https
87 | - containerPort: 8080
88 | hostPort: 8080
89 | name: local
90 | volumeMounts:
91 | - mountPath: /etc/kubernetes/ssl
92 | name: ssl-certs-kubernetes
93 | readOnly: true
94 | - mountPath: /etc/ssl/certs
95 | name: ssl-certs-host
96 | readOnly: true
97 | - mountPath: /etc/kubernetes
98 | name: "etc-kubernetes"
99 | - mountPath: /var/run/kubernetes
100 | name: "var-run-kubernetes"
101 |
102 | - name: kube-controller-manager
103 | image: {{ .Dockerdomain }}:5000/{{ .Images.hyperkube }}
104 | command:
105 | - /hyperkube
106 | - controller-manager
107 | - --master=http://127.0.0.1:8080
108 | - --service-account-private-key-file=/etc/kubernetes/ssl/apiserver-key.pem
109 | - --root-ca-file=/etc/kubernetes/ssl/ca.pem
110 | livenessProbe:
111 | httpGet:
112 | host: 127.0.0.1
113 | path: /healthz
port: 10252
115 | initialDelaySeconds: 15
116 | timeoutSeconds: 1
117 | volumeMounts:
118 | - mountPath: /etc/kubernetes/ssl
119 | name: ssl-certs-kubernetes
120 | readOnly: true
121 | - mountPath: /etc/ssl/certs
122 | name: ssl-certs-host
123 | readOnly: true
124 |
125 | - name: kube-scheduler
126 | image: {{ .Dockerdomain }}:5000/{{ .Images.hyperkube }}
127 | command:
128 | - /hyperkube
129 | - scheduler
130 | - --master=http://127.0.0.1:8080
131 | livenessProbe:
132 | httpGet:
133 | host: 127.0.0.1
134 | path: /healthz
135 | port: 10251
136 | initialDelaySeconds: 15
137 | timeoutSeconds: 1
138 |
139 | - name: kube-proxy
140 | image: {{ .Dockerdomain }}:5000/{{ .Images.hyperkube }}
141 | command:
142 | - /hyperkube
143 | - proxy
144 | - --master=http://127.0.0.1:8080
145 | - --proxy-mode=iptables
146 | securityContext:
147 | privileged: true
148 | volumeMounts:
149 | - mountPath: /etc/ssl/certs
150 | name: ssl-certs-host
151 | readOnly: true
152 | {{/* ********************************************************* */}}
153 | {{- else }}
154 | - path: /etc/kubernetes/ssl/worker.pem
155 | owner: root
156 | permissions: 0600
157 | content: |
158 | {{ .Crt }}
159 | - path: /etc/kubernetes/ssl/worker-key.pem
160 | owner: root
161 | permissions: 0600
162 | content: |
163 | {{ .Key }}
164 | - path: /etc/kubernetes/worker-kubeconfig.yaml
165 | owner: root
166 | permissions: 0755
167 | content: |
168 | apiVersion: v1
169 | kind: Config
170 | clusters:
171 | - name: local
172 | cluster:
173 | certificate-authority: /etc/kubernetes/ssl/ca.pem
174 | users:
175 | - name: kubelet
176 | user:
177 | client-certificate: /etc/kubernetes/ssl/worker.pem
178 | client-key: /etc/kubernetes/ssl/worker-key.pem
179 | contexts:
180 | - context:
181 | cluster: local
182 | user: kubelet
183 | name: kubelet-context
184 | current-context: kubelet-context
185 |
186 | - path: /etc/kubernetes/manifests/kube-proxy.manifest
187 | owner: root
188 | permissions: 0755
189 | content: |
190 | apiVersion: v1
191 | kind: Pod
192 | metadata:
193 | name: kube-proxy
194 | spec:
195 | hostNetwork: true
196 | containers:
197 | - name: kube-proxy
198 | image: {{ .Dockerdomain }}:5000/{{ .Images.hyperkube }}
199 | command:
200 | - /hyperkube
201 | - proxy
202 | - --master=https://{{ .MasterHostname }}:443
203 | - --kubeconfig=/etc/kubernetes/worker-kubeconfig.yaml
204 | - --proxy-mode=iptables
205 | securityContext:
206 | privileged: true
207 | volumeMounts:
208 | - mountPath: /etc/ssl/certs
209 | name: "ssl-certs"
210 | - mountPath: /etc/kubernetes/worker-kubeconfig.yaml
211 | name: "kubeconfig"
212 | readOnly: true
213 | - mountPath: /etc/kubernetes/ssl
214 | name: "etc-kube-ssl"
215 | readOnly: true
216 | volumes:
217 | - name: "ssl-certs"
218 | hostPath:
219 | path: "/usr/share/ca-certificates"
220 | - name: "kubeconfig"
221 | hostPath:
222 | path: "/etc/kubernetes/worker-kubeconfig.yaml"
223 | - name: "etc-kube-ssl"
224 | hostPath:
225 | path: "/etc/kubernetes/ssl"
226 | {{/* ********************************************************* */}}
227 | {{- end }}
228 | {{ end }}
229 |
--------------------------------------------------------------------------------
/golang/template/templatefiles/cc-coreos.template:
--------------------------------------------------------------------------------
1 | {{ define "coreos" }}
2 | coreos:
3 | etcd2:
4 | name: "%H"
5 | listen-client-urls: "http://0.0.0.0:2379,http://0.0.0.0:4001"
6 | initial-cluster: "{{ .InitialCluster }}"
7 | {{- if .EtcdMember }}
8 | initial-cluster-token: "etcd-cluster-1"
9 | initial-advertise-peer-urls: "http://{{ .Hostname }}:2380"
10 | listen-peer-urls: "http://{{ .Hostname }}:2380,http://{{ .Hostname }}:7001"
11 | advertise-client-urls: "http://{{ .Hostname }}:2379"
12 | initial-cluster-state: new
13 | {{- else }}
14 | proxy: on
15 | {{- end }}
16 | flannel:
17 | {{- if .FlannelIface }}
18 | interface: "{{ .FlannelIface }}"
19 | etcd_endpoints: "{{ .EtcdEndpoints }}"
20 | {{- else }}
21 | etcd_endpoints: "{{ .EtcdEndpoints }}"
22 | {{- end }}
23 | {{- if eq .RebootStrategy "off" }}
24 | update:
25 | reboot-strategy: {{ .RebootStrategy }}
26 | {{- else }}
27 | update:
28 | reboot-strategy: {{ .RebootStrategy }}
29 | locksmith:
30 | window_start: {{ .StartTime }}
31 | window_length: {{ .TimeLength }}
32 | {{- end }}
33 | units:
34 | - name: 00-eth0.network
35 | runtime: true
36 | content: |
37 | [Match]
38 | Name=eth0
39 | [Network]
40 | DHCP=ipv4
41 | [DHCPv4]
42 | UseHostname=false
43 | - name: "systemd-modules-load.service"
44 | command: restart
45 | - name: "etcd2.service"
46 | command: "start"
47 | - name: "fleet.service"
48 | command: "start"
49 | - name: "early-docker.service"
50 | command: "start"
51 | runtime: true
52 | - name: "flanneld.service"
53 | command: "start"
54 | content: |
55 | [Unit]
56 | Description=Network fabric for containers
57 | Documentation=https://github.com/coreos/flannel
58 | Requires=early-docker.service
59 | After=etcd.service etcd2.service early-docker.service
60 | Before=early-docker.target
61 |
62 | [Service]
63 | Type=notify
64 | Restart=always
65 | RestartSec=5
66 | Environment="TMPDIR=/var/tmp/"
67 | Environment="DOCKER_HOST=unix:///var/run/early-docker.sock"
68 | Environment="FLANNEL_IMG={{ .Dockerdomain }}:5000/{{ .Images.flannel }}"
69 | Environment="ETCD_SSL_DIR=/etc/ssl/etcd"
70 | Environment="FLANNEL_ENV_FILE=/run/flannel/options.env"
71 | LimitNOFILE=40000
72 | LimitNPROC=1048576
73 | ExecStartPre=/sbin/modprobe ip_tables
74 | ExecStartPre=/usr/bin/mkdir -p /run/flannel
75 | ExecStartPre=/usr/bin/mkdir -p ${ETCD_SSL_DIR}
76 | ExecStartPre=-/usr/bin/touch ${FLANNEL_ENV_FILE}
77 | {{- if .KubeMaster }}
78 | ExecStartPre=/usr/bin/etcdctl set /coreos.com/network/config '{ "Network": "10.1.0.0/16", "Backend": {"Type": "{{ .FlannelBackend }}"}}'
79 | {{- end }}
80 |
81 | ExecStart=/usr/libexec/sdnotify-proxy /run/flannel/sd.sock \
82 | /usr/bin/docker run --net=host --privileged=true --rm \
83 | --volume=/run/flannel:/run/flannel \
84 | --env=NOTIFY_SOCKET=/run/flannel/sd.sock \
85 | --env=AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} \
86 | --env=AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} \
87 | --env-file=${FLANNEL_ENV_FILE} \
88 | --volume=/usr/share/ca-certificates:/etc/ssl/certs:ro \
89 | --volume=${ETCD_SSL_DIR}:${ETCD_SSL_DIR}:ro \
90 | ${FLANNEL_IMG} /opt/bin/flanneld --ip-masq=true
91 |
92 | # Update docker options
93 | ExecStartPost=/usr/bin/docker run --net=host --rm --volume=/run:/run \
94 | ${FLANNEL_IMG} \
95 | /opt/bin/mk-docker-opts.sh -d /run/flannel_docker_opts.env -i
96 |
97 | [Install]
98 | WantedBy=multi-user.target
99 |
100 | - name: setup-network-environment.service
101 | runtime: true
102 | command: start
103 | content: |
104 | [Unit]
105 | Description=Setup Network Environment
106 | Documentation=https://github.com/kelseyhightower/setup-network-environment
107 | Requires=network-online.target
108 | After=network-online.target
109 | [Service]
110 | ExecStartPre=-/usr/bin/mkdir -p /opt/bin
111 | ExecStartPre=-/usr/bin/wget --quiet -O /opt/bin/setup-network-environment http://{{ .BootstrapperIP }}/static/setup-network-environment-1.0.1
112 | ExecStartPre=-/usr/bin/chmod +x /opt/bin/setup-network-environment
113 | ExecStart=/opt/bin/setup-network-environment
114 | RemainAfterExit=yes
115 | Type=oneshot
116 |
117 | - name: setup-gpu.service
118 | command: start
119 | content: |
120 | [Unit]
121 | Description=Setup GPU
122 | Requires=network.target
123 | After=network.target
124 | [Service]
125 | ExecStartPre=/usr/bin/mkdir -p /opt/gpu
126 | ExecStartPre=/usr/bin/wget -P /opt/gpu -r -nd http://{{ .BootstrapperIP }}/static/gpu-drivers/coreos/{{ .CoreOSVersion }}
127 | ExecStart=/bin/bash /opt/gpu/setup_gpu.sh {{ .CoreOSVersion }} {{ .GPUDriversVersion }}
128 | RemainAfterExit=no
129 | Type=oneshot
130 |
131 |
132 | - name: "settimezone.service"
133 | command: start
134 | content: |
135 | [Unit]
136 | Description=Set the time zone
137 |
138 | [Service]
139 | ExecStart=/usr/bin/timedatectl set-timezone Asia/Shanghai
140 | RemainAfterExit=no
141 | Type=oneshot
142 |
143 |
{{- if .CephMonitor }}
    - name: "ceph-mon.service"
      command: start
      content: |
        [Unit]
        Description=Install ceph mon services
        Requires=etcd2.service
        After=etcd2.service
        Requires=network.target
        After=network.target

        [Service]
        # Block until the etcd cluster is healthy.  The redirection must be
        # 2>&1: the previous "2&>1" passed a stray "2" argument and wrote
        # both streams to a file literally named "1".
        ExecStart=/bin/bash -c 'while ! etcdctl cluster-health >/dev/null 2>&1 ; do sleep 5; done'
        # The download path must match the script executed on the next line
        # (it was previously saved as install-emon.sh but run as install-mon.sh).
        ExecStart=/usr/bin/wget --quiet -O /home/core/install-mon.sh http://{{ .BootstrapperIP }}/static/ceph/install-mon.sh
        ExecStart=/bin/bash /home/core/install-mon.sh {{ .Dockerdomain }}:5000
        RemainAfterExit=no
        Type=oneshot
{{- end }}
162 |
163 |
{{- if .ZapAndStartOSD }}
    - name: "ceph-osd.service"
      command: start
      content: |
        [Unit]
        Description=Install ceph osd service
        Requires=etcd2.service
        After=etcd2.service
        Requires=network.target
        After=network.target

        [Service]
        # Block until the etcd cluster is healthy.  The redirection must be
        # 2>&1: the previous "2&>1" passed a stray "2" argument and wrote
        # both streams to a file literally named "1".
        ExecStart=/bin/bash -c 'while ! etcdctl cluster-health >/dev/null 2>&1 ; do sleep 5; done'
        ExecStart=/usr/bin/wget --quiet -O /home/core/install-osd.sh http://{{ .BootstrapperIP }}/static/ceph/install-osd.sh
        ExecStart=/bin/bash /home/core/install-osd.sh {{ .Dockerdomain }}:5000
        RemainAfterExit=no
        Type=oneshot
{{- end}}
182 |
183 |
184 | - name: docker.service
185 | runtime: true
186 | command: start
187 | drop-ins:
188 | - name: 40-docker-flannel.conf
189 | content: |
190 | [Unit]
191 | After=docker.socket early-docker.target network.target flanneld.service
192 | Requires=docker.socket early-docker.target flanneld.service
193 |
194 | {{- if .KubeMaster }}
195 | - name: kube-addons.service
196 | command: start
197 | content: |
198 | [Unit]
199 | Description=Install Kubernetes addons
200 | After=kubelet.service
201 | Requires=kubelet.service
202 | [Service]
203 | ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/addons
204 | ExecStartPre=/usr/bin/wget -O /etc/kubernetes/addons/ingress.yaml http://{{ .BootstrapperIP }}/static/ingress.yaml
205 | ExecStartPre=/usr/bin/wget -O /etc/kubernetes/addons/kubedns-controller.yaml http://{{ .BootstrapperIP }}/static/kubedns-controller.yaml
206 | ExecStartPre=/usr/bin/wget -O /etc/kubernetes/addons/kubedns-svc.yaml http://{{ .BootstrapperIP }}/static/kubedns-svc.yaml
207 | ExecStartPre=/usr/bin/wget -O /etc/kubernetes/addons/default-backend.yaml http://{{ .BootstrapperIP }}/static/default-backend.yaml
208 | ExecStartPre=/usr/bin/wget -O /etc/kubernetes/addons/default-backend-svc.yaml http://{{ .BootstrapperIP }}/static/default-backend-svc.yaml
209 | ExecStartPre=/usr/bin/wget -O /etc/kubernetes/addons/heapster-service.yaml http://{{ .BootstrapperIP }}/static/heapster-service.yaml
210 | ExecStartPre=/usr/bin/wget -O /etc/kubernetes/addons/influxdb-service.yaml http://{{ .BootstrapperIP }}/static/influxdb-service.yaml
211 | ExecStartPre=/usr/bin/wget -O /etc/kubernetes/addons/grafana-service.yaml http://{{ .BootstrapperIP }}/static/grafana-service.yaml
212 | ExecStartPre=/usr/bin/wget -O /etc/kubernetes/addons/influxdb-grafana-controller.yaml http://{{ .BootstrapperIP }}/static/influxdb-grafana-controller.yaml
213 | ExecStartPre=/usr/bin/wget -O /etc/kubernetes/addons/heapster-controller.yaml http://{{ .BootstrapperIP }}/static/heapster-controller.yaml
214 |
215 | ExecStart=/usr/bin/docker run --rm --net=host \
216 | -e "KUBECTL_OPTS=--server=http://{{ .MasterHostname }}:8080" \
217 | -v /etc/kubernetes/addons/:/etc/kubernetes/addons/ \
218 | {{ .Dockerdomain }}:5000/{{ .Images.addon_manager }}
219 |
220 | - name: kubelet.service
221 | runtime: true
222 | command: start
223 | content: |
224 | [Unit]
225 | Description=Kubernetes Kubelet
226 | Documentation=https://github.com/kubernetes/kubernetes
227 | Requires=docker.service
228 | After=docker.service
229 | [Service]
230 | Environment=KUBELET_VERSION=v1.2.4_coreos.1
231 | EnvironmentFile=/etc/network-environment
232 | ExecStartPre=/bin/wget --quiet -O /opt/bin/kubelet http://{{ .BootstrapperIP }}/static/kubelet
233 | ExecStartPre=/usr/bin/chmod +x /opt/bin/kubelet
234 | ExecStart=/opt/bin/kubelet \
235 | --pod_infra_container_image={{ .Dockerdomain }}:5000/{{ .Images.pause }} \
236 | --register-node=true \
237 | --api-servers=http://{{ .MasterHostname }}:8080 \
238 | --network-plugin-dir=/etc/kubernetes/cni/net.d \
239 | --network-plugin=${NETWORK_PLUGIN} \
240 | --register-schedulable=false \
241 | --allow-privileged=true \
242 | --pod-manifest-path=/etc/kubernetes/manifests \
243 | --hostname-override={{ .MasterHostname }} \
244 | --cluster-dns={{ .K8sClusterDNS }} \
245 | --cluster-domain=cluster.local \
246 | --feature-gates=Accelerators=true
247 | Restart=always
248 | RestartSec=10
249 | [Install]
250 | WantedBy=multi-user.target
251 |
252 |
253 | {{- else }}
254 | - name: kubelet.service
255 | runtime: true
256 | command: start
257 | content: |
258 | [Unit]
259 | Description=Kubernetes Kubelet
260 | Documentation=https://github.com/kubernetes/kubernetes
261 | After=docker.service
262 | Requires=docker.service
263 | [Service]
264 | EnvironmentFile=/etc/network-environment
265 | Environment=KUBELET_VERSION=v1.2.4_coreos.1
266 | ExecStartPre=/bin/wget --quiet -O /opt/bin/kubelet http://{{ .BootstrapperIP }}/static/kubelet
267 | ExecStartPre=/usr/bin/chmod +x /opt/bin/kubelet
268 | ExecStart=/opt/bin/kubelet \
269 | --pod_infra_container_image={{ .Dockerdomain }}:5000/{{ .Images.pause }} \
270 | --address=0.0.0.0 \
271 | --allow-privileged=true \
272 | --cluster-dns={{ .K8sClusterDNS }} \
273 | --cluster-domain=cluster.local \
274 | --pod-manifest-path=/etc/kubernetes/manifests \
275 | --hostname-override={{ .Hostname }} \
276 | --api-servers=https://{{ .MasterHostname }}:443 \
277 | --kubeconfig=/etc/kubernetes/worker-kubeconfig.yaml \
278 | --tls-private-key-file=/etc/kubernetes/ssl/worker-key.pem \
279 | --tls-cert-file=/etc/kubernetes/ssl/worker.pem \
280 | --feature-gates=Accelerators=true \
281 | {{ if .IngressLabel }} \
282 | --logtostderr=true \
283 | --node-labels=role=ingress \
284 | {{ else }} \
285 | --logtostderr=true \
286 | {{ end }} \
287 | --network-plugin= \
288 | --network-plugin-dir=/etc/cni/net.d
289 | Restart=always
290 | RestartSec=10
291 | [Install]
292 | WantedBy=multi-user.target
293 | {{- end}}
294 |
295 | hostname: "{{ .Hostname }}"
296 | ssh_authorized_keys:
297 | {{ .SSHAuthorizedKeys }}
298 | {{ end }}
299 |
--------------------------------------------------------------------------------
/golang/template/templatefiles/cloud-config.template:
--------------------------------------------------------------------------------
{{ define "cc-template" }}#cloud-config
write_files:
{{ template "common" .}}
{{- if ne .OSName "CentOS" }}
{{/* The "coreos" template emits the coreos: section (etcd2/flannel/kubelet units). */}}
{{ template "coreos" .}}
{{ else }}
{{/* On CentOS the "centos" template continues the write_files: section instead. */}}
{{ template "centos" .}}
{{- end }}
{{ end }}
12 |
--------------------------------------------------------------------------------
/golang/validate-yaml/validate-yaml.go:
--------------------------------------------------------------------------------
// validate-yaml validates a cluster description YAML file
// (cluster-desc.yml) and the cloud-config files generated from it for
// every node in the cluster, reporting the first problem found and
// exiting with status 1 on failure.
//
// Usage:
//
//	validate-yaml -cluster-desc=./cluster-desc.yml -cloud-config-dir=./cloud-config.template
package main
10 |
11 | import (
12 | "bytes"
13 | "errors"
14 | "flag"
15 | "github.com/golang/glog"
16 | "github.com/k8sp/sextant/golang/certgen"
17 | "github.com/k8sp/sextant/golang/clusterdesc"
18 | cctemplate "github.com/k8sp/sextant/golang/template"
19 | "github.com/topicai/candy"
20 | "gopkg.in/yaml.v2"
21 | "io/ioutil"
22 | "os"
23 | "strings"
24 | )
25 |
26 | func main() {
27 | clusterDesc := flag.String("cluster-desc", "./cluster-desc.yml", "Configurations for a k8s cluster.")
28 | ccTemplateDir := flag.String("cloud-config-dir", "./cloud-config.template", "cloud-config file template.")
29 | flag.Parse()
30 |
31 | glog.Info("Checking %s ...", *clusterDesc)
32 | err := validation(*clusterDesc, *ccTemplateDir)
33 | if err != nil {
34 | glog.Info("Failed: \n" + err.Error())
35 | os.Exit(1)
36 | }
37 | glog.Info("Successed!")
38 | os.Exit(0)
39 | }
40 |
41 | // Validate cluster-desc.yaml and check the generated cloud-config file format.
42 | func validation(clusterDescFile string, ccTemplateDir string) error {
43 | clusterDesc, err := ioutil.ReadFile(clusterDescFile)
44 | candy.Must(err)
45 | _, direrr := os.Stat(ccTemplateDir)
46 | if os.IsNotExist(direrr) {
47 | return direrr
48 | }
49 |
50 | c := &clusterdesc.Cluster{}
51 | // validate cluster-desc format
52 | err = yaml.Unmarshal(clusterDesc, c)
53 | if err != nil {
54 | return errors.New("cluster-desc file formate failed: " + err.Error())
55 | }
56 |
57 | // flannel backend only support host-gw and udp for now
58 | if c.FlannelBackend != "host-gw" && c.FlannelBackend != "udp" && c.FlannelBackend != "vxlan" {
59 | return errors.New("Flannl backend should be host-gw or udp.")
60 | }
61 |
62 | // Inlucde one master and one etcd member at least
63 | countEtcdMember := 0
64 | countKubeMaster := 0
65 | for _, node := range c.Nodes {
66 | if node.EtcdMember {
67 | countEtcdMember++
68 | }
69 | if node.KubeMaster {
70 | countKubeMaster++
71 | }
72 | }
73 | if countEtcdMember == 0 || countKubeMaster == 0 {
74 | return errors.New("Cluster description yaml should include one master and one etcd member at least.")
75 | }
76 |
77 | if len(c.SSHAuthorizedKeys) == 0 {
78 | return errors.New("Cluster description yaml should include one ssh key.")
79 | }
80 |
81 | caKey := "./tmp_ca.key"
82 | caCrt := "./tmp_ca.crt"
83 | certgen.GenerateCA(caKey, caCrt)
84 | var ccTmplBuffer bytes.Buffer
85 | for _, n := range c.Nodes {
86 | mac := n.Mac()
87 | err = cctemplate.Execute(&ccTmplBuffer, mac, "cc-template", ccTemplateDir, clusterDescFile, caKey, caCrt)
88 | if err != nil {
89 | return errors.New("Generate cloud-config failed with mac: " + mac + "\n" + err.Error())
90 | }
91 |
92 | yml := make(map[interface{}]interface{})
93 | err = yaml.Unmarshal(ccTmplBuffer.Bytes(), yml)
94 | if err != nil {
95 | return errors.New("Generate cloud-config format failed with mac: " + mac + "\n" + err.Error())
96 | }
97 | ccTmplBuffer.Reset()
98 | // check generated cloud-config in yaml format
99 | for _, wfunit := range yml["write_files"].([]interface{}) {
100 | fn := wfunit.(map[interface{}]interface{})["path"].(string)
101 | fcontent := wfunit.(map[interface{}]interface{})["content"].(string)
102 | if strings.HasSuffix(fn, "ca.pem") && fcontent == "" {
103 | return errors.New("cloud-config has no CA contents")
104 | }
105 | }
106 | }
107 | return nil
108 | }
109 |
--------------------------------------------------------------------------------
/golang/validate-yaml/validate-yaml_test.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "os"
5 | "testing"
6 | )
7 |
// TestValidateYAML is a placeholder for validation tests.
//
// The previous implementation called os.Exit(0), which terminates the
// whole test binary immediately and silently skips every other test and
// the pass/fail reporting of the `go test` harness.  A test with nothing
// to check should skip (or simply return) instead.
func TestValidateYAML(t *testing.T) {
	t.Skip("TODO: add real cases exercising validation() with fixture cluster-desc files")
}
11 |
--------------------------------------------------------------------------------
/install-ceph/README.md:
--------------------------------------------------------------------------------
1 | # Using ceph rbd on kubernetes on coreos
2 |
3 | ## Environment Preparation
4 | Setup ceph client environment on one of your coreos machine(usually the master).
5 |
Create a file `/opt/bin/rbd` and paste the lines below.
7 | ```
8 | #!/bin/bash
9 | docker run -i --rm \
10 | --privileged \
11 | --pid host \
12 | --net host \
13 | --volume /dev:/dev \
14 | --volume /sys:/sys \
15 | --volume=/sbin/modprobe:/sbin/modprobe \
16 | --volume=/lib/modules:/lib/modules \
17 | --volume /etc/ceph:/etc/ceph \
18 | --volume /var/lib/ceph:/var/lib/ceph \
19 | --entrypoint $(basename $0) \
20 | ceph/rbd "$@"
21 | ```
22 | Make the script runnable, and make sure `/opt/bin` is under PATH env:
23 | ```
24 | chmod +x /opt/bin/rbd
25 | ```
If ceph has already been installed on the current machine, we will have
27 | `/etc/ceph` directory containing `ceph.conf` and your keyring. If not, copy
28 | the `/etc/ceph` configurations from your ceph installation.
29 |
30 | Then you'll be able to run `rbd` command to create, rm and list images.
31 |
32 | ## Create your rbd image
33 | Run `rbd [--user myuser] create [imagename] --size [image size MB] --pool [poolname]`
34 | to create rbd image under your pool,
35 | like: `rbd create bar --size 1024 --pool swimmingpool`.
36 |
37 | Then run `rbd ls` will list the images you've created.
38 |
39 | According to issues mentioned [here](http://www.zphj1987.com/2016/06/07/rbd无法map(rbd-feature-disable)/)
40 | we need to disable the new features of rbd images in order to run under
41 | kubernetes:
42 | ```
43 | rbd feature disable mypool/myimage deep-flatten
44 | rbd feature disable mypool/myimage fast-diff
45 | rbd feature disable mypool/myimage object-map
46 | rbd feature disable mypool/myimage exclusive-lock
47 | ```
48 |
49 | ## Mount rbd volume in a kubernetes pod
50 | Do the following steps to create a kubernetes secret for cephx.
51 |
52 | Get the base64 encoded keyring:
53 | ```
54 | echo "AQBAMo1VqE1OMhAAVpERPcyQU5pzU6IOJ22x1w==" | base64
55 | QVFCQU1vMVZxRTFPTWhBQVZwRVJQY3lRVTVwelU2SU9KMjJ4MXc9PQo=
56 | ```
57 |
58 | Edit your ceph-secret.yml with the base64 key:
59 | ```
60 | apiVersion: v1
61 | kind: Secret
62 | metadata:
63 | name: ceph-secret
64 | data:
65 | key: QVFCQU1vMVZxRTFPTWhBQVZwRVJQY3lRVTVwelU2SU9KMjJ4MXc9PQo=
66 | ```
67 |
68 | Add your secret to Kubernetes:
69 | ```
70 | kubectl create -f secret/ceph-secret.yaml
71 | kubectl get secret
72 | NAME TYPE DATA
73 | ceph-secret Opaque 1
74 | ```
75 |
76 | Now, we edit our rbd-with-secret.json pod file.
77 | This file describes the content of your pod:
78 | ```
79 | {
80 | "apiVersion": "v1",
81 | "kind": "Pod",
82 | "metadata": {
83 | "name": "rbd1"
84 | },
85 | "spec": {
86 | "containers": [
87 | {
88 | "name": "rbd-test001",
89 | "image": "nginx",
90 | "volumeMounts": [
91 | {
92 | "mountPath": "/mnt/rbd",
93 | "name": "rbdpd"
94 | }
95 | ]
96 | }
97 | ],
98 | "nodeSelector": {
99 | "role": "worker"
100 | },
101 | "volumes": [
102 | {
103 | "name": "rbdpd",
104 | "rbd": {
105 | "monitors": [
106 | "192.168.119.150:6789",
107 | "192.168.119.151:6789",
108 | "192.168.119.152:6789"
109 | ],
110 | "pool": "lgk8s",
111 | "image": "nginx",
112 | "user": "lgk8s",
113 | "secretRef": {
114 | "name": "ceph-secret"
115 | },
116 | "fsType": "ext4",
117 | "readOnly": true
118 | }
119 | }
120 | ]
121 | }
122 | }
123 | ```
A few explanations:
125 |
126 | * define your rbd image mount in the section "volumes", and use it in pod spec.
127 | * in pod spec "volumeMounts" section, define "mountPath" for rbd mount point
128 | in your pod, and "name" is the volume name defined in "volumes" section.
* the rbd volume definition must contain:
130 | * monitors: a list of ceph monitors address, eg. ["192.168.119.150:6789"]
131 | * pool: which ceph pool your image in
132 | * image: rbd image name created by ceph
133 | * user/secretRef: user and the keyring for auth
134 | * fsType: ext4 or xfs etc.
* readOnly: true or false
136 |
Now it’s time to fire up your pod:
138 | ```
139 | kubectl create -f rbd-with-secret.json
140 | kubectl get pods
141 | NAME READY REASON RESTARTS AGE
142 | rbd2 1/1 Running 0 1m
143 | ```
144 |
145 | After that your pod can access rbd data from your mountPath(eg. /mnt/rbd)
146 |
--------------------------------------------------------------------------------
/install-ceph/install-mon.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Install and start the ceph monitor (mon) and metadata server (mds)
# containers on this host.  Cluster configuration is kept in the local
# etcd (KV_TYPE=etcd at 127.0.0.1:2379).
#
# Usage: install-mon.sh [docker-registry-host:port]

# Optional private registry prefix, e.g. "bootstrapper:5000/".
docker_hub=$1
if [[ ! -z $docker_hub ]]; then
    docker_hub=$docker_hub"/"
fi

CEPH_CLUSTER_NAME=ceph
CEPH_MON_DOCKER_NAME=ceph_mon
CEPH_MDS_DOCKER_NAME=ceph_mds
# NOTE(review): CEPH_IMG_TAG is currently unused — confirm whether the
# ceph/daemon image below should be pinned to this tag.
CEPH_IMG_TAG=tag-build-master-jewel-ubuntu-14.04-fix370

# Populate the kvstore with default ceph settings unless the cephx key is
# already present (i.e. this step has run before).  Testing the command
# directly in the `if` avoids relying on $? surviving intervening lines.
# NOTICE: put OSD_JOURNAL_SIZE settings in a default file
# as of: https://github.com/ceph/ceph-docker/blob/master/ceph-releases/jewel/ubuntu/14.04/daemon/entrypoint.sh#L173
# NOTICE: use docker run --rm to ensure container is deleted after execution
if ! etcdctl get /ceph-config/$CEPH_CLUSTER_NAME/auth/cephx; then
    echo "Enable cephx."
    docker run --rm --net=host \
        --name ceph_kvstore \
        -v /etc/ceph/:/etc/ceph/ \
        -v /var/lib/ceph/:/var/lib/ceph \
        -e CLUSTER=$CEPH_CLUSTER_NAME \
        -e KV_TYPE=etcd \
        -e KV_IP=127.0.0.1 \
        -e KV_PORT=2379 \
        -e OSD_JOURNAL_SIZE= \
        --entrypoint=/bin/bash \
        "$docker_hub"ceph/daemon -c "sed -i.bak \"/^\/osd\/osd_journal_size/d\" /ceph.defaults && echo \"/osd/osd_journal_size \" >> /ceph.defaults && /entrypoint.sh populate_kvstore"
fi

# MON: restart an existing container, otherwise create and run it.
if docker ps -a | grep -q $CEPH_MON_DOCKER_NAME ; then
    echo "docker container $CEPH_MON_DOCKER_NAME exists, start it now"
    docker start $CEPH_MON_DOCKER_NAME
else
    echo "docker container $CEPH_MON_DOCKER_NAME doesn't exist, run it now"
    docker run -d --restart=on-failure --net=host \
        --name $CEPH_MON_DOCKER_NAME \
        -v /etc/ceph:/etc/ceph \
        -v /var/lib/ceph/:/var/lib/ceph \
        -e CLUSTER=$CEPH_CLUSTER_NAME \
        -e KV_TYPE=etcd \
        -e NETWORK_AUTO_DETECT=4 \
        --entrypoint=/entrypoint.sh \
        "$docker_hub"ceph/daemon mon
fi

# MDS: restart an existing container, otherwise create and run it.
# (Use $CEPH_MDS_DOCKER_NAME for --name instead of the hard-coded literal.)
if docker ps -a | grep -q $CEPH_MDS_DOCKER_NAME ; then
    echo "docker container $CEPH_MDS_DOCKER_NAME exists, start it now"
    docker start $CEPH_MDS_DOCKER_NAME
else
    echo "docker container $CEPH_MDS_DOCKER_NAME doesn't exist, run it now"
    docker run -d --restart=on-failure --net=host \
        --name $CEPH_MDS_DOCKER_NAME \
        -e CLUSTER=$CEPH_CLUSTER_NAME \
        -e CEPHFS_CREATE=1 \
        -e KV_TYPE=etcd \
        --entrypoint=/entrypoint.sh \
        "$docker_hub"ceph/daemon mds
fi
67 |
--------------------------------------------------------------------------------
/install-ceph/install-osd.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Start one ceph OSD container per non-system disk on this host.
#
# Usage: install-osd.sh [docker-registry-host:port]

# Optional private registry prefix, e.g. "bootstrapper:5000/".
docker_hub=$1
if [[ ! -z $docker_hub ]]; then
    docker_hub=$docker_hub"/"
fi

# Obtain all disk devices, and the disk holding the root filesystem
# (strip the trailing partition number to get the parent device).
devices=$(lsblk -l |awk '$6=="disk"{print $1}')
systemdevice=$(lsblk -l |awk '$7=="/"{print $1}' |sed 's/[0-9]\+$//')

CEPH_CLUSTER_NAME=ceph

# Run an OSD daemon for each device except the system disk.
for d in $devices
do
    if [[ $d != $systemdevice ]]; then
        device="/dev/$d"
        CEPH_OSD_DOCKER_NAME=ceph_osd_${d}
        docker rm $CEPH_OSD_DOCKER_NAME
        docker run -d --restart=on-failure --pid=host --net=host --privileged=true \
            --name $CEPH_OSD_DOCKER_NAME \
            -v /etc/ceph:/etc/ceph \
            -v /var/lib/ceph:/var/lib/ceph \
            -v /dev:/dev \
            -e KV_TYPE=etcd \
            -e CLUSTER=$CEPH_CLUSTER_NAME \
            -e OSD_DEVICE=${device} \
            --entrypoint=/bin/bash \
            "$docker_hub"ceph/daemon -x /entrypoint.sh osd

        # FIXME: wait until the container finishes bootstrapping.
        # $st must be quoted: it is empty until the container appears in
        # `docker ps`, and an unquoted empty value makes `[` fail with
        # "unary operator expected".
        st=$(docker ps --format "{{.Status}} {{.Names}}"|grep $CEPH_OSD_DOCKER_NAME | awk '{print $1}')
        while [ "$st" != "Up" ] ;
        do
            st=$(docker ps --format "{{.Status}} {{.Names}}"|grep $CEPH_OSD_DOCKER_NAME | awk '{print $1}')
            sleep 5;
        done
    fi
done
41 |
--------------------------------------------------------------------------------
/logo/Sextant.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/k8sp/sextant/0f4fec9ae68aa5eba689aeb1b7584977033ab907/logo/Sextant.png
--------------------------------------------------------------------------------
/scripts/centos.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | GPU_DIR='gpu_drivers'
4 | ABSOLUTE_GPU_DIR="$BSROOT/html/static/CentOS7/$GPU_DIR"
5 | HTTP_GPU_DIR="http://$BS_IP/static/CentOS7/$GPU_DIR"
6 |
download_centos_images() {
    # Fetch the syslinux boot-loader pieces, the CentOS 7 PXE
    # kernel/initrd, and the CentOS 7 Everything ISO into $BSROOT.
    VERSION=CentOS7

    # Abort the whole build on any failed step.
    fail() { log fatal "Failed"; exit 1; }

    mkdir -p $BSROOT/tftpboot
    log info "Downloading syslinux ..."
    wget --quiet -c -N -P $BSROOT/tftpboot https://www.kernel.org/pub/linux/utils/boot/syslinux/syslinux-6.03.tar.gz || fail
    cd $BSROOT/tftpboot
    tar xzf syslinux-6.03.tar.gz || fail
    cp syslinux-6.03/bios/core/pxelinux.0 $BSROOT/tftpboot || fail
    cp syslinux-6.03/bios/com32/menu/vesamenu.c32 $BSROOT/tftpboot || fail
    cp syslinux-6.03/bios/com32/elflink/ldlinux/ldlinux.c32 $BSROOT/tftpboot || fail
    rm -rf syslinux-6.03 || fail # Clean the untarred tree.
    log info "Done"

    log info "Downloading CentOS 7 PXE vmlinuz image ..."
    cd $BSROOT/tftpboot
    mkdir -p $BSROOT/tftpboot/CentOS7
    wget --quiet -c -N -P $BSROOT/tftpboot/CentOS7 $cluster_desc_mirror_site/$cluster_desc_centos_version/os/x86_64/images/pxeboot/initrd.img || fail
    wget --quiet -c -N -P $BSROOT/tftpboot/CentOS7 $cluster_desc_mirror_site/$cluster_desc_centos_version/os/x86_64/images/pxeboot/vmlinuz || fail
    log info "Done"

    log info "Downloading CentOS 7 ISO ..."
    mkdir -p $BSROOT/html/static/CentOS7
    centos7_src=$cluster_desc_mirror_site/$cluster_desc_centos_version/isos/x86_64/CentOS-7-x86_64-Everything-${cluster_desc_centos_version##*.}.iso
    echo $centos7_src
    wget --quiet -c -N -P $BSROOT/html/static/CentOS7 ${centos7_src} || fail
    log info "Done"
}
34 |
35 |
36 | generate_pxe_centos_config() {
37 | log info "Generating pxelinux.cfg ..."
38 | mkdir -p $BSROOT/tftpboot/pxelinux.cfg
39 | cat > $BSROOT/tftpboot/pxelinux.cfg/default < $BSROOT/html/static/CentOS7/ks.cfg <> /root/cloudinit.log
128 |
129 | %end
130 |
131 | EOF
132 | log info "Done"
133 | }
134 |
135 |
generate_post_cloudinit_script() {
# Write the post-install provisioning script served to CentOS nodes.
# The heredoc delimiter is quoted ('EOF'), so nothing inside is expanded
# here; every ${...} reference is resolved on the target node at run time.
log info "Generating post cloudinit script ..."
mkdir -p $BSROOT/html/static/CentOS7
cat > $BSROOT/html/static/CentOS7/post_cloudinit_provision.sh <<'EOF'
#!/bin/bash
default_iface=$(awk '$2 == 00000000 { print $1 }' /proc/net/route | uniq)
if [[ -z ${bootstrapper_ip} ]]; then
bootstrapper_ip=$(grep nameserver /etc/resolv.conf|cut -d " " -f2)
fi
printf "Default interface: ${default_iface}\n"
default_iface=`echo ${default_iface} | awk '{ print $1 }'`

mac_addr=`ip addr show dev ${default_iface} | awk '$1 ~ /^link\// { print $2 }'`
printf "Interface: ${default_iface} MAC address: ${mac_addr}\n"


sed -i 's/disable_root: 1/disable_root: 0/g' /etc/cloud/cloud.cfg
sed -i 's/ssh_pwauth: 0/ssh_pwauth: 1/g' /etc/cloud/cloud.cfg
echo "FLANNEL_OPTIONS=\"-iface=${default_iface}\"" >> /etc/sysconfig/flanneld

mkdir -p /var/lib/cloud/seed/nocloud-net/
cd /var/lib/cloud/seed/nocloud-net/

wget -O user-data http://$bootstrapper_ip/cloud-config/${mac_addr}

if [[ ! -z ${etcd_data_path} ]]; then
sed -i "s@Environment=ETCD_DATA_DIR=.*@Environment=ETCD_DATA_DIR=${etcd_data_path}@" \
/var/lib/cloud/seed/nocloud-net/user-data
fi

cat > /var/lib/cloud/seed/nocloud-net/meta-data << eof
instance-id: iid-local01
eof

cloud-init init --local
cloud-init init

systemctl stop NetworkManager
systemctl disable NetworkManager
systemctl enable docker
EOF
log info "Done"
}
179 |
180 |
181 | generate_rpmrepo_config() {
182 | log info "Generating rpm repo configuration files ..."
183 | mkdir -p $BSROOT/html/static/CentOS7/repo
184 |
185 | cat > $BSROOT/html/static/CentOS7/repo/cloud-init.repo < $LIBS_FILES
32 | tar -C ${NVIDIA_RUN_NAME} -cvj ${TOOLS} > $TOOLS_FILES
33 | }
34 |
install_lib_and_ko() {
    # Unpack the pre-built NVIDIA tools and libraries, then create the
    # .so / .so.1 symlink chains that the dynamic loader expects.
    mkdir -p ${NVIDIA_BIN_DIR}
    tar -xjf $TOOLS_FILES -C ${NVIDIA_BIN_DIR}
    mkdir -p ${NVIDIA_LIB_DIR}
    tar -xjf $LIBS_FILES -C ${NVIDIA_LIB_DIR}

    # Driver-versioned libraries: lib.so.<version> <- lib.so.1 <- lib.so
    local versioned_libs="libcuda libGLESv1_CM libGL libEGL \
libnvidia-cfg libnvidia-encode libnvidia-fbc libnvidia-ifr \
libnvidia-ml libnvidia-opencl libnvcuvid libvdpau"
    local lib
    for lib in $versioned_libs; do
        ln -sf $NVIDIA_LIB_DIR/${lib}.so.${DRIVER_VERSION} $NVIDIA_LIB_DIR/${lib}.so.1
        ln -sf $NVIDIA_LIB_DIR/${lib}.so.1 $NVIDIA_LIB_DIR/${lib}.so
    done

    # Libraries with their own versioning schemes.
    ln -sf $NVIDIA_LIB_DIR/libOpenCL.so.1.0.0 $NVIDIA_LIB_DIR/libOpenCL.so.1
    ln -sf $NVIDIA_LIB_DIR/libOpenCL.so.1 $NVIDIA_LIB_DIR/libOpenCL.so
    ln -sf $NVIDIA_LIB_DIR/libGLESv2.so.${DRIVER_VERSION} $NVIDIA_LIB_DIR/libGLESv2.so.2
    ln -sf $NVIDIA_LIB_DIR/libGLESv2.so.2 $NVIDIA_LIB_DIR/libGLESv2.so
    ln -sf $NVIDIA_LIB_DIR/libvdpau_nvidia.so.${DRIVER_VERSION} $NVIDIA_LIB_DIR/libvdpau_nvidia.so
    ln -sf $NVIDIA_LIB_DIR/libvdpau_trace.so.${DRIVER_VERSION} $NVIDIA_LIB_DIR/libvdpau_trace.so
}
58 |
disable_nouveau() {
    # Blacklist the open-source nouveau driver so it cannot claim the GPU,
    # then unload it if it is currently loaded.
    printf '%s\n' \
        'blacklist nouveau' \
        'options nouveau modeset=0' \
        > /etc/modprobe.d/nvidia-installer-disable-nouveau.conf
    /sbin/modprobe -r nouveau
}
67 |
68 |
69 | disable_nouveau
70 | build_lib_and_ko
71 | install_lib_and_ko
72 |
73 | wget -P /root ${HTTP_GPU_DIR}/nvidia-gpu-mkdev.sh
74 | /bin/bash /root/nvidia-gpu-mkdev.sh
75 | echo "/bin/bash /root/nvidia-gpu-mkdev.sh" >>/etc/rc.local
76 | chmod +x /etc/rc.local
77 |
--------------------------------------------------------------------------------
/scripts/centos/gpu/nvidia-gpu-mkdev.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Create the /dev/nvidia* device nodes that the NVIDIA driver expects.
# Intended to run after the nvidia kernel modules are installed; it is
# re-run from rc.local on every boot.

/sbin/modprobe -r nouveau
/sbin/modprobe nvidia

# Count the NVIDIA controllers found (3D plus VGA-compatible) and create
# one /dev/nvidia<i> node per GPU (char major 195).  Uses $() and shell
# arithmetic instead of backticks and `expr`.
NVDEVS=$(lspci | grep -i NVIDIA)
N3D=$(echo "$NVDEVS" | grep -c "3D controller")
NVGA=$(echo "$NVDEVS" | grep -c "VGA compatible controller")
N=$((N3D + NVGA - 1))
for i in $(seq 0 $N); do
    mknod -m 666 /dev/nvidia$i c 195 $i
done
mknod -m 666 /dev/nvidiactl c 195 255

/sbin/modprobe nvidia-uvm
# The nvidia-uvm major number is assigned dynamically; read it from /proc.
D=$(grep nvidia-uvm /proc/devices | awk '{print $1}')
mknod -m 666 /dev/nvidia-uvm c $D 0
18 |
--------------------------------------------------------------------------------
/scripts/common.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# Common utilities, variables and checks for all build scripts.
set -o errexit
set -o nounset
set -o pipefail

# One mandatory argument (cluster description file) plus an optional
# bsroot output directory.
if (( $# < 1 || $# > 2 )); then
  echo "Usage: bsroot.sh [\$SEXTANT_DIR/bsroot]"
  exit 1
fi
12 |
# realpath PATH -- print PATH as an absolute path without resolving
# symlinks: absolute inputs pass through unchanged, relative inputs are
# anchored at $PWD (a leading "./" is stripped first).  Lets bsroot.sh be
# invoked from anywhere, not only from inside its Git repository.
realpath() {
  if [[ $1 = /* ]]; then
    echo "$1"
  else
    echo "$PWD/${1#./}"
  fi
}
17 |
# Resolve the repository location and the cluster description file.
SEXTANT_DIR=$(dirname $(realpath $0))
INSTALL_CEPH_SCRIPT_DIR=$SEXTANT_DIR/install-ceph
CLUSTER_DESC=$(realpath $1)

source $SEXTANT_DIR/scripts/load_yaml.sh
# Load "cluster-desc.yaml" into shell variables prefixed cluster_desc_.
load_yaml $CLUSTER_DESC cluster_desc_

# The repo must live at $GOPATH/src/github.com/k8sp/sextant so the Go
# tools can build it.  Use ${GOPATH:-} because this script runs under
# `set -o nounset`: with a bare $GOPATH an unset variable would abort
# with "unbound variable" instead of printing this helpful message.
if [[ "$SEXTANT_DIR" != "${GOPATH:-}/src/github.com/k8sp/sextant" ]]; then
  echo "\$SEXTANT_DIR=$SEXTANT_DIR differs from ${GOPATH:-}/src/github.com/k8sp/sextant."
  echo "Please set GOPATH environment variable and use 'go get' to retrieve sextant."
  exit 1
fi
32 |
# The optional second argument overrides the default bsroot location.
if [[ "$#" -eq 2 ]]; then
  BSROOT=$(realpath $2)
else
  BSROOT=$SEXTANT_DIR/bsroot
fi
# Reuse an existing bsroot in place; only create it when missing.
if [[ ! -d $BSROOT ]]; then
  mkdir -p $BSROOT
else
  echo "$BSROOT already exists. Overwrite without removing it."
fi
43 |
# Parse the bootstrapper IP out of the cluster description.  Note: this
# file sets `set -o errexit`/`pipefail`, so a failing pipeline would
# abort the script before any `$?` check could run -- the old
# `[[ "$?" -ne 0 ]]` test was dead code.  Guard the pipeline with
# `|| true` and validate the extracted value instead.
BS_IP=$(grep "bootstrapper:" $CLUSTER_DESC | awk '{print $2}' | sed 's/ //g' || true)
if [[ "$BS_IP" == "" ]]; then
  echo "Failed parsing cluster-desc file $CLUSTER_DESC for bootstrapper IP."
  exit 1
fi
echo "Using bootstrapper server IP $BS_IP"
50 |
# Derive the kube-master hostname from the MAC address of the node marked
# `kube_master: y`: truncate the file at the first marker, take the mac:
# line closest above it, and rewrite aa:bb:...:ff as aa-bb-...-ff.
# Fixes vs. the previous version:
#  * `tail -n 1` instead of bare `tail`, which keeps the last TEN lines
#    and could emit several newline-joined MACs;
#  * `head -n 1` on the marker line number, in case several nodes are
#    marked kube_master (multiple numbers would break `head -n`);
#  * under `set -o errexit` a failing pipeline aborts before a `$?`
#    check can run, so guard with `|| true` and test for emptiness.
KUBE_MASTER_HOSTNAME=$(head -n $(grep -n 'kube_master\s*:\s*y' $CLUSTER_DESC | cut -d: -f1 | head -n 1) $CLUSTER_DESC | grep mac: | tail -n 1 | grep -o '..:..:..:..:..:..' | tr ':' '-' || true)
if [[ "$KUBE_MASTER_HOSTNAME" == "" ]]; then
  echo "The cluster-desc file should contain a kube-master node."
  exit 1
fi

mkdir -p $BSROOT/config
cp $CLUSTER_DESC $BSROOT/config/cluster-desc.yml
59 |
# check_prerequisites checks for required software packages.
function check_prerequisites() {
  log info "Checking prerequisites ... "
  local missing=0
  local tool
  for tool in wget tar gpg docker tr go make; do
    if ! command -v $tool >/dev/null 2>&1; then
      echo "Install $tool before run this script"
      missing=1
    fi
  done
  if [[ $missing -ne 0 ]]; then
    exit 1
  fi
  log info "Done"
}
72 |
73 |
# check_cluster_desc_file cross-compiles the validate-yaml tool inside a
# golang container and runs it against the cluster description to catch
# configuration errors before anything expensive is built or downloaded.
check_cluster_desc_file() {
  log info "Cross-compiling validate-yaml ... "
  # Build a static linux/amd64 binary (CGO disabled) into $GOPATH/bin so
  # the second container below can execute it.
  docker run --rm -it \
    --volume $GOPATH:/go \
    -e CGO_ENABLED=0 \
    -e GOOS=linux \
    -e GOARCH=amd64 \
    golang:wheezy \
    go get github.com/k8sp/sextant/golang/validate-yaml \
    || { log fatal "Build sextant failed..."; exit 1; }
  log info "Done"


  log info "Copying cloud-config template and cluster-desc.yml ... "
  mkdir -p $BSROOT/config > /dev/null 2>&1
  cp -r $SEXTANT_DIR/golang/template/templatefiles $BSROOT/config
  cp $CLUSTER_DESC $BSROOT/config
  log info "Done"

  log info "Checking cluster description file ..."
  # Validate the copied cluster-desc.yml against the cloud-config
  # templates; any validator output is discarded, only the status matters.
  docker run --rm -it \
    --volume $GOPATH:/go \
    --volume $BSROOT:/bsroot \
    golang:wheezy \
    /go/bin/validate-yaml \
    --cloud-config-dir /bsroot/config/templatefiles \
    -cluster-desc /bsroot/config/cluster-desc.yml \
    > /dev/null 2>&1 || { echo "Failed"; exit 1; }
  log info "Done"
}
104 |
105 | generate_registry_config() {
106 | log info "Generating Docker registry config file ... "
107 | mkdir -p $BSROOT/registry_data
108 | [ ! -d $BSROOT/config ] && mkdir -p $BSROOT/config
109 | cat > $BSROOT/config/registry.yml </$OSD_JOURNAL_SIZE/g" \
145 | > $BSROOT/html/static/ceph/install-mon.sh || { echo "install-mon Failed"; exit 1; }
146 |
147 | sed "s/ceph\/daemon/$CEPH_DAEMON_IMAGE/g" $INSTALL_CEPH_SCRIPT_DIR/install-osd.sh \
148 | > $BSROOT/html/static/ceph/install-osd.sh || { echo "install-osd Failed"; exit 1; }
149 | log info "Done"
150 |
151 | }
152 |
153 |
# build_bootstrapper_image cross-compiles cloud-config-server and addons,
# bakes them into the "bootstrapper" Docker image, and stores the saved
# image plus its supporting scripts under $BSROOT.
build_bootstrapper_image() {
  # cloud-config-server and addon compile moved to check_cluster_desc_file
  log info "Cross-compiling cloud-config-server, addons ... "
  # Static linux/amd64 binaries (CGO disabled) so they run inside the image.
  docker run --rm -it \
    --volume $GOPATH:/go \
    -e CGO_ENABLED=0 \
    -e GOOS=linux \
    -e GOARCH=amd64 \
    golang:wheezy \
    go get github.com/k8sp/sextant/golang/cloud-config-server github.com/k8sp/sextant/golang/addons \
    || { log fatal "Build sextant failed..."; exit 1; }
  log info "Done"


  # Stage fresh binaries into the docker build context.
  rm -rf $SEXTANT_DIR/docker/{cloud-config-server,addons}
  cp $GOPATH/bin/{cloud-config-server,addons} $SEXTANT_DIR/docker
  log info "Done"

  log info "Building bootstrapper image ... "
  # Remove any stale container/image of the same name; failure is benign.
  docker rm -f bootstrapper > /dev/null 2>&1 || echo "No such container: bootstrapper ,Pass..."
  docker rmi bootstrapper:latest > /dev/null 2>&1 || echo "No such images: bootstrapper ,Pass..."
  cd $SEXTANT_DIR/docker
  docker build -t bootstrapper .
  # Export the image so the bootstrapper host can `docker load` it offline.
  docker save bootstrapper:latest > $BSROOT/bootstrapper.tar || { echo "Failed"; exit 1; }
  log info "Done"

  log info "Copying bash scripts ... "
  cp $SEXTANT_DIR/start_bootstrapper_container.sh $BSROOT/
  chmod +x $BSROOT/start_bootstrapper_container.sh
  cp $SEXTANT_DIR/scripts/load_yaml.sh $BSROOT/
  log info "Done"

  log info "Make directory ..."
  mkdir -p $BSROOT/dnsmasq
  log info "Done"
}
190 |
191 |
# download_k8s_images fetches the kubelet binary matching the hyperkube
# version declared in cluster-desc.yml, the setup-network-environment
# helper, and exports every image listed in the cluster description
# (retagged for the local registry) as a tarball under $BSROOT.
download_k8s_images() {
  # Fetch release binary tarball from github according to the versions
  # defined in "cluster-desc.yml"
  hyperkube_version=`grep "hyperkube:" $CLUSTER_DESC | grep -o '".*hyperkube.*:.*"' | sed 's/".*://; s/"//'`
  log info "Downloading kubelet ${hyperkube_version} ... "
  src="https://storage.googleapis.com/kubernetes-release/release/${hyperkube_version}/bin/linux/amd64/kubelet"
  log debug $src
  wget --quiet -c -N -O $BSROOT/html/static/kubelet $src || { log fatal "Failed"; exit 1; }
  log info "Done"

  # setup-network-environment will fetch the default system IP infomation
  # when using cloud-config file to initiate a kubernetes cluster node
  log info "Downloading setup-network-environment file ... "
  wget --quiet -c -N -O $BSROOT/html/static/setup-network-environment-1.0.1 https://github.com/kelseyhightower/setup-network-environment/releases/download/1.0.1/setup-network-environment || { echo "Failed"; exit 1; }
  log info "Done"


  # Iterate over every image value loaded from cluster-desc.yml (the
  # shell variables named cluster_desc_images_*).
  for DOCKER_IMAGE in $(set | grep '^cluster_desc_images_' | grep -o '".*"' | sed 's/"//g'); do
    # NOTE: if we updated remote image but didn't update its tag,
    # the following lines wouldn't pull because there is a local
    # image with the same tag.
    local DOCKER_DOMAIN_IMAGE_URL=$cluster_desc_dockerdomain:5000/${DOCKER_IMAGE}
    # Tarball name: strip the registry path, replace ':' with '_'.
    local DOCKER_TAR_FILE=$BSROOT/`echo $DOCKER_IMAGE.tar | sed "s/:/_/g" |awk -F'/' '{print $NF}'`
    if [[ ! -f $DOCKER_TAR_FILE ]]; then
      if ! docker images --format '{{.Repository}}:{{.Tag}}' | grep $DOCKER_DOMAIN_IMAGE_URL > /dev/null; then
        log info "Pulling image ${DOCKER_IMAGE} ... "
        docker pull $DOCKER_IMAGE > /dev/null 2>&1
        docker tag $DOCKER_IMAGE $DOCKER_DOMAIN_IMAGE_URL
        log info "Done"
      fi
      log info "Exporting $DOCKER_TAR_FILE ... "
      # Save to a .progress file first so an interrupted export never
      # leaves a truncated tarball that a later run would treat as done.
      docker save $DOCKER_DOMAIN_IMAGE_URL > $DOCKER_TAR_FILE.progress
      mv $DOCKER_TAR_FILE.progress $DOCKER_TAR_FILE
      log info "Done"
    else
      echo "Use existing $DOCKER_TAR_FILE"
    fi
  done
}
231 |
232 |
# generate_tls_assets creates (or reuses) a self-signed CA plus a
# bootstrapper certificate signed by it, all under $BSROOT/tls.
generate_tls_assets() {
  mkdir -p $BSROOT/tls
  cd $BSROOT/tls

  # Reuse existing assets so repeated runs keep existing cluster trust.
  if [[ -f ca.pem ]] && [[ -f ca-key.pem ]] && [[ -f bootstrapper.key ]] \
    && [[ -f bootstrapper.csr ]] && [[ -f bootstrapper.crt ]]; then

    echo "Use exist CA TLS assets"

  else

    log info "Generating CA TLS assets ... "
    # 2048-bit RSA key and a 10-year self-signed root cert (CN=kube-ca).
    openssl genrsa -out ca-key.pem 2048 > /dev/null 2>&1
    openssl req -x509 -new -nodes -key ca-key.pem -days 3650 -out ca.pem -subj "/CN=kube-ca" > /dev/null 2>&1
    log info "Done"

    log info "Generating bootstrapper TLS assets ... "
    # Key + CSR for CN=bootstrapper, then sign the CSR with the CA above.
    openssl genrsa -out bootstrapper.key 2048 > /dev/null 2>&1
    openssl req -new -key bootstrapper.key -out bootstrapper.csr -subj "/CN=bootstrapper" > /dev/null 2>&1
    openssl x509 -req -in bootstrapper.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out bootstrapper.crt -days 3650 > /dev/null 2>&1
    log info "Done"

  fi
}
257 |
# prepare_setup_kubectl downloads the kubectl binary that matches the
# cluster's hyperkube version and specializes setup-kubectl.bash with
# this cluster's master hostname and bootstrapper IP.
prepare_setup_kubectl() {
  # Extract the version tag from a value like "...hyperkube:v1.x.y".
  hyperkube_version=`grep "hyperkube:" $CLUSTER_DESC | grep -o '".*hyperkube.*:.*"' | sed 's/".*://; s/"//'`
  log info "Downloading kubectl ${hyperkube_version} ... "
  wget --quiet -c -N -O $BSROOT/html/static/kubectl https://storage.googleapis.com/kubernetes-release/release/$hyperkube_version/bin/linux/amd64/kubectl || { echo "Failed"; exit 1; }
  log info "Done"

  log info "Preparing setup kubectl ... "
  # NOTE(review): the substitution patterns below are empty ("s//.../"),
  # which sed treats as "reuse the previous regex" and errors out when
  # there is none.  It looks like placeholder tokens were lost from these
  # two lines -- confirm against the placeholders setup-kubectl.bash
  # expects for KUBE_MASTER_HOSTNAME and BS_IP.
  sed -i -e "s//$KUBE_MASTER_HOSTNAME/g" $SEXTANT_DIR/setup-kubectl.bash
  sed -i -e "s//$BS_IP/g" $SEXTANT_DIR/setup-kubectl.bash
  cp $SEXTANT_DIR/setup-kubectl.bash $BSROOT/setup_kubectl.bash
  chmod +x $BSROOT/setup_kubectl.bash
  log info "Done"
}
271 |
# generate_addons_config renders the Kubernetes addon manifests (ingress,
# kube-dns, heapster, dashboard, ...) from their Go templates inside a
# golang container, then publishes the plain .yaml addon files over the
# bootstrapper's static HTTP directory.
generate_addons_config() {
  log info "Generating configuration files ..."
  mkdir -p $BSROOT/html/static/addons-config/

  docker run --rm -it \
    --volume $GOPATH:/go \
    --volume $CLUSTER_DESC:/cluster-desc.yaml \
    --volume $BSROOT:/bsroot \
    --volume $SEXTANT_DIR/scripts/common/addons.sh:/addons.sh \
    --volume $SEXTANT_DIR/golang/addons:/addons \
    golang:wheezy \
    /bin/bash /addons.sh

  # Copy the non-templated .yaml addon files.  Use a glob instead of
  # parsing `ls | grep` output, which is fragile (word splitting).
  for file in $SEXTANT_DIR/golang/addons/template/*.yaml; do
    [[ -e $file ]] || continue  # unmatched glob stays literal; skip it
    cp $file $BSROOT/html/static/addons-config/
  done

  log info "Done"
}
292 |
293 |
--------------------------------------------------------------------------------
/scripts/common/addons.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
#
# Render the Kubernetes addon manifests inside the golang container.
# Expects /go/bin/addons (the compiled renderer), /cluster-desc.yaml,
# /addons/template/ and /bsroot to be bind-mounted by the caller
# (generate_addons_config in scripts/common.sh).

set -o errexit
set -o nounset
set -o pipefail

# render TEMPLATE OUTPUT -- expand one addon template under
# /addons/template/ against the cluster description into OUTPUT.
# Factored out because the original repeated the same three-line
# invocation eight times.
render() {
  /go/bin/addons -cluster-desc-file /cluster-desc.yaml \
                 -template-file "/addons/template/$1" \
                 -config-file "$2"
}

render ingress.template                     /bsroot/html/static/addons-config/ingress.yaml
render kubedns-controller.template          /bsroot/html/static/addons-config/kubedns-controller.yaml
render kubedns-svc.template                 /bsroot/html/static/addons-config/kubedns-svc.yaml
# dnsmasq.conf is consumed by the bootstrapper itself, so it goes to
# /bsroot/config rather than the HTTP-served addons-config directory.
render dnsmasq.conf.template                /bsroot/config/dnsmasq.conf
render default-backend.template             /bsroot/html/static/addons-config/default-backend.yaml
render heapster-controller.template         /bsroot/html/static/addons-config/heapster-controller.yaml
render influxdb-grafana-controller.template /bsroot/html/static/addons-config/influxdb-grafana-controller.yaml
render dashboard-controller.template        /bsroot/html/static/addons-config/dashboard-controller.yaml
--------------------------------------------------------------------------------
/scripts/coreos.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash


# check_coreos_version queries the official CoreOS release server for the
# resolved version string of the configured channel/version and stores it
# in the global VERSION, which the other functions in this file use.
check_coreos_version () {
  printf "Checking the CoreOS version ... "
  VERSION=$(curl -s https://$cluster_desc_coreos_channel.release.core-os.net/amd64-usr/$cluster_desc_coreos_version/version.txt | grep 'COREOS_VERSION=' | cut -f 2 -d '=')
  # Empty VERSION means the download or the parse failed.
  if [[ $VERSION == "" ]]; then
    echo "Failed"; exit 1;
  fi
  echo "Done with coreos channel: " $cluster_desc_coreos_channel "version: " $VERSION
}
12 |
# update_coreos_images downloads the CoreOS production disk image for
# $VERSION (set by check_coreos_version), verifies its GPG signature, and
# repoints the html/static/current symlink at the new version directory.
update_coreos_images() {
  printf "Updating CoreOS images ... "
  mkdir -p $BSROOT/html/static/$VERSION

  wget --quiet -c -N -P $BSROOT/html/static/$VERSION https://$cluster_desc_coreos_channel.release.core-os.net/amd64-usr/$cluster_desc_coreos_version/version.txt
  wget --quiet -c -N -P $BSROOT/html/static/$VERSION https://$cluster_desc_coreos_channel.release.core-os.net/amd64-usr/$cluster_desc_coreos_version/coreos_production_image.bin.bz2 || { echo "Failed"; exit 1; }
  wget --quiet -c -N -P $BSROOT/html/static/$VERSION https://$cluster_desc_coreos_channel.release.core-os.net/amd64-usr/$cluster_desc_coreos_version/coreos_production_image.bin.bz2.sig || { echo "Failed"; exit 1; }
  cd $BSROOT/html/static/$VERSION
  # Verify the image against the (previously imported) CoreOS signing key.
  gpg --verify coreos_production_image.bin.bz2.sig > /dev/null 2>&1 || { echo "Failed"; exit 1; }
  cd $BSROOT/html/static
  # Never change 'current' to 'current/', I beg you.
  rm -rf current > /dev/null 2>&1
  ln -sf ./$VERSION current || { echo "Failed"; exit 1; }
  echo "Done"
}
28 |
# download_pxe_images prepares the TFTP boot directory: the syslinux
# bootloader pieces plus the GPG-verified CoreOS PXE kernel and initrd.
# The large payloads are stored under html/static/$VERSION and exposed in
# tftpboot via relative symlinks so they are not duplicated on disk.
download_pxe_images() {
  mkdir -p $BSROOT/tftpboot
  printf "Downloading syslinux ... "
  wget --quiet -c -N -P $BSROOT/tftpboot https://www.kernel.org/pub/linux/utils/boot/syslinux/syslinux-6.03.tar.gz || { echo "Failed"; exit 1; }
  cd $BSROOT/tftpboot
  tar xzf syslinux-6.03.tar.gz || { echo "Failed"; exit 1; }
  # Only three files from the syslinux distribution are needed for PXE.
  cp syslinux-6.03/bios/core/pxelinux.0 $BSROOT/tftpboot || { echo "Failed"; exit 1; }
  cp syslinux-6.03/bios/com32/menu/vesamenu.c32 $BSROOT/tftpboot || { echo "Failed"; exit 1; }
  cp syslinux-6.03/bios/com32/elflink/ldlinux/ldlinux.c32 $BSROOT/tftpboot || { echo "Failed"; exit 1; }
  rm -rf syslinux-6.03 || { echo "Failed"; exit 1; } # Clean the untarred.
  echo "Done"

  printf "Importing CoreOS signing key ... "
  wget --quiet -c -N -P $BSROOT/tftpboot https://coreos.com/security/image-signing-key/CoreOS_Image_Signing_Key.asc || { echo "Failed"; exit 1; }
  gpg --import --keyid-format LONG $BSROOT/tftpboot/CoreOS_Image_Signing_Key.asc > /dev/null 2>&1 || { echo "Failed"; exit 1; }
  echo "Done"

  printf "Downloading CoreOS PXE vmlinuz image ... "
  cd $BSROOT/tftpboot/
  wget --quiet -c -N -P $BSROOT/html/static/$VERSION https://$cluster_desc_coreos_channel.release.core-os.net/amd64-usr/$VERSION/coreos_production_pxe.vmlinuz || { echo "Failed"; exit 1; }
  # Replace any stale copy/symlink in tftpboot with a fresh symlink.
  rm -f $BSROOT/tftpboot/coreos_production_pxe.vmlinuz > /dev/null 2>&1 || { echo "Failed"; exit 1; }
  ln -s ../html/static/$VERSION/coreos_production_pxe.vmlinuz ./ > /dev/null 2>&1 || { echo "Failed"; exit 1; }

  wget --quiet -c -N -P $BSROOT/html/static/$VERSION https://$cluster_desc_coreos_channel.release.core-os.net/amd64-usr/$VERSION/coreos_production_pxe.vmlinuz.sig || { echo "Failed"; exit 1; }
  rm -f $BSROOT/tftpboot/coreos_production_pxe.vmlinuz.sig > /dev/null 2>&1 || { echo "Failed"; exit 1; }
  ln -s ../html/static/$VERSION/coreos_production_pxe.vmlinuz.sig ./ > /dev/null 2>&1 || { echo "Failed"; exit 1; }
  cd $BSROOT/tftpboot
  gpg --verify coreos_production_pxe.vmlinuz.sig > /dev/null 2>&1 || { echo "Failed"; exit 1; }
  echo "Done"

  printf "Downloading CoreOS PXE CPIO image ... "
  wget --quiet -c -N -P $BSROOT/html/static/$VERSION https://$cluster_desc_coreos_channel.release.core-os.net/amd64-usr/$VERSION/coreos_production_pxe_image.cpio.gz || { echo "Failed"; exit 1; }
  rm -f $BSROOT/tftpboot/coreos_production_pxe_image.cpio.gz > /dev/null 2>&1 || { echo "Failed"; exit 1; }
  ln -s ../html/static/$VERSION/coreos_production_pxe_image.cpio.gz ./ > /dev/null 2>&1 || { echo "Failed"; exit 1; }

  wget --quiet -c -N -P $BSROOT/html/static/$VERSION https://$cluster_desc_coreos_channel.release.core-os.net/amd64-usr/$VERSION/coreos_production_pxe_image.cpio.gz.sig || { echo "Failed"; exit 1; }
  rm -f $BSROOT/tftpboot/coreos_production_pxe_image.cpio.gz.sig > /dev/null 2>&1 || { echo "Failed"; exit 1; }
  ln -s ../html/static/$VERSION/coreos_production_pxe_image.cpio.gz.sig ./ > /dev/null 2>&1 || { echo "Failed"; exit 1; }
  gpg --verify coreos_production_pxe_image.cpio.gz.sig > /dev/null 2>&1 || { echo "Failed"; exit 1; }
  echo "Done"
}
70 |
71 |
72 | generate_pxe_config() {
73 | printf "Generating pxelinux.cfg ... "
74 | mkdir -p $BSROOT/tftpboot/pxelinux.cfg
75 | cat > $BSROOT/tftpboot/pxelinux.cfg/default <build.sh DRIVER_VERSION CHANNEL COREOS_VERSION
30 |
31 | e.g.
32 |
33 | `./build.sh 367.35 stable 1068.9.0`
34 |
35 | The scripts will download both the official NVIDIA archive and the CoreOS
36 | developer images, caching them afterwards. It will then create three archives:
37 |
38 | 压缩包 | 说明
39 | -------|-------
40 | libraries-[DRIVER_VERSION].tar.bz2 | GPU 动态库
41 | tools-[DRIVER_VERSION].tar.bz2 | GPU 工具
42 | modules-[COREOS_VERSION]-[DRIVER_VERSION].tar.bz2 | GPU 驱动
43 |
44 |
45 | ## 方法B编译结果
46 |
47 | CoreOS Version | Nvidia driver | 编译结果
48 | ---|---|---
49 | 1010.5.0(4.5.0-coreos-rc1) | 367.35 | 编译通过,不能使用
50 | 1068.9.0(4.6.3-coreos) | 367.35 | 编译通过,正常使用
51 | 1122.2.0(4.7.0-coreos) | 367.57 | 编译通过,正常使用
52 | 1122.3.0(4.7.0-coreos-rc1) | 367.35 | 编译不通过,kernel 接口有改动
53 | 1122.3.0(4.7.0-coreos-rc1) | 367.57 | 编译通过,不能使用
54 |
55 | **结论**
56 |
57 | **编译通过,而不能使用是 nvidia-uvm.ko 驱动文件大小过小,正常使用 nvidia-uvm.ko 和 nvidia.ko 大小相当(>10M),不能使用的 nvidia-uvm.ko 的文件大小在1M左右**
58 |
59 | ## 参考
60 | * 方法 A:https://github.com/emergingstack/es-dev-stack
61 | * 方法 B:https://github.com/Clarifai/coreos-nvidia
62 | * http://www.emergingstack.com/2016/01/10/Nvidia-GPU-plus-CoreOS-plus-Docker-plus-TensorFlow.html
63 | * http://tleyden.github.io/blog/2014/11/04/coreos-with-nvidia-cuda-gpu-drivers/
64 | * https://github.com/indigo-dc/Ubuntu1404_pyopencl/blob/master/Dockerfile
65 | * https://github.com/NVIDIA/nvidia-docker/wiki/CUDA#requirements
66 |
--------------------------------------------------------------------------------
/scripts/coreos_gpu/_container_build.sh:
--------------------------------------------------------------------------------
#!/bin/sh

# Build the NVIDIA kernel modules inside a CoreOS developer container.
# Expects the unpacked NVIDIA installer tree bind-mounted at
# /nvidia_installers (done by build.sh via systemd-nspawn).
VERSION=$1
echo Building ${VERSION}

# Fetch the portage tree so the exact kernel sources of this CoreOS
# image can be emerged below.
emerge-gitclone

emerge -gKq coreos-sources
cd /usr/src/linux
# Reuse the running kernel's configuration so built modules match it.
cp /lib/modules/*-coreos/build/.config .config

make olddefconfig
make modules_prepare

cd /nvidia_installers/NVIDIA-Linux-x86_64-${VERSION}
# Build the kernel modules against /usr/src/linux, leaving the .ko files
# and the log in the current directory.  (-s/-n: non-interactive run --
# confirm exact flag semantics against nvidia-installer docs.)
./nvidia-installer -s -n --kernel-source-path=/usr/src/linux \
    --no-check-for-alternate-installs --no-opengl-files \
    --kernel-install-path=${PWD} --log-file-name=${PWD}/nvidia-installer.log

cat nvidia-installer.log
21 |
--------------------------------------------------------------------------------
/scripts/coreos_gpu/_export.sh:
--------------------------------------------------------------------------------
#!/bin/sh

# Package the NVIDIA build artifacts into three flat tar.bz2 archives:
#   libraries-<VERSION>.tar.bz2          shared libraries (*.so.*)
#   tools-<VERSION>.tar.bz2              userspace tools listed in $TOOLS
#   modules-<COMBINED_VERSION>.tar.bz2   kernel modules (*.ko)
ARTIFACT_DIR=$1
VERSION=$2
COMBINED_VERSION=$3

TOOLS="nvidia-debugdump nvidia-cuda-mps-control nvidia-xconfig nvidia-modprobe nvidia-smi nvidia-cuda-mps-server
nvidia-persistenced nvidia-settings"

# Create archives with no paths (-C plus basename keeps entries flat).
tar -C ${ARTIFACT_DIR} -cvj $(basename -a ${ARTIFACT_DIR}/*.so.*) > libraries-${VERSION}.tar.bz2
tar -C ${ARTIFACT_DIR} -cvj ${TOOLS} > tools-${VERSION}.tar.bz2
tar -C ${ARTIFACT_DIR}/kernel -cvj $(basename -a ${ARTIFACT_DIR}/kernel/*.ko) > modules-${COMBINED_VERSION}.tar.bz2
14 |
--------------------------------------------------------------------------------
/scripts/coreos_gpu/build.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Build NVIDIA drivers for a given CoreOS version
#

SCRIPTS_DIR=$1
WORK_DIR=$2
# Positional arguments 3-5 are optional and default to a known-good
# driver/channel/CoreOS combination.
DRIVER_VERSION=${3:-367.57}
COREOS_TRACK=${4:-stable}
COREOS_VERSION=${5:-1122.2.0}

DRIVER_ARCHIVE=NVIDIA-Linux-x86_64-${DRIVER_VERSION}
DRIVER_ARCHIVE_PATH=${WORK_DIR}/nvidia_installers
DEV_CONTAINER=coreos_developer_container.bin.${COREOS_VERSION}
PKG_DIR=${WORK_DIR}/pkg/run_files/${COREOS_VERSION}

# Remove the scratch directory no matter how the script terminates.
function finish {
  rm -Rf tmp
}

trap finish exit
22 |
23 | mkdir -p $WORK_DIR
24 | cd $WORK_DIR
25 |
26 | :</dev/null 2>&1 \
57 | || { echo "systemd-nspawn is required, but it's not installed. Aborting."; exit -1; }
58 |
59 | echo "sudo systemd-nspawn -i ${DEV_CONTAINER} --share-system \
60 | --bind=${WORK_DIR}/_container_build.sh:/_container_build.sh \
61 | --bind=${PKG_DIR}:/nvidia_installers \
62 | /bin/bash -x /_container_build.sh ${DRIVER_VERSION}"
63 |
# Run the in-container build with the build script and the installer
# tree bind-mounted into the CoreOS developer container.
sudo systemd-nspawn -i ${DEV_CONTAINER} --share-system \
    --bind=${WORK_DIR}/_container_build.sh:/_container_build.sh \
    --bind=${PKG_DIR}:/nvidia_installers \
    /bin/bash -x /_container_build.sh ${DRIVER_VERSION}

# Files created inside the container are root-owned; hand them back to
# the invoking user before packaging.
sudo chown -R ${UID}:${GROUPS[0]} ${PKG_DIR}

bash -x ${WORK_DIR}/_export.sh ${PKG_DIR}/*-${DRIVER_VERSION} \
    ${DRIVER_VERSION} ${COREOS_VERSION}-${DRIVER_VERSION}
73 |
--------------------------------------------------------------------------------
/scripts/coreos_gpu/check.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Check for NVIDIA driver updates
#

DRIVER=${1:-367.27}

# Release channels to scan; a single channel can be passed as $2.
TRACKS="${2:-alpha beta stable}"
for track in ${TRACKS}
do
    # Can't use $(< because it prints errors for a missing file!
    last=$(cat last.${track} 2>/dev/null)

    # Grab the most recent directory
    # NOTE(review): the grep pattern below is empty, so it matches every
    # line -- it looks like an HTML-tag pattern (e.g. an <a href=...>
    # matcher) was lost from this file; confirm against the markup of
    # the release index page.
    curl -s https://${track}.release.core-os.net/amd64-usr/ |
        grep -oE '' | grep -o '[0-9\.]*' |
        sort -n | tail -1 | while read v; do
        # Use sort -V version comparison
        if [ "${last}" != "$(/bin/echo -e ${v}\\n${last} | sort -rV | head -1)" ]
        then
            # We rely on the previous sorting to build the most recent version last
            bash -x ./build.sh ${DRIVER} ${track} ${v} && echo ${v} > last.${track}
        fi
    done
done
exit
27 |
--------------------------------------------------------------------------------
/scripts/coreos_gpu/setup_gpu.sh:
--------------------------------------------------------------------------------
#! /bin/bash

set -x

# Print invocation help for this installer script.
function usage(){
    printf '%s\n' "sudo bash $0 coreos-verison nvidia-drivier-verison"
    printf '%s\n' "e.g. sudo bash $0 1122.2.0 367.57"
}
9 |
# Exactly two arguments are required: CoreOS version and driver version.
if [ $# -ne 2 ]; then
    usage
    exit 1;
fi
15 |
# realpath PATH -- print PATH as an absolute path without resolving
# symlinks: absolute inputs pass through, relative ones (with any
# leading "./" stripped) are anchored at $PWD.
realpath() {
    case $1 in
        /*) echo "$1" ;;
        *)  echo "$PWD/${1#./}" ;;
    esac
}
19 |
WORKDIR=$(dirname $(realpath $0))

mkdir -p /opt/bin
mkdir -p /var/lib/nvidia

# Make /var/lib/nvidia known to the dynamic linker (ldconfig runs below
# after the libraries are copied in).
TEMPLATE=/etc/ld.so.conf.d/nvidia.conf
[ -f $TEMPLATE ] || {
    echo "TEMPLATE: $TEMPLATE"
    mkdir -p $(dirname $TEMPLATE)
    cat << EOF > $TEMPLATE
/var/lib/nvidia
EOF
}

# Archive names follow the convention produced by build.sh/_export.sh:
# modules-<coreos-version>-<driver-version>, tools-<driver-version>,
# libraries-<driver-version>.
MODULES="$WORKDIR/modules-$1-$2"
TOOLS="$WORKDIR/tools-$2"
LIBRARIES="$WORKDIR/libraries-$2"

rm -rf {$MODULES,$TOOLS,$LIBRARIES}
mkdir -p {$MODULES,$TOOLS,$LIBRARIES}
tar -xf ${MODULES}.tar.bz2 -C $MODULES
tar -xf ${TOOLS}.tar.bz2 -C $TOOLS
tar -xf ${LIBRARIES}.tar.bz2 -C $LIBRARIES


# Swap any currently-loaded NVIDIA modules for the freshly built ones.
rmmod nvidia-uvm
rmmod nvidia
insmod $MODULES/nvidia.ko
insmod $MODULES/nvidia-uvm.ko

cp $TOOLS/* /opt/bin

cp $LIBRARIES/* /var/lib/nvidia/

ldconfig

# Count the number of NVIDIA controllers found.
NVDEVS=`lspci | grep -i NVIDIA`
N3D=`echo "$NVDEVS" | grep "3D controller" | wc -l`
NVGA=`echo "$NVDEVS" | grep "VGA compatible controller" | wc -l`
N=`expr $N3D + $NVGA - 1`

# One node per GPU: major 195, minor == GPU index.
for i in `seq 0 $N`; do
    mknod -m 666 /dev/nvidia$i c 195 $i
done

# Control device uses the fixed minor 255.
mknod -m 666 /dev/nvidiactl c 195 255

# Find out the major device number used by the nvidia-uvm driver
D=`grep nvidia-uvm /proc/devices | awk '{print $1}'`
mknod -m 666 /dev/nvidia-uvm c $D 0
72 |
--------------------------------------------------------------------------------
/scripts/load_yaml.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # parse_yaml was shamelessly stolen from
4 | # https://gist.github.com/pkuczynski/8665367. It encapsulates a AWK
5 | # script which converts a .yaml file into a .bash file, where each
6 | # bash variable corresponds to a key-value pair in the .yaml file.
7 | #
8 | # For example, the following invocation generates parseResult.bash,
9 | # where every bash variable's name is composed of the prefix,
10 | # cluster_desc_, and the key name (including all its ancestor keys).
11 | #
12 | # parse_yaml example.yaml "cluster_desc_" > parseResult.bash
13 | #
14 |
15 | ## derived from https://gist.github.com/epiloque/8cf512c6d64641bde388
16 | ## works for arrays of hashes, as long as the hashes do not have arrays
17 | parse_yaml() {
18 | local prefix=$2
19 | local s
20 | local w
21 | local fs
22 | s='[[:space:]]*'
23 | w='[a-zA-Z0-9_]*'
24 | fs="$(echo @|tr @ '\034')"
25 | sed -ne "s|^\($s\)\($w\)$s:$s\"\(.*\)\"$s\$|\1$fs\2$fs\3|p" \
26 | -e "s|^\($s\)\($w\)$s[:-]$s\(.*\)$s\$|\1$fs\2$fs\3|p" "$1" |
27 | awk -F"$fs" '{
28 | indent = length($1)/2;
29 | if (length($2) == 0) { conj[indent]="+";} else {conj[indent]="";}
30 | vname[indent] = $2;
31 | for (i in vname) {if (i > indent) {delete vname[i]}}
32 | if (length($3) > 0) {
33 | vn=""; for (i=0; i $parsedYaml
52 | source $parsedYaml
53 | rm $parsedYaml
54 | }
55 |
--------------------------------------------------------------------------------
/scripts/log.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
loglevel=0 #debug:0; info:1; warn:2; error:3; fatal:4

# log LEVEL MESSAGE -- print MESSAGE prefixed with the level, timestamp,
# calling function chain and line number, colourised per LEVEL.
# Messages whose level is below $loglevel are suppressed; `fatal` also
# terminates the script with exit status 1.
function log {
    local msg;local logtype
    # datetime/logformat are scratch values: keep them local so repeated
    # calls do not leak globals into the caller (the original left them
    # global while msg/logtype were already local).
    local datetime;local logformat
    logtype=$1
    msg=$2
    datetime=$(date +'%F %H:%M:%S')
    # ${FUNCNAME[@]/log/}: the call chain with this helper's own frame
    # blanked out; `caller 0` supplies the invoking line number.
    logformat="[${logtype}]${datetime} ${FUNCNAME[@]/log/} [line:$(caller 0 | awk '{print$1}')]\t${msg}"
    {
    case $logtype in
        debug)
            [[ $loglevel -le 0 ]] && echo -e "\033[30m${logformat}\033[0m" ;;
        info)
            [[ $loglevel -le 1 ]] && echo -e "\033[32m${logformat}\033[0m" ;;
        warn)
            [[ $loglevel -le 2 ]] && echo -e "\033[33m${logformat}\033[0m" ;;
        error)
            [[ $loglevel -le 3 ]] && echo -e "\033[31m${logformat}\033[0m" ;;
        fatal)
            [[ $loglevel -le 4 ]] && echo -e "\033[31m${logformat}\033[0m" && exit 1; ;;

    esac
    }
}
24 | }
25 |
26 |
--------------------------------------------------------------------------------
/setup-kubectl.bash:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
KUBE_MASTER_HOSTNAME=
BS_IP=

# setup_kubectl downloads the kubectl binary served by the bootstrapper
# and points it at this cluster's master.  The two placeholders above
# are filled in by bsroot.sh (prepare_setup_kubectl) via sed.
setup_kubectl() {
  # Download kubectl binary
  wget --quiet -c -O "./kubectl" http://$BS_IP/static/kubectl
  chmod +x ./kubectl
  mkdir -p ~/bin
  cp ./kubectl ~/bin/
  # Configure kubectl.  Invoke the just-installed copy by explicit path:
  # the original ran bare `kubectl`, which silently failed (or picked a
  # different binary) whenever ~/bin was not on $PATH.
  echo $KUBE_MASTER_HOSTNAME
  ~/bin/kubectl config set-cluster default-cluster --server=http://$KUBE_MASTER_HOSTNAME:8080
  ~/bin/kubectl config set-context default-system --cluster=default-cluster
  ~/bin/kubectl config use-context default-system
}

setup_kubectl
21 |
--------------------------------------------------------------------------------
/start_bootstrapper_container.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# start_bootstrapper_container.sh load docker image from bsroot and
# then push them to registry
if [[ "$#" -gt 1 ]]; then
    echo "Usage: start_bootstrapper_container.sh [bsroot-path]"
    exit 1
elif [[ "$#" -ne 1 ]]; then
    # No argument: default to the directory containing this script.
    BSROOT=$(cd `dirname $0`; pwd)
else
    BSROOT=$1
fi

if [[ ! -d $BSROOT ]]; then
    echo "$BSROOT is not a directory"
    exit 2
fi

# An explicitly-passed bsroot must be absolute: it is bind-mounted into
# the container below, where a relative path would be meaningless.
if [[ $BSROOT != /* ]]; then
    echo "bsroot path not start with / !"
    exit 1
fi
23 |
source $BSROOT/load_yaml.sh
# Load cluster-desc.yml into cluster_desc_-prefixed shell variables.
load_yaml $BSROOT/config/cluster-desc.yml cluster_desc_

# If the CentOS "Everything" ISO for the configured release (name uses
# the suffix of cluster_desc_centos_version) is present, loop-mount it so
# its contents can be served over HTTP for PXE installs.
if [[ -e "$BSROOT/html/static/CentOS7/CentOS-7-x86_64-Everything-${cluster_desc_centos_version##*.}.iso" ]]; then
    if [[ ! -d "$BSROOT/html/static/CentOS7/dvd_content" ]]; then
        mkdir -p $BSROOT/html/static/CentOS7/dvd_content
    fi
    # .treeinfo only exists when the ISO is already mounted there.
    if [[ ! -f "$BSROOT/html/static/CentOS7/dvd_content/.treeinfo" ]]; then
        sudo mount -t iso9660 -o loop $BSROOT/html/static/CentOS7/CentOS-7-x86_64-Everything-${cluster_desc_centos_version##*.}.iso $BSROOT/html/static/CentOS7/dvd_content || { echo "Mount iso failed"; exit 1; }
    fi
fi
35 |
# Config Registry tls
mkdir -p /etc/docker/certs.d/bootstrapper:5000
rm -rf /etc/docker/certs.d/bootstrapper:5000/*
cp $BSROOT/tls/ca.pem /etc/docker/certs.d/bootstrapper:5000/ca.crt

# Make the name "bootstrapper" resolve locally for docker push/pull.
if ! grep -q "127.0.0.1 bootstrapper" /etc/hosts
then echo "127.0.0.1 bootstrapper" >> /etc/hosts
fi

# Replace any previous bootstrapper container/image with the saved one.
docker rm -f bootstrapper > /dev/null 2>&1
docker rmi bootstrapper:latest > /dev/null 2>&1
docker load < $BSROOT/bootstrapper.tar > /dev/null 2>&1 || { echo "Docker can not load bootstrapper.tar!"; exit 1; }
# On failure report with echo: this script only sources load_yaml.sh, so
# the log() helper from scripts/log.sh is undefined here (the original
# `log fatal` produced "command not found").  Also exit with 1 instead
# of -1, which is outside the POSIX 0-255 exit-status range.
docker run -d \
    --name bootstrapper \
    --net=host \
    --privileged \
    -v /var/run/docker.sock:/var/run/docker.sock \
    -v $BSROOT:/bsroot \
    bootstrapper ${cluster_desc_start_pxe} || { echo "Failed"; exit 1; }

# Sleep 3 seconds, waitting for registry started.
sleep 3
58 |
59 |
# Load every image tarball exported by bsroot.sh and push it into the
# local registry under the cluster's docker domain.
for DOCKER_IMAGE in $(set | grep '^cluster_desc_images_' | grep -o '".*"' | sed 's/"//g'); do
    DOCKER_TAR_FILE=$BSROOT/$(echo ${DOCKER_IMAGE}.tar | sed "s/:/_/g" |awk -F'/' '{print $NF}')
    # Compute the registry URL before printing it: the original printed
    # $LOCAL_DOCKER_URL one line before assigning it, so the progress
    # message showed an empty (or stale) value on every iteration.
    LOCAL_DOCKER_URL=$cluster_desc_dockerdomain:5000/${DOCKER_IMAGE}
    printf "docker load & push $LOCAL_DOCKER_URL ... "
    docker load < $DOCKER_TAR_FILE >/dev/null 2>&1
    docker push $LOCAL_DOCKER_URL >/dev/null 2>&1
    echo "Done."
done
68 |
# Optionally start an NTP server container from the local registry.
# NOTE(review): the comparison against " y" (with a leading space)
# matches the value format this key apparently gets from load_yaml --
# confirm before tightening it to "y".
if [[ $cluster_desc_set_ntp == " y" ]]; then
    docker rm -f ntpserver > /dev/null 2>&1
    NTP_DOCKER_IMAGE=$cluster_desc_dockerdomain:5000/${cluster_desc_images_ntp}
    # exit 1, not -1: POSIX exit statuses are limited to 0-255.
    docker run -d \
        --name ntpserver \
        --net=host \
        --privileged \
        $NTP_DOCKER_IMAGE || { echo "Failed"; exit 1; }
fi
78 |
79 |
--------------------------------------------------------------------------------
/testdata/example.yaml:
--------------------------------------------------------------------------------
1 | animal: cat
2 | fruits:
3 | - apple
4 | - orange
5 |
--------------------------------------------------------------------------------
/vm-cluster/README.md:
--------------------------------------------------------------------------------
1 | ## vm-cluster 自动化安装k8s
2 | ### 环境准备
3 |
4 | | 虚拟机 | 角色 |网络组成 |
5 | | ------------- |-------------| ----|
| bootstrapper | dnsmasq(dhcp,dns),cloudconfig server,bootstrapper server,registry|eth0 nat网络,eth1内部网络 ,eth2 hostonly网络|
7 | | master | k8s master |eth0 内部网络|
8 | | worker | k8s worker |eth0 内部网络|
9 |
10 | 注意:
11 | 1,内部网络用于三台虚拟机之间相互通信使用
12 | 2,vagrant的挂载需要依赖hostonly网卡
13 |
14 | ### 操作步骤
15 |
16 | 1.修改 vagrantfile 中 cluster-desc.yml.template配置
17 |
18 | 2.启动bootstrapper
19 | ```
20 | cd vm-cluster
21 | ./prepare_install_bootstrapper.sh
22 | vagrant up bootstrapper
23 | ```
24 |
25 | * 执行 bsroot.sh 脚本(下载pxe镜像,生成 pxe 的配置,dns dhcp配置,registry 配置,配置 cloudconfig server 环境,下载k8s依赖镜像)
26 |
27 | 3.启动 k8s master,安装 k8s master 节点
28 | ```
29 | cd vm-cluster
30 | vagrant up master
31 | ```
启动的过程中会弹出 virtualbox 窗口,在窗口中会出现如下提示:
33 | ```
34 | Press F8 for menu.(5)
35 | ```
按 F8 后,会出现从网络安装 CoreOS 的如下提示:
37 | ```
38 | Install CoreOS from network server
39 | ```
40 | 直接按 enter,然后开始从 pxe server 加载 coreos 镜像
注意:coreos 首次仅仅是内存安装,可以通过 journalctl -xef 查看系统日志,当提示 coreos 硬盘安装成功后系统会重启。
42 | 重启后,coreos 虚拟机会根据 cloudconfig 配置文件自动化安装k8s。可以通过在 bootstrapper vm上ssh core@master 免密码连接 master vm。几分钟后,可以通过docker ps查看 k8s master 是否启动成功
43 |
44 | 4.安装 k8s worker 节点参考 master 节点安装步骤
45 |
### troubleshooting
47 |
48 | 问题1:
49 | ```
50 | Stderr: VBoxManage: error: Implementation of the USB 2.0 controller not found!
51 | ```
52 | 解决办法:
53 | ```
54 | To fix this problem, install the 'Oracle VM VirtualBox Extension Pack'
55 | ```
56 | https://www.virtualbox.org/wiki/Downloads 可以下载安装最新的VirtualBox和Extension Pack
57 | * VirtualBox 5.1.4 for Linux hosts
58 | * VirtualBox 5.1.4 Oracle VM VirtualBox Extension Pack
59 |
60 | 问题2:
61 | ```
62 | Error response from daemon: client is newer than server (client API version: 1.23, server API version: 1.22)
63 | ```
64 | 解决办法:
修改 Dockerfile,添加如下参数, 指定 docker api 的版本为1.22,可以解决版本不一致的问题
66 | ```
67 | ENV DOCKER_API_VERSION=1.22
68 | ```
69 |
70 | 问题3:
71 | 无法找到 pxe server
72 | 解决办法:
73 | 删除 dnsmasq.conf 中如下两行
74 | ```
75 | interface=eth0
76 | bind-interfaces
77 | ```
78 |
--------------------------------------------------------------------------------
/vm-cluster/Vagrantfile:
--------------------------------------------------------------------------------
# coding: utf-8
# Vagrant definition of a three-VM Sextant test cluster: a CoreOS
# "bootstrapper" serving PXE/DHCP/DNS/registry, plus empty "master" and
# "worker" boxes that install CoreOS over the network from it.
$update_channel = "alpha"
$image_version = "current"
$shared_folders = {}

Vagrant.configure("2") do |config|
  # always use Vagrants insecure key
  config.ssh.insert_key = false
  # forward ssh agent to easily ssh into the different machines
  config.ssh.forward_agent = true

  config.vm.provider :virtualbox do |v|
    # On VirtualBox, we don't have guest additions or a functional vboxsf
    # in CoreOS, so tell Vagrant that so it can be smarter.
    v.check_guest_additions = false
    v.functional_vboxsf = false
  end

  # Avoid a plugin conflict: vagrant-vbguest must not try to update guest
  # additions on CoreOS.
  if Vagrant.has_plugin?("vagrant-vbguest") then
    config.vbguest.auto_update = false
  end

  # Define the bootstrapper VM.
  config.vm.define "bootstrapper" do |bs|
    bs.vm.box = "coreos-stable"
    # NOTE(review): the URL below contains no %s placeholders, so applying
    # "%" with [$update_channel, $image_version] changes nothing (Ruby's
    # String#% ignores surplus arguments) — the stable/current box is
    # always fetched. Confirm whether the two globals were meant to be
    # interpolated before "fixing" this, since that would switch channels.
    bs.vm.box_url = "https://storage.googleapis.com/stable.release.core-os.net/amd64-usr/current/coreos_production_vagrant.json" % [$update_channel, $image_version]
    bs.vm.hostname = "bootstrapper"
    # Internal NIC so the bootstrapper can talk to the other VMs.
    bs.vm.network "private_network", ip: "192.168.8.101",virtualbox__intnet: true
    # Mount the local bsroot directory into the bootstrapper VM over NFS.
    bs.vm.synced_folder "./../bsroot", "/bsroot", id: "core", :nfs => true, :mount_options => ['nolock,vers=3,udp']
    $shared_folders.each_with_index do |(host_folder, guest_folder), index|
      config.vm.synced_folder host_folder.to_s, guest_folder.to_s, id: "core-share%02d" % index, nfs: true, mount_options: ['nolock,vers=3,udp']
    end
    # The NFS mounts above depend on a host-only NIC.
    bs.vm.network "private_network", ip: "192.168.50.4", :adapter=>3
    bs.vm.provision "shell", path: "provision_bootstrapper_vm.sh"
    bs.vm.provider "virtualbox" do |vb|
      vb.gui = false
      vb.memory = "2048"
    end
  end

  # Define the k8s master VM.
  config.vm.define "master" do |master|
    master.vm.box ="c33s/empty"
    # Fixed MAC (08:00:27:4a:2d:a1) so this machine matches the "nodes"
    # entry in cluster-desc.yml.template; boot order below falls through
    # from (empty) disk to network/PXE.
    master.vm.network "private_network", type: "dhcp", virtualbox__intnet: true, :mac => "0800274a2da1", :adapter=>1, auto_config: false
    master.vm.provider "virtualbox" do |ms|
      ms.gui = true
      ms.memory = "1536"
      ms.customize ["modifyvm", :id, "--boot1", "disk", "--boot2", "net", "--usb", "off", "--usbehci", "off"]
    end
  end

  # Define the worker VM.
  config.vm.define "worker" do |worker|
    worker.vm.box ="c33s/empty"
    worker.vm.network "private_network", type: "dhcp", virtualbox__intnet: true, :adapter=>1, auto_config: false
    worker.vm.provider "virtualbox" do |wk|
      wk.gui = true
      wk.memory = "1536"
      wk.customize ["modifyvm", :id, "--boot1", "disk", "--boot2", "net", "--macaddress1", "auto", "--usb", "off", "--usbehci", "off"]
      # Recreate the SATA controller, then attach three extra data disks
      # (presumably for Ceph OSDs — confirm). NOTE(review): '8196' MB looks
      # like a typo for 8192 — verify before changing.
      wk.customize ["storagectl", :id, "--name", "SATA", "--remove"]
      wk.customize ["storagectl", :id, "--name", "SATA Controller", "--add", "sata"]
      (1..3).each do |i|
        file_to_disk = "./extra-disk-#{i}.vdi"
        unless File.exist?(file_to_disk)
          wk.customize ['createhd', '--filename', file_to_disk, '--size', '8196']
          wk.customize ['storageattach', :id, '--storagectl', 'SATA Controller', '--port', i-1, '--device', 0, '--type', 'hdd', '--medium', file_to_disk]
        end
      end
    end
  end
end
76 |
--------------------------------------------------------------------------------
/vm-cluster/cluster-desc.yml.template:
--------------------------------------------------------------------------------
1 | bootstrapper: 192.168.8.101
2 | subnet: 192.168.8.0
3 | netmask: 255.255.255.0
4 | iplow: 192.168.8.110
5 | iphigh: 192.168.8.220
6 | routers: [192.168.8.101]
7 | broadcast: 192.168.8.255
8 | nameservers: [192.168.8.101]
9 | upstreamnameservers: [8.8.8.8, 8.8.4.4]
10 | domainname: "k8s.baifendian.com"
11 | dockerdomain: "bootstrapper"
12 | k8s_service_cluster_ip_range: 10.100.0.0/24
13 | k8s_cluster_dns: 10.100.0.10
14 |
# Flannel backend only supports host-gw and udp for now.
16 | flannel_backend: "host-gw"
17 |
18 | # coreos_channel can be configured as stable, alpha, beta
19 | coreos_channel: "stable"
20 |
# coreos_version can be configured as a specific version number or "current"
22 | coreos_version: "1122.2.0"
23 | centos_version: "7.3.1611"
24 |
25 | # gpu drivers version
26 | set_gpu: n
27 | gpu_drivers_version: "367.57"
28 |
29 | # Ntpserver set_ntp option for the cluster configuration.
30 | set_ntp: y
31 |
32 | # OS type: CentOS or CoreOS
33 | os_name: "CentOS"
34 |
35 | # Centos repository: default repository for bootstrapper,
36 | # If you need to configure the other repository, need to open the configuration switch.
37 | # Currently supports only add 163 repository.
38 | # for example: 163 repo:
39 | # set_yum_repo: "mirrors.163.com"
40 | set_yum_repo: "bootstrapper"
41 |
42 | ingress_hostnetwork: true
43 |
# kube master IPs; the list should also include the cluster (service) IP
45 | kube_master_ip:
46 | - "10.100.0.1"
47 | - "192.168.61.73"
48 | - "192.168.48.23"
49 | kube_master_dns:
50 | - "aa-bb-cc-dd"
51 |
52 | coreos:
53 | reboot_strategy: "etcd-lock"
54 | start_time: "03:00"
55 | time_length: "3h"
56 |
57 | ceph:
58 | zap_and_start_osd: y
59 | osd_journal_size: 500
60 |
61 | images:
62 | hyperkube: "typhoon1986/hyperkube-amd64:v1.3.6"
63 | pause: "typhoon1986/pause-amd64:3.0"
64 | flannel: "typhoon1986/flannel:0.5.5"
65 | ingress: "yancey1989/nginx-ingress-controller:0.8.3"
66 | kube2sky: "yancey1989/kube2sky:1.14"
67 | healthz: "typhoon1986/exechealthz:1.0"
68 | addon_manager: "yancey1989/kube-addon-manager-amd64:v5.1"
69 | skydns: "typhoon1986/skydns:latest"
70 | ceph: "typhoon1986/ceph-daemon:tag-build-master-jewel-ubuntu-14.04-fix370"
71 | default_backend: "yancey1989/defaultbackend:1.0"
72 | ntp: "redaphid/docker-ntp-server"
73 | heapster: "kubernetes/heapster:canary"
74 | grafana: "lupan/heapster_grafana:v2.6.0-2"
75 | influxdb: "lupan/heapster_influxdb:v0.5"
76 |
77 | nodes:
78 | - mac: "08:00:27:4a:2d:a1"
79 | ceph_monitor: y
80 | kube_master: y
81 | etcd_member: y
82 | ingress_label: n
83 |
84 | ssh_authorized_keys: |1+
85 | - ""
86 |
--------------------------------------------------------------------------------
/vm-cluster/prepare_install_bootstrapper.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# In addition to running bsroot.sh to generate the bsroot directory, this
# script also creates a SSH key pair and copies it to the bootstrapper VM
# and to other VMs via the cloud-config file, so that all these VMs can
# SSH to each other without passwords.

# Fail early with a clear message if GOPATH is unset; every path below
# depends on it.
: "${GOPATH:?GOPATH must be set}"

# Create a temporary directory in OS X or
# Linux. c.f. http://unix.stackexchange.com/questions/30091/fix-or-alternative-for-mktemp-in-os-x
TMPDIR=$(mktemp -d 2>/dev/null || mktemp -d -t '/tmp')

# Generate the SSH public/private key pair.
# ${TMPDIR:?} aborts instead of expanding "rm -rf /*"-style if mktemp
# failed and left the variable empty; quoting protects paths with spaces.
rm -rf "${TMPDIR:?}"/*
ssh-keygen -t rsa -f "$TMPDIR/id_rsa" -P ''

# Replace the public key into cluster-desc.yml.template and generate cluster-desc.yml
# NOTE(review): the pattern between the first two '#' delimiters is empty
# ('s##…#'); GNU sed rejects an empty regex as the first command, so a
# placeholder token was presumably lost here — confirm against the
# template before relying on this substitution.
PUB_KEY=$(cat "$TMPDIR/id_rsa.pub")
SEXTANT_DIR=$GOPATH/src/github.com/k8sp/sextant
sed -e 's##'"$PUB_KEY"'#' "$SEXTANT_DIR/vm-cluster/cluster-desc.yml.template" > "$SEXTANT_DIR/cluster-desc.yml"

# Generate $SEXTANT_DIR/bsroot
cd "$SEXTANT_DIR"
./bsroot.sh "$SEXTANT_DIR/cluster-desc.yml"

# Put SSH keys into $SEXTANT_DIR/bsroot, which will be mounted to the bootstrapper VM.
mkdir -p "$SEXTANT_DIR/bsroot/vm-keys"
mv "$TMPDIR"/* "$SEXTANT_DIR/bsroot/vm-keys"
28 |
--------------------------------------------------------------------------------
/vm-cluster/provision_bootstrapper_vm.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Provision the bootstrapper VM: install the pre-generated SSH keys,
# patch dnsmasq, start the bootstrapper container, and point this VM's
# DNS at it. Step order matters: dnsmasq.conf must be patched before the
# container starts, and resolv.conf is rewritten only afterwards.

# Copy VM keys out from /bsroot/vm-keys.
mkdir -p /root/.ssh
rm -rf /root/.ssh/*
cp /bsroot/vm-keys/* /root/.ssh
cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys

# Delete the "interface=eth0" .. "bind-interfaces" lines from
# dnsmasq.conf; binding to eth0 only made the PXE server unreachable
# (see README "问题3").
sed -i '/interface=eth0/,/bind-interfaces/d' /bsroot/config/dnsmasq.conf
# Set system time zone
/usr/bin/timedatectl set-timezone Asia/Shanghai

cd /bsroot
./start_bootstrapper_container.sh /bsroot
# Use the bootstrapper (192.168.8.101) as this VM's DNS server and
# default search domain.
echo "nameserver 192.168.8.101" > /etc/resolv.conf
echo "domain k8s.baifendian.com" >> /etc/resolv.conf
18 |
--------------------------------------------------------------------------------