├── .gitignore
├── readme.md
├── hyperctl.sh
└── hyperctl.ps1
/.gitignore:
--------------------------------------------------------------------------------
1 | /tmp
2 | /.distro
3 | /*.yaml
4 | .DS_Store
5 |
--------------------------------------------------------------------------------
/readme.md:
--------------------------------------------------------------------------------
1 | # Kubernetes and Docker on Mac and Windows
2 |
3 | ## Quick jump
4 | - [Mac / Hyperkit](#mac--hyperkit)
5 | - [Windows / Hyper-V](#windows--hyper-v)
6 |
7 | ## Supported scenarios
8 | - Multi-node (or single-node) Kubernetes on CentOS/Ubuntu in Hyper-V/Hyperkit
9 | - Docker on your desktop without Docker Desktop
10 |
11 | ## Changelog
12 | - v1.0.3 remove runc workaround also from mac; remove helm 2 support; always update kubectl and helm
13 | - v1.0.2 preselect k8s version; remove runc workaround
14 | - v1.0.1 repo cmd; selectable pkg versions; auto-untaint master if no workers
15 | - v1.0.0 initial release
16 |
17 | # Mac / Hyperkit
18 | ```bash
19 |
20 | # tested on Hyperkit 0.20190802 on macOS 10.14.5 w/ APFS, guest images Centos 1907 and Ubuntu 18.04
21 | # note: `sudo` is necessary for access to macOS Hypervisor and vmnet frameworks, and /etc/hosts config
22 | # hint: disable sudo timeout by:
23 | # `echo "Defaults timestamp_timeout=-1" | sudo tee /private/etc/sudoers.d/sudo-notimeout`
24 |
25 | # download the script
26 | cd workdir
27 | curl https://raw.githubusercontent.com/youurayy/hyperctl/master/hyperctl.sh -O
28 | chmod +x hyperctl.sh
29 |
30 | # display short synopsis for the available commands
31 | ./hyperctl.sh
32 | '
33 | Usage: ./hyperctl.sh command+
34 |
35 | Commands:
36 |
37 | (pre-requisites are marked with ->)
38 |
39 | -> install - install basic homebrew packages
40 | config - show script config vars
41 | print - print contents of relevant config files
42 | -> net - create or update the vmnet config
43 | -> dhcp - append to the dhcp registry
44 |      reset      - reset the vmnet and dhcp configs
45 | -> hosts - append node names to etc/hosts
46 | -> image - download the VM image
47 | master - create and launch master node
48 | nodeN - create and launch worker node (node1, node2, ...)
49 | info - display info about nodes
50 | init - initialize k8s and setup host kubectl
51 | reboot - soft-reboot the nodes
52 | shutdown - soft-shutdown the nodes
53 | stop - stop the VMs
54 | start - start the VMs
55 | kill - force-stop the VMs
56 | delete - delete the VM files
57 | iso - write cloud config data into a local yaml
58 | timesync - setup sleepwatcher time sync
59 | docker - setup local docker with the master node
60 | share - setup local fs sharing with docker on master
61 | helm - setup helm 3
62 | repo - install local docker repo in k8s
63 | '
64 |
65 | # performs `brew install hyperkit qemu kubernetes-cli`.
66 | # (will not install the package if you already have the required binary in path; kubernetes-cli will be updated)
67 | # (qemu is necessary for `qemu-img`)
68 | # you may perform these manually / selectively instead.
69 | ./hyperctl.sh install
70 |
71 | # display configured variables (edit the script to change them)
72 | # note: to quickly change distro, do `echo bionic > .distro`
73 | ./hyperctl.sh config
74 | '
75 | VERSION: v1.0.0
76 | CONFIG: centos
77 | DISTRO: centos
78 | WORKDIR: ./tmp
79 | GUESTUSER: user
80 | SSHPATH: /Users/user/.ssh/id_rsa.pub
81 | IMAGEURL: https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1907.raw.tar.gz
82 | DISKFILE: CentOS-7-x86_64-GenericCloud-1907.raw
83 | CIDR: 10.10.0.0/24
84 | CPUS: 4
85 | RAM: 4GB
86 | HDD: 40G
87 | CNI: flannel
88 | CNINET: 10.244.0.0/16
89 | CNIYAML: https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
90 | DOCKERCLI: https://download.docker.com/mac/static/stable/x86_64/docker-19.03.1.tgz
91 | '
92 |
93 | # print external configs that this script can change
94 | ./hyperctl.sh print
95 |
96 | # cleans or creates /Library/Preferences/SystemConfiguration/com.apple.vmnet.plist
97 | # and sets the CIDR configured in the script.
98 | # if other apps already use the vmnet framework, you likely don't want to change it; in
99 | # that case, skip this command and instead set the CIDR inside this script
100 | # to the value from the vmnet.plist (as shown by the 'print' command).
101 | ./hyperctl.sh net
102 |
103 | # appends IPs and MACs from the NODES config to the /var/db/dhcpd_leases.
104 | # this is necessary to have predictable IPs.
105 | # (MACs are generated from UUIDs by the vmnet framework.)
106 | ./hyperctl.sh dhcp
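# for reference, each appended lease entry has this shape (values come from the NODES
# config in this script; shown here for the master node):
# {
#   name=master
#   ip_address=10.10.0.10
#   hw_address=1,32:a2:b4:36:57:16
#   identifier=1,32:a2:b4:36:57:16
# }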
107 |
108 | # if initialization through 'net' and 'dhcp' commands is unsuccessful, i.e. the
109 | # vmnet framework assigns our VMs wrong IP addresses, this command deletes
110 | # the above two files, after which you can manually reboot and try again.
111 | ./hyperctl.sh reset
112 |
113 | # appends IP/hostname pairs from the NODES config to the /etc/hosts.
114 | # (the same hosts entries will also be installed into every node)
115 | ./hyperctl.sh hosts
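# for reference, the appended entries look like this (IPs come from the NODES config):
# 10.10.0.10 master
# 10.10.0.11 node1
# ...
# 10.10.0.19 node9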
116 |
117 | # download, prepare and cache the VM image templates
118 | ./hyperctl.sh image
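# (downloads the cloud image plus kernel/initrd, verifies the sha256 checksums, and
# unpacks the .tar.gz archive where applicable)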
119 |
120 | # create/launch the nodes
121 | ./hyperctl.sh master
122 | ./hyperctl.sh node1
123 | ./hyperctl.sh nodeN...
124 | # ---- or -----
125 | ./hyperctl.sh master node1 node2 nodeN...
126 |
127 | # ssh to the nodes if necessary (e.g. for manual k8s init)
128 | # by default, your `.ssh/id_rsa.pub` key was copied into the VMs' ~/.ssh/authorized_keys
129 | # uses your host username (which is the default), e.g.:
130 | ssh master
131 | ssh node1
132 | ssh node2
133 | ...
134 |
135 | # performs automated k8s init (will wait for VMs to finish init first)
136 | ./hyperctl.sh init
137 |
138 | # after init, you can do e.g.:
139 | hyperctl get pods --all-namespaces
140 | '
141 | NAMESPACE NAME READY STATUS RESTARTS AGE
142 | kube-system coredns-5c98db65d4-b92p9 1/1 Running 1 5m31s
143 | kube-system coredns-5c98db65d4-dvxvr 1/1 Running 1 5m31s
144 | kube-system etcd-master 1/1 Running 1 4m36s
145 | kube-system kube-apiserver-master 1/1 Running 1 4m47s
146 | kube-system kube-controller-manager-master 1/1 Running 1 4m46s
147 | kube-system kube-flannel-ds-amd64-6kj9p 1/1 Running 1 5m32s
148 | kube-system kube-flannel-ds-amd64-r87qw 1/1 Running 1 5m7s
149 | kube-system kube-flannel-ds-amd64-wdmxs 1/1 Running 1 4m43s
150 | kube-system kube-proxy-2p2db 1/1 Running 1 5m32s
151 | kube-system kube-proxy-fg8k2 1/1 Running 1 5m7s
152 | kube-system kube-proxy-rtjqv 1/1 Running 1 4m43s
153 | kube-system kube-scheduler-master 1/1 Running 1 4m38s
154 | '
155 |
156 | # reboot the nodes
157 | ./hyperctl.sh reboot
158 |
159 | # show info about existing VMs (size, run state)
160 | ./hyperctl.sh info
161 | '
162 | NAME PID %CPU %MEM RSS STARTED TIME DISK SPARSE STATUS
163 | master 36399 0.4 2.1 341M 3:51AM 0:26.30 40G 3.1G RUNNING
164 | node1 36418 0.3 2.1 341M 3:51AM 0:25.59 40G 3.1G RUNNING
165 | node2 37799 0.4 2.0 333M 3:56AM 0:16.78 40G 3.1G RUNNING
166 | '
167 |
168 | # shutdown all nodes thru ssh
169 | ./hyperctl.sh shutdown
170 |
171 | # start all nodes
172 | ./hyperctl.sh start
173 |
174 | # stop all nodes
175 | ./hyperctl.sh stop
176 |
177 | # force-stop all nodes
178 | ./hyperctl.sh kill
179 |
180 | # delete all nodes' data (will not delete image templates)
181 | ./hyperctl.sh delete
182 |
183 | # kill only a particular node
184 | sudo kill -TERM 36418
185 |
186 | # delete only a particular node
187 | rm -rf ./tmp/node1/
188 |
189 | # remove everything
190 | sudo killall -9 hyperkit
191 | rm -rf ./tmp
192 |
193 | # exports the cloud-init yaml into ./$distro.yaml for review
194 | ./hyperctl.sh iso
195 |
196 | # installs and configures sleepwatcher to call this script to update the
197 | # VMs' clocks after your Mac wakes up from sleep
198 | ./hyperctl.sh timesync
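# (the wakeup hook simply runs `./hyperctl.sh hwclock`, which executes `sudo hwclock -s`
# on every node over ssh)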
199 |
200 | # installs local docker cli (docker.exe) and helps you configure it to connect
201 | # to the docker running on the master node
202 | ./hyperctl.sh docker
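# under the hood this points the local docker cli at the master over ssh, e.g.:
# export DOCKER_HOST=ssh://user@master
# docker ps
# (the exact export line, using your username, is copied to your clipboard by the command)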
203 |
204 | # walks you through a file sharing setup between local machine and the master node,
205 | # so that you can work with docker volumes.
206 | # this is semi-interactive so that your password is never stored anywhere insecurely.
207 | # this also means that you have to repeat this if you restart the master node.
208 | # alternatively, you can add the mount into master's fstab with a password= option.
209 | # note: the SMB file sharing does not support filesystem inotify events.
210 | ./hyperctl.sh share
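# an illustrative fstab line on master for the persistent variant (placeholder values,
# mirroring the cifs mount that the walkthrough copies to your clipboard):
# //10.10.0.1/hyperctl /Users/user cifs username=user,password=<secret>,vers=3.0,noperm 0 0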
211 |
212 | ```
213 |
214 | # Windows / Hyper-V
215 | ```powershell
216 |
217 | # tested with PowerShell 5.1 on Windows 10 Pro 1903, guest images Centos 1907 and Ubuntu 18.04
218 | # note: admin access is necessary for access to Windows Hyper-V framework and etc/hosts config
219 | # note: you may need to recursively remove public access (at least) from your ~/.ssh dir,
220 | # (suggested: whole homedir), otherwise ssh will refuse to work.
221 |
222 | # open PowerShell (Admin) prompt
223 | cd $HOME\your-workdir
224 |
225 | # download the script
226 | curl https://raw.githubusercontent.com/youurayy/hyperctl/master/hyperctl.ps1 -outfile hyperctl.ps1
227 | # enable script run permission
228 | set-executionpolicy remotesigned
229 |
230 | # display short synopsis for the available commands
231 | .\hyperctl.ps1
232 | '
233 | Usage: .\hyperctl.ps1 command+
234 |
235 | Commands:
236 |
237 | (pre-requisites are marked with ->)
238 |
239 | -> install - install basic chocolatey packages
240 | config - show script config vars
241 | print - print etc/hosts, network interfaces and mac addresses
242 | -> net - install private or public host network
243 | -> hosts - append private network node names to etc/hosts
244 | -> image - download the VM image
245 | master - create and launch master node
246 | nodeN - create and launch worker node (node1, node2, ...)
247 | info - display info about nodes
248 | init - initialize k8s and setup host kubectl
249 | reboot - soft-reboot the nodes
250 | shutdown - soft-shutdown the nodes
251 | save - snapshot the VMs
252 | restore - restore VMs from latest snapshots
253 | stop - stop the VMs
254 | start - start the VMs
255 | delete - stop VMs and delete the VM files
256 | delnet - delete the network
257 | iso - write cloud config data into a local yaml
258 | docker - setup local docker with the master node
259 | share - setup local fs sharing with docker on master
260 | helm - setup helm 3
261 | repo - install local docker repo in k8s
262 | '
263 |
264 | # performs `choco install 7zip.commandline qemu-img kubernetes-cli`
265 | # (will not install the package if you already have the required binary in path; kubernetes-cli will be updated)
266 | # you may perform these manually / selectively instead.
267 | # note: 7zip is needed to extract .xz archives
268 | # note: qemu-img is needed to convert images to vhdx
269 | .\hyperctl.ps1 install
270 |
271 | # display configured variables (edit the script to change them)
272 | # note: to quickly change distro, do e.g. `echo centos > .distro`
273 | .\hyperctl.ps1 config
274 | '
275 | version: v1.0.0
276 | config: bionic
277 | distro: ubuntu
278 | workdir: .\tmp
279 | guestuser: user
280 | sshpath: C:\Users\user\.ssh\id_rsa.pub
281 | imageurl: https://cloud-images.ubuntu.com/releases/server/18.04/release/ubuntu-18.04-server-cloudimg-amd64.img
282 | vhdxtmpl: .\tmp\ubuntu-18.04-server-cloudimg-amd64.vhdx
283 | cidr: 10.10.0.0/24
284 | switch: switch
285 | nettype: private
286 | natnet: natnet
287 | cpus: 4
288 | ram: 4GB
289 | hdd: 40GB
290 | cni: flannel
291 | cninet: 10.244.0.0/16
292 | cniyaml: https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
293 | dockercli: https://github.com/StefanScherer/docker-cli-builder/releases/download/19.03.1/docker.exe
294 | '
295 |
296 | # print relevant configuration - etc/hosts, mac addresses, network interfaces
297 | .\hyperctl.ps1 print
298 |
299 | # create a private network for the VMs, as set by the `cidr` variable
300 | .\hyperctl.ps1 net
301 |
302 | # appends IP/hostname pairs to the /etc/hosts.
303 | # (the same hosts entries will also be installed into every node)
304 | .\hyperctl.ps1 hosts
305 |
306 | # download, prepare and cache the VM image templates
307 | .\hyperctl.ps1 image
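# (downloads the cloud image, verifies its sha256 checksum, extracts archives if needed,
# and converts the image to a .vhdx template via qemu-img)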
308 |
309 | # create/launch the nodes
310 | .\hyperctl.ps1 master
311 | .\hyperctl.ps1 node1
312 | .\hyperctl.ps1 nodeN...
313 | # ---- or -----
314 | .\hyperctl.ps1 master node1 node2 nodeN...
315 |
316 | # ssh to the nodes if necessary (e.g. for manual k8s init)
317 | # by default, your `.ssh/id_rsa.pub` key was copied into the VMs' ~/.ssh/authorized_keys
318 | # uses your host username (which is the default), e.g.:
319 | ssh master
320 | ssh node1
321 | ssh node2
322 | ...
323 |
324 | # perform automated k8s init (will wait for VMs to finish init first)
325 | # (this will checkpoint the nodes just before `kubeadm init`)
326 | .\hyperctl.ps1 init
327 |
328 | # after init, you can do e.g.:
329 | hyperctl get pods --all-namespaces
330 | '
331 | NAMESPACE NAME READY STATUS RESTARTS AGE
332 | kube-system coredns-5c98db65d4-b92p9 1/1 Running 1 5m31s
333 | kube-system coredns-5c98db65d4-dvxvr 1/1 Running 1 5m31s
334 | kube-system etcd-master 1/1 Running 1 4m36s
335 | kube-system kube-apiserver-master 1/1 Running 1 4m47s
336 | kube-system kube-controller-manager-master 1/1 Running 1 4m46s
337 | kube-system kube-flannel-ds-amd64-6kj9p 1/1 Running 1 5m32s
338 | kube-system kube-flannel-ds-amd64-r87qw 1/1 Running 1 5m7s
339 | kube-system kube-flannel-ds-amd64-wdmxs 1/1 Running 1 4m43s
340 | kube-system kube-proxy-2p2db 1/1 Running 1 5m32s
341 | kube-system kube-proxy-fg8k2 1/1 Running 1 5m7s
342 | kube-system kube-proxy-rtjqv 1/1 Running 1 4m43s
343 | kube-system kube-scheduler-master 1/1 Running 1 4m38s
344 | '
345 |
346 | # reboot the nodes
347 | .\hyperctl.ps1 reboot
348 |
349 | # show info about existing VMs (size, run state)
350 | .\hyperctl.ps1 info
351 | '
352 | Name State CPUUsage(%) MemoryAssigned(M) Uptime Status Version
353 | ---- ----- ----------- ----------------- ------ ------ -------
354 | master Running 3 5908 00:02:25.5770000 Operating normally 9.0
355 | node1 Running 8 4096 00:02:22.7680000 Operating normally 9.0
356 | node2 Running 2 4096 00:02:20.1000000 Operating normally 9.0
357 | '
358 |
359 | # checkpoint the VMs
360 | .\hyperctl.ps1 save
361 |
362 | # restore the VMs from the latest snapshot
363 | .\hyperctl.ps1 restore
364 |
365 | # shutdown all nodes thru ssh
366 | .\hyperctl.ps1 shutdown
367 |
368 | # start all nodes
369 | .\hyperctl.ps1 start
370 |
371 | # stop all nodes thru hyper-v
372 | .\hyperctl.ps1 stop
373 |
374 | # delete all nodes' data (will not delete image templates)
375 | .\hyperctl.ps1 delete
376 |
377 | # delete the network
378 | .\hyperctl.ps1 delnet
379 |
380 | # installs local docker cli (docker.exe) and helps you configure it to connect
381 | # to the docker running on the master node
382 | .\hyperctl.ps1 docker
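# under the hood this points the local docker.exe at the master over ssh; an illustrative
# sketch of the resulting setting (the command walks you through the exact step):
# $env:DOCKER_HOST = 'ssh://user@master'
# docker ps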
383 |
384 | # walks you through a file sharing setup between local machine and the master node,
385 | # so that you can work with docker volumes.
386 | # this is semi-interactive so that your password is never stored anywhere insecurely.
387 | # this also means that you have to repeat this if you restart the master node.
388 | # alternatively, you can add the mount into master's fstab with a password= option.
389 | .\hyperctl.ps1 share
390 |
391 | # NOTE if Hyper-V stops working after a Windows update, do:
392 | # Windows Security -> App & Browser control -> Exploit protection settings -> Program settings ->
393 | #   C:\WINDOWS\System32\vmcompute.exe -> Edit -> Code flow guard (CFG) ->
394 | #   uncheck Override system settings, then run: net stop vmcompute; net start vmcompute
395 |
396 | # NOTE if Hyper-V VMs stop responding after updating to Windows 10 1903,
397 | # perform `Restart-Service 'HV Host Service'` in an admin PowerShell.
398 |
399 | ```
400 |
401 | #### License: https://www.apache.org/licenses/LICENSE-2.0
402 |
--------------------------------------------------------------------------------
/hyperctl.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # SPDX-License-Identifier: Apache-2.0
3 | # For usage overview, read the readme.md at https://github.com/youurayy/hyperctl
4 |
5 | # ---------------------------SETTINGS------------------------------------
6 |
7 | VERSION="v1.0.3"
8 | WORKDIR="./tmp"
9 | GUESTUSER=$USER
10 | SSHPATH="$HOME/.ssh/id_rsa.pub"
11 | if ! [ -a $SSHPATH ]; then
12 |   echo -e "\\n please configure \$SSHPATH or place a pubkey at $SSHPATH \\n"
13 | exit
14 | fi
15 | SSHPUB=$(cat $SSHPATH)
16 |
17 | CONFIG=$(cat .distro 2> /dev/null)
18 | CONFIG=${CONFIG:-"centos"}
19 |
20 | case $CONFIG in
21 | bionic)
22 | DISTRO="ubuntu"
23 | IMGVERS="18.04"
24 | IMAGE="ubuntu-$IMGVERS-server-cloudimg-amd64"
25 | IMAGEURL="https://cloud-images.ubuntu.com/releases/server/$IMGVERS/release"
26 | SHA256FILE="SHA256SUMS"
27 | KERNURL="https://cloud-images.ubuntu.com/releases/server/$IMGVERS/release/unpacked"
28 | KERNEL="$IMAGE-vmlinuz-generic"
29 | INITRD="$IMAGE-initrd-generic"
30 | IMGTYPE="vmdk"
31 | ARCHIVE=
32 | ;;
33 | disco)
34 | DISTRO="ubuntu"
35 | IMGVERS="19.04"
36 | IMAGE="ubuntu-$IMGVERS-server-cloudimg-amd64"
37 | IMAGEURL="https://cloud-images.ubuntu.com/releases/server/$IMGVERS/release"
38 | SHA256FILE="SHA256SUMS"
39 | KERNURL="https://cloud-images.ubuntu.com/releases/server/$IMGVERS/release/unpacked"
40 | KERNEL="$IMAGE-vmlinuz-generic"
41 | INITRD="$IMAGE-initrd-generic"
42 | IMGTYPE="vmdk"
43 | ARCHIVE=""
44 | ;;
45 | centos)
46 | DISTRO="centos"
47 | IMGVERS="1907"
48 | IMAGE="CentOS-7-x86_64-GenericCloud-$IMGVERS"
49 | IMAGEURL="https://cloud.centos.org/centos/7/images"
50 | SHA256FILE="sha256sum.txt"
51 | KERNURL="https://github.com/youurayy/hyperctl/releases/download/centos-kernel/"
52 | KERNEL="vmlinuz-3.10.0-957.27.2.el7.x86_64"
53 | INITRD="initramfs-3.10.0-957.27.2.el7.x86_64.img"
54 | IMGTYPE="raw"
55 | ARCHIVE=".tar.gz"
56 | ;;
57 | esac
58 |
59 | CIDR="10.10.0"
60 | CMDLINE="earlyprintk=serial console=ttyS0 root=/dev/sda1" # root=LABEL=cloudimg-rootfs
61 | ISO="cloud-init.iso"
62 |
63 | CPUS=4
64 | RAM=4GB
65 | HDD=40G
66 |
67 | FORMAT="raw"
68 | FILEPREFIX=""
69 | DISKOPTS=""
70 |
71 | # FORMAT="qcow2"
72 | # FILEPREFIX="file://"
73 | # DISKOPTS=",format=qcow"
74 |
75 | DISKDEV="ahci-hd"
76 | # DISKDEV="virtio-blk"
77 |
78 | # user for debug/tty:
79 | # BACKGROUND=
80 | # use for prod/ssh:
81 | BACKGROUND='> output.log 2>&1 &'
82 |
83 | NODES=(
84 | "master 24AF0C19-3B96-487C-92F7-584C9932DD96 $CIDR.10 32:a2:b4:36:57:16"
85 | "node1 B0F97DC5-5E9F-40FC-B829-A1EF974F5640 $CIDR.11 46:5:bd:af:97:f"
86 | "node2 0BD5B90C-E00C-4E1B-B3CF-117D6FF3C09F $CIDR.12 c6:b7:b1:30:6:fd"
87 | "node3 7B822993-5E08-41D4-9FB6-8F9FD31C9AD8 $CIDR.13 86:eb:d9:e1:f2:ce"
88 | "node4 384C454E-B22B-4945-A33F-2E3E2E9F74B4 $CIDR.14 ae:33:94:63:3a:8f"
89 | "node5 BEC17A85-E2B4-480F-B86C-808412E21823 $CIDR.15 f2:66:d8:80:e5:bd"
90 | "node6 F6C972A8-0B73-4C72-9F7C-202AAC773DD8 $CIDR.16 92:50:d8:18:86:d5"
91 | "node7 F05E1728-7403-46CF-B88E-B243D754B800 $CIDR.17 86:d6:cf:41:e0:3e"
92 | "node8 38659F47-3A64-49E3-AE6E-B41F6A42E1D1 $CIDR.18 ca:c5:12:22:d:ce"
93 | "node9 20DD5167-9FBE-439E-9849-E324E984FB96 $CIDR.19 f6:d4:b9:fd:20:c"
94 | )
95 |
96 | # https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64/repodata/filelists.xml
97 | # https://packages.cloud.google.com/apt/dists/kubernetes-xenial/main/binary-amd64/Packages
98 | # cmd+f "kubeadm"
99 | # KUBEVERSION='1.15.11'
100 | KUBEVERSION='1.16.9'
101 | # KUBEVERSION='1.17.5'
102 | # KUBEVERSION='1.18.2'
103 |
104 | KUBEPACKAGES="\
105 | - docker-ce
106 | - docker-ce-cli
107 | - containerd.io
108 | - [ kubelet, $KUBEVERSION ]
109 | - [ kubeadm, $KUBEVERSION ]
110 | - [ kubectl, $KUBEVERSION ]
111 | "
112 |
113 | CNI="flannel"
114 |
115 | case $CNI in
116 | flannel)
117 | CNIYAML="https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml"
118 | CNINET="10.244.0.0/16"
119 | ;;
120 | weave)
121 | CNIYAML='https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d "\n")'
122 | CNINET="10.32.0.0/12"
123 | ;;
124 | calico)
125 | CNIYAML="https://docs.projectcalico.org/v3.7/manifests/calico.yaml"
126 | CNINET="192.168.0.0/16"
127 | ;;
128 | esac
129 |
130 | SSHOPTS="-o ConnectTimeout=5 -o LogLevel=ERROR -o StrictHostKeyChecking=false -o UserKnownHostsFile=/dev/null"
131 |
132 | DOCKERCLI="https://download.docker.com/mac/static/stable/x86_64/docker-19.03.1.tgz"
133 |
134 | HELMURL="https://get.helm.sh/helm-v3.1.2-darwin-amd64.tar.gz"
135 |
136 | # -------------------------CLOUD INIT-----------------------------------
137 |
138 | cloud-init() {
139 | USERDATA_shared="\
140 | #cloud-config
141 |
142 | mounts:
143 | - [ swap ]
144 |
145 | groups:
146 | - docker
147 |
148 | users:
149 | - name: $GUESTUSER
150 | ssh_authorized_keys:
151 | - '$SSHPUB'
152 | sudo: [ 'ALL=(ALL) NOPASSWD:ALL' ]
153 | groups: [ sudo, docker ]
154 | shell: /bin/bash
155 | # lock_passwd: false # passwd won't work without this
156 | # passwd: '\$6\$rounds=4096\$byY3nxArmvpvOrpV\$2M4C8fh3ZXx10v91yzipFRng1EFXTRNDE3q9PvxiPc3kC7N/NHG8HiwAvhd7QjMgZAXOsuBD5nOs0AJkByYmf/' # 'test'
157 |
158 | write_files:
159 |   # resolv.conf hard-set is a workaround for initial setup
160 | - path: /tmp/append-etc-hosts
161 | content: |
162 | $(etc-hosts ' ')
163 | - path: /etc/resolv.conf
164 | content: |
165 | nameserver 8.8.4.4
166 | nameserver 8.8.8.8
167 | - path: /etc/systemd/resolved.conf
168 | content: |
169 | [Resolve]
170 | DNS=8.8.4.4
171 | FallbackDNS=8.8.8.8
172 | - path: /etc/modules-load.d/k8s.conf
173 | content: |
174 | br_netfilter
175 | - path: /etc/sysctl.d/k8s.conf
176 | content: |
177 | net.bridge.bridge-nf-call-ip6tables = 1
178 | net.bridge.bridge-nf-call-iptables = 1
179 | net.bridge.bridge-nf-call-arptables = 1
180 | net.ipv4.ip_forward = 1
181 | - path: /etc/docker/daemon.json
182 | content: |
183 | {
184 | \"exec-opts\": [\"native.cgroupdriver=systemd\"],
185 | \"log-driver\": \"json-file\",
186 | \"log-opts\": {
187 | \"max-size\": \"100m\"
188 | },
189 | \"storage-driver\": \"overlay2\",
190 | \"storage-opts\": [
191 | \"overlay2.override_kernel_check=true\"
192 | ]
193 | }"
194 |
195 | USERDATA_centos="\
196 | $USERDATA_shared
197 | # https://github.com/kubernetes/kubernetes/issues/56850
198 | - path: /usr/lib/systemd/system/kubelet.service.d/12-after-docker.conf
199 | content: |
200 | [Unit]
201 | After=docker.service
202 |
203 | yum_repos:
204 | docker-ce-stable:
205 | name: Docker CE Stable - \$basearch
206 | baseurl: https://download.docker.com/linux/centos/7/\$basearch/stable
207 | enabled: 1
208 | gpgcheck: 1
209 | gpgkey: https://download.docker.com/linux/centos/gpg
210 | priority: 1
211 | kubernetes:
212 | name: Kubernetes
213 | baseurl: https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
214 | enabled: 1
215 | gpgcheck: 1
216 | repo_gpgcheck: 1
217 | gpgkey: https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
218 | priority: 1
219 |
220 | package_upgrade: true
221 |
222 | packages:
223 | - yum-utils
224 | - cifs-utils
225 | - device-mapper-persistent-data
226 | - lvm2
227 | $KUBEPACKAGES
228 |
229 | runcmd:
230 | - echo 'sudo tail -f /var/log/messages' > /home/$GUESTUSER/log
231 | - cat /tmp/append-etc-hosts >> /etc/hosts
232 | - setenforce 0
233 | - sed -i 's/^SELINUX=enforcing\$/SELINUX=permissive/' /etc/selinux/config
234 | - mkdir -p /etc/systemd/system/docker.service.d
235 | - systemctl disable firewalld
236 | - systemctl daemon-reload
237 | - systemctl enable docker
238 | - systemctl enable kubelet
239 | # https://github.com/kubernetes/kubeadm/issues/954
240 | - echo 'exclude=kube*' >> /etc/yum.repos.d/kubernetes.repo
241 | - systemctl start docker
242 | - touch /home/$GUESTUSER/.init-completed"
243 |
244 | USERDATA_ubuntu="\
245 | $USERDATA_shared
246 | # https://github.com/kubernetes/kubernetes/issues/56850
247 | - path: /etc/systemd/system/kubelet.service.d/12-after-docker.conf
248 | content: |
249 | [Unit]
250 | After=docker.service
251 | - path: /etc/apt/preferences.d/docker-pin
252 | content: |
253 | Package: *
254 | Pin: origin download.docker.com
255 | Pin-Priority: 600
256 | - path: /etc/systemd/network/99-default.link
257 | content: |
258 | [Match]
259 | Path=/devices/virtual/net/*
260 | [Link]
261 | NamePolicy=kernel database onboard slot path
262 | MACAddressPolicy=none
263 |
264 | apt:
265 | sources:
266 | kubernetes:
267 | source: 'deb http://apt.kubernetes.io/ kubernetes-xenial main'
268 | keyserver: 'hkp://keyserver.ubuntu.com:80'
269 | keyid: BA07F4FB
270 | docker:
271 | arches: amd64
272 | source: 'deb https://download.docker.com/linux/ubuntu bionic stable'
273 | keyserver: 'hkp://keyserver.ubuntu.com:80'
274 | keyid: 0EBFCD88
275 |
276 | package_upgrade: true
277 |
278 | packages:
279 | - cifs-utils
280 | - chrony
281 | $KUBEPACKAGES
282 |
283 | runcmd:
284 | - echo 'sudo tail -f /var/log/syslog' > /home/$GUESTUSER/log
285 | - systemctl mask --now systemd-timesyncd
286 | - systemctl enable --now chrony
287 | - systemctl stop kubelet
288 | - cat /tmp/append-etc-hosts >> /etc/hosts
289 | - chmod o+r /lib/systemd/system/kubelet.service
290 | - chmod o+r /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
291 | # https://github.com/kubernetes/kubeadm/issues/954
292 | - apt-mark hold kubeadm kubelet
293 | - touch /home/$GUESTUSER/.init-completed"
294 | }
295 |
296 | # ----------------------------------------------------------------------
297 |
298 | set -e
299 |
300 | BASEDIR=$(cd -P -- "$(dirname -- "$0")" && pwd -P)
301 |
302 | hyperctl="kubectl --kubeconfig $HOME/.kube/config.hyperctl"
303 |
304 | DHCPD_LEASES='/var/db/dhcpd_leases'
305 | VMMET_PLIST='/Library/Preferences/SystemConfiguration/com.apple.vmnet.plist'
306 |
307 | go-to-scriptdir() {
308 | cd $BASEDIR
309 | }
310 |
311 | get_host() {
312 | echo ${NODES[$1]} | awk '{ print $1 }'
313 | }
314 |
315 | get_uuid() {
316 | echo ${NODES[$1]} | awk '{ print $2 }'
317 | }
318 |
319 | get_ip() {
320 | echo ${NODES[$1]} | awk '{ print $3 }'
321 | }
322 |
323 | get_mac() {
324 | echo ${NODES[$1]} | awk '{ print $4 }'
325 | }
326 |
327 | dhcpd-leases() {
328 | cat << EOF | sudo tee -a $DHCPD_LEASES
329 | $(for i in `seq 0 1 9`; do echo "{
330 | name=$(get_host $i)
331 | ip_address=$(get_ip $i)
332 | hw_address=1,$(get_mac $i)
333 | identifier=1,$(get_mac $i)
334 | }"; done)
335 | EOF
336 | }
337 |
338 | etc-hosts() {
339 | cat << EOF
340 | #
341 | $1#
342 | $(for i in `seq 0 1 9`; do echo -e "$1$(get_ip $i) $(get_host $i)"; done)
343 | $1#
344 | $1#
345 | EOF
346 | }
347 |
348 | download-image() {
349 | go-to-scriptdir
350 | mkdir -p $WORKDIR && cd $WORKDIR
351 |
352 | if ! [ -a $IMAGE.$IMGTYPE ]; then
353 | curl $IMAGEURL/$IMAGE.$IMGTYPE$ARCHIVE -O
354 | shasum -a 256 -c <(curl -s $IMAGEURL/$SHA256FILE | grep "$IMAGE.$IMGTYPE$ARCHIVE")
355 |
356 | if [ "$ARCHIVE" = ".tar.gz" ]; then
357 | tar xzf $IMAGE.$IMGTYPE$ARCHIVE
358 | fi
359 |
360 | if [ -n "$KERNURL" ]; then
361 | curl -L $KERNURL/$KERNEL -O
362 | curl -L $KERNURL/$INITRD -O
363 | shasum -a 256 -c <(curl -s -L $KERNURL/$SHA256FILE | grep "$KERNEL")
364 | shasum -a 256 -c <(curl -s -L $KERNURL/$SHA256FILE | grep "$INITRD")
365 | fi
366 | fi
367 | }
368 |
369 | is-machine-running() {
370 | ps -p $(cat $1/machine.pid 2> /dev/null) > /dev/null 2>&1
371 | }
372 |
373 | start-machine() {
374 | sudo ./cmdline
375 |
376 | if [ -z "$BACKGROUND" ]; then
377 | rm -f machine.pid
378 | else
379 | echo "started PID $(cat machine.pid)"
380 | fi
381 | }
382 |
383 | write-user-data() {
384 | cloud-init
385 | varname=USERDATA_$DISTRO
386 |
387 | cat << EOF > $1
388 | ${!varname}
389 | EOF
390 | }
391 |
392 | create-machine() {
393 |
394 | if [ -z $UUID ] || [ -z $NAME ] || [ -z $CPUS ] || [ -z $RAM ] || [ -z $DISK ]; then
395 | echo "create-machine: invalid params"
396 | return
397 | fi
398 |
399 | echo "starting machine $NAME"
400 |
401 | go-to-scriptdir
402 | mkdir -p $WORKDIR/$NAME && cd $WORKDIR/$NAME
403 |
404 | if is-machine-running ../$NAME; then
405 | echo "machine is already running!"
406 | return
407 | fi
408 |
409 | mkdir -p cidata
410 |
411 | cat << EOF > cidata/meta-data
412 | instance-id: id-$NAME
413 | local-hostname: $NAME
414 | EOF
415 |
416 | write-user-data "cidata/user-data"
417 |
418 | rm -f $ISO
419 | hdiutil makehybrid -iso -joliet -o $ISO cidata
420 |
421 | DISKFILE="$IMAGE.$FORMAT"
422 |
423 | if ! [ -a $DISKFILE ]; then
424 | echo Creating $(pwd)/$DISKFILE
425 |
426 | if [ $FORMAT != $IMGTYPE ]; then
427 | qemu-img convert -O $FORMAT ../$IMAGE.$IMGTYPE $DISKFILE
428 | else
429 | cp ../$IMAGE.$IMGTYPE $DISKFILE
430 | fi
431 |
432 | qemu-img resize -f $FORMAT $DISKFILE $DISK
433 | fi
434 |
435 | cat << EOF > cmdline
436 | exec hyperkit -A \
437 | -H \
438 | -U $UUID \
439 | -m $RAM \
440 | -c $CPUS \
441 | -s 0:0,hostbridge \
442 | -s 2:0,virtio-net \
443 | -s 31,lpc \
444 | -l com1,stdio \
445 | -s 1:0,$DISKDEV,$FILEPREFIX$(pwd)/$DISKFILE$DISKOPTS \
446 | -s 5,ahci-cd,$(pwd)/$ISO \
447 | -f "kexec,../$KERNEL,../$INITRD,$CMDLINE" $BACKGROUND
448 | echo \$! > machine.pid
449 | EOF
450 |
451 | chmod +x cmdline
452 | cat cmdline
453 |
454 | start-machine
455 | }
456 |
457 | create-vmnet() {
458 | cat << EOF | sudo tee $VMMET_PLIST
459 | <?xml version="1.0" encoding="UTF-8"?>
460 | <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
461 | <plist version="1.0">
462 | <dict>
463 |   <key>Shared_Net_Address</key>
464 |   <string>$CIDR.1</string>
465 |   <key>Shared_Net_Mask</key>
466 |   <string>255.255.255.0</string>
467 | </dict>
468 | </plist>
469 | EOF
470 | }
471 |
472 | proc-list() {
473 | echo $1
474 | ps auxw | grep hyperkit
475 | }
476 |
477 | node-info() {
478 | if is-machine-running $1; then
479 | etc=$(ps uxw -p $(cat $1/machine.pid 2> /dev/null) 2> /dev/null | tail -n 1 | awk '{ printf("%s\t%s\t%s\t%s\t%s\t%s", $2, $3, $4, int($6/1024)"M", $9, $10); }')
480 | else
481 | etc='-\t-\t-\t-\t-\t-'
482 | fi
483 | name=$(basename $1)
484 | disk=$(ls -lh $1/*.$FORMAT | awk '{print $5}')
485 | sparse=$(du -h $1/*.$FORMAT | awk '{print $1}')
486 | status=$(if is-machine-running $1; then echo "RUNNING"; else echo "NOT RUNNING"; fi)
487 | echo -e "$name\\t$etc\\t$disk\\t$sparse\\t$status"
488 | }
489 |
490 | get-all-nodes() {
491 | find $WORKDIR/* -maxdepth 0 -type d |
492 | while read node; do echo -n " "`basename $node`; done
493 | }
494 |
495 | get-worker-nodes() {
496 | find $WORKDIR/* -maxdepth 0 -type d -not -name master |
497 | while read node; do echo -n " "`basename $node`; done
498 | }
499 |
500 | exec-on-all-nodes() {
501 | go-to-scriptdir
502 | allnodes=( $(get-all-nodes) )
503 | for node in ${allnodes[@]}; do
504 | echo ---------------------$node
505 | ssh $SSHOPTS $GUESTUSER@$node $1
506 | done
507 | }
508 |
509 | wait-for-node-init() {
510 | node=$1
511 | while ! ssh $SSHOPTS $GUESTUSER@$node 'ls ~/.init-completed > /dev/null 2>&1'; do
512 | echo "waiting for $node to init..."
513 | sleep 5
514 | done
515 | }
516 |
517 | kill_all_vms() {
518 | go-to-scriptdir
519 | sudo find $WORKDIR -name machine.pid -exec sh -c 'kill -9 $(cat $1)' sh {} ';'
520 | }
521 |
522 | print-local-repo-tips() {
523 | cat << EOF
524 | # you can now publish your apps, e.g.:
525 |
526 | TAG=master:30699/yourapp:$(git log --pretty=format:'%h' -n 1)
527 | docker build ../yourapp/image/ --tag $TAG
528 | docker push $TAG
529 | hyperhelm install yourapp ../yourapp/chart/ --set image=$TAG
530 | EOF
531 | }
532 |
533 | help() {
534 | cat << EOF
535 | Practice real Kubernetes configurations on a local multi-node cluster.
536 | Inspect and optionally customize this script before use.
537 |
538 | Usage: ./hyperctl.sh command+
539 |
540 | Commands:
541 |
542 | (pre-requisites are marked with ->)
543 |
544 | -> install - install basic homebrew packages
545 | config - show script config vars
546 | print - print contents of relevant config files
547 | -> net - create or update the vmnet config
548 | -> dhcp - append to the dhcp registry
549 |      reset      - reset the vmnet and dhcp configs
550 | -> hosts - append node names to etc/hosts
551 | -> image - download the VM image
552 | master - create and launch master node
553 | nodeN - create and launch worker node (node1, node2, ...)
554 | info - display info about nodes
555 | init - initialize k8s and setup host kubectl
556 | reboot - soft-reboot the nodes
557 | shutdown - soft-shutdown the nodes
558 | stop - stop the VMs
559 | start - start the VMs
560 | kill - force-stop the VMs
561 | delete - delete the VM files
562 | iso - write cloud config data into a local yaml
563 | timesync - setup sleepwatcher time sync
564 | docker - setup local docker with the master node
565 | share - setup local fs sharing with docker on master
566 |      helm       - setup helm 3
568 | repo - install local docker repo in k8s
569 |
570 | For more info, see: https://github.com/youurayy/hyperctl
571 | EOF
572 | }
573 |
574 | echo
575 |
576 | if [ $# -eq 0 ]; then help; fi
577 |
578 | for arg in "$@"; do
579 | case $arg in
580 | install)
581 | if ! which hyperkit > /dev/null; then
582 | brew install hyperkit
583 | fi
584 | if ! which qemu-img > /dev/null; then
585 | brew install qemu
586 | fi
587 | brew install kubernetes-cli
588 | ;;
589 | config)
590 | echo " VERSION: $VERSION"
591 | echo " CONFIG: $CONFIG"
592 | echo " DISTRO: $DISTRO"
593 | echo " WORKDIR: $WORKDIR"
594 | echo " GUESTUSER: $GUESTUSER"
595 | echo " SSHPATH: $SSHPATH"
596 | echo " IMAGEURL: $IMAGEURL/$IMAGE.$IMGTYPE$ARCHIVE"
597 | echo " DISKFILE: $IMAGE.$FORMAT"
598 | echo " CIDR: $CIDR.0/24"
599 | echo " CPUS: $CPUS"
600 | echo " RAM: $RAM"
601 | echo " HDD: $HDD"
602 | echo " CNI: $CNI"
603 | echo " CNINET: $CNINET"
604 | echo " CNIYAML: $CNIYAML"
605 | echo " DOCKERCLI: $DOCKERCLI"
606 | ;;
607 | print)
608 | sudo echo
609 |
610 | echo "***** com.apple.vmnet.plist *****"
611 | sudo cat $VMMET_PLIST || true
612 |
613 | echo "***** $DHCPD_LEASES *****"
614 | cat $DHCPD_LEASES || true
615 |
616 | echo "***** /etc/hosts *****"
617 | cat /etc/hosts
618 | ;;
619 | net)
620 | create-vmnet
621 | ;;
622 | dhcp)
623 | dhcpd-leases
624 | ;;
625 | reset)
626 | sudo rm -f \
627 | $VMMET_PLIST \
628 | $DHCPD_LEASES
629 | echo -e "deleted\n $VMMET_PLIST\nand\n $DHCPD_LEASES\n\n" \
630 | "-> you sould reboot now, then use ./hyperkit.sh net dhcp"
631 | ;;
632 | hosts)
633 | echo "$(etc-hosts)" | sudo tee -a /etc/hosts
634 | ;;
635 | image)
636 | download-image
637 | ;;
638 | master)
639 | UUID=$(get_uuid 0) NAME=master CPUS=$CPUS RAM=$RAM DISK=$HDD create-machine
640 | ;;
641 | node*)
642 | num=$(echo $arg | sed -E 's:node(.+):\1:')
643 | UUID=$(get_uuid $num) NAME=$arg CPUS=$CPUS RAM=$RAM DISK=$HDD create-machine
644 | ;;
645 | info)
646 | go-to-scriptdir
647 | { echo -e 'NAME\tPID\t%CPU\t%MEM\tRSS\tSTARTED\tTIME\tDISK\tSPARSE\tSTATUS' &
648 | find $WORKDIR/* -maxdepth 0 -type d | while read node; do node-info "$node"; done } | column -ts $'\t'
649 | ;;
650 | init)
651 | go-to-scriptdir
652 | allnodes=( $(get-all-nodes) )
653 | workernodes=( $(get-worker-nodes) )
654 |
655 | for node in ${allnodes[@]}; do
656 | wait-for-node-init $node
657 | done
658 |
659 | echo "all nodes are pre-initialized, going to init k8s..."
660 |
661 | init="sudo kubeadm init --pod-network-cidr=$CNINET &&
662 | mkdir -p \$HOME/.kube &&
663 | sudo cp -i /etc/kubernetes/admin.conf \$HOME/.kube/config &&
664 | sudo chown \$(id -u):\$(id -g) \$HOME/.kube/config &&
665 | kubectl apply -f \$(eval echo $CNIYAML)"
666 |
667 | echo "executing on master: $init"
668 |
669 | if ! ssh $SSHOPTS $GUESTUSER@master $init; then
670 | echo "master init has failed, aborting"
671 | exit 1
672 | fi
673 |
674 | if [ "${#workernodes[@]}" -eq 0 ]; then
675 | echo
676 | echo "no worker nodes, removing NoSchedule taint from master..."
677 | ssh $SSHOPTS $GUESTUSER@master 'kubectl taint nodes master node-role.kubernetes.io/master:NoSchedule-'
678 | echo
679 | else
680 | joincmd=$(ssh $SSHOPTS $GUESTUSER@master 'sudo kubeadm token create --print-join-command')
681 | for node in ${workernodes[@]}; do
682 | echo "executing on $node: $joincmd"
683 | ssh $SSHOPTS $GUESTUSER@$node "sudo $joincmd < /dev/null"
684 | done
685 | fi
686 |
687 | mkdir -p ~/.kube
688 | scp $SSHOPTS $GUESTUSER@master:.kube/config ~/.kube/config.hyperctl
689 |
690 | cachedir="$HOME/.kube/cache/discovery/$CIDR.10_6443/"
691 | if [ -a $cachedir ]; then
692 | echo
693 | echo "deleting previous $cachedir"
694 | echo
695 | rm -rf $cachedir
696 | fi
697 |
698 | echo
699 | $hyperctl get pods --all-namespaces
700 | $hyperctl get nodes
701 | echo
702 | echo "to setup bash alias, exec:"
703 | echo
704 | echo "echo \"alias hyperctl='$hyperctl'\" >> ~/.profile"
705 | echo "source ~/.profile"
706 | ;;
707 | reboot)
708 | exec-on-all-nodes "sudo reboot"
709 | ;;
710 | shutdown)
711 | exec-on-all-nodes "sudo shutdown -h now"
712 | ;;
713 | stop)
714 | go-to-scriptdir
715 | sudo find $WORKDIR -name machine.pid -exec sh -c 'kill -TERM $(cat $1)' sh {} ';'
716 | ;;
717 | start)
718 | allnodes=( $(get-all-nodes) )
719 | for node in ${allnodes[@]}; do
720 | echo "starting $node..."
721 | go-to-scriptdir
722 | cd $WORKDIR/$node
723 | start-machine
724 | done
725 | ;;
726 | kill)
727 | kill_all_vms
728 | ;;
729 | delete)
730 | kill_all_vms
731 | go-to-scriptdir
732 | find $WORKDIR/* -maxdepth 0 -type d -exec rm -rf {} ';'
733 | ;;
734 | timesync)
735 | brew install sleepwatcher
736 | brew services start sleepwatcher
737 | echo "$BASEDIR/hyperctl.sh hwclock" >> ~/.wakeup
738 | chmod +x ~/.wakeup
739 | echo "time sync added to ~/.wakeup"
740 | echo
741 | cat ~/.wakeup
742 | ;;
743 | hwclock)
744 | exec-on-all-nodes "date ; sudo hwclock -s; date"
745 | ;;
746 | time)
747 | echo "local: $(date)"
748 | exec-on-all-nodes "date ; sudo chronyc makestep ; date"
749 | ;;
750 | track)
751 | exec-on-all-nodes "date ; sudo chronyc tracking"
752 | ;;
753 | docker)
754 | if ! which docker > /dev/null; then
755 | echo "installing docker cli..."
756 | curl -L $DOCKERCLI | tar zxvf - --strip 1 -C /usr/local/bin docker/docker
757 | echo
758 | fi
759 | cmd="echo 'export DOCKER_HOST=ssh://$GUESTUSER@master' >> ~/.profile && . ~/.profile"
760 | echo $cmd | pbcopy
761 | echo "exec to use docker on master (copied to clipboard):"
762 | echo
763 | echo $cmd
764 | ;;
765 | share)
766 | echo "1. make sure File Sharing is enabled on your Mac:"
767 | echo " System Preferences -> Sharing -> "
768 | echo " -> [x] File Sharing"
769 | echo " -> Options..."
770 | echo " -> [x] Share files and folders using SMB"
771 | echo " -> Windows File Sharing: [x] Your Account"
772 | echo
773 |
774 | if sharing -l | grep hyperctl > /dev/null; then
775 | echo "2. (not setting up host $HOME -> /hyperctl share, already present...)"
776 | echo
777 | else
778 | echo "2. setting up host $HOME -> /hyperctl share..."
779 | echo
780 | cmd="sudo sharing -a $HOME -s 001 -g 000 -n hyperctl"
781 | echo $cmd
782 | echo
783 | $cmd
784 | echo
785 | fi
786 |
787 | cmd="sudo mkdir -p $HOME && sudo mount -t cifs //$CIDR.1/hyperctl $HOME -o sec=ntlm,username=$GUESTUSER,vers=3.0,sec=ntlmv2,noperm"
788 | echo $cmd | pbcopy
789 | echo "3. "$cmd
790 | echo " ^ copied to the clipboard, paste & execute on master:"
791 | echo " (just press CMD+V, , ENTER, CTRL+D)"
792 | echo
793 | ssh $SSHOPTS $GUESTUSER@master
794 |
795 | echo
796 | cmd="docker run -it -v $PWD:$PWD r-base ls -l $PWD"
797 | echo $cmd | pbcopy
798 | echo "4. "$cmd
799 | echo " ^ copied to the clipboard, paste & execute locally to test the sharing"
800 | ;;
801 | helm)
802 | brew install kubernetes-helm
803 |
804 | echo
805 | echo "helm version: $(helm version)"
806 |
807 | hyperhelm="helm --kubeconfig $HOME/.kube/config.hyperctl"
808 |
809 | echo
810 | echo "to setup bash alias, exec:"
811 | echo
812 | echo "echo \"alias hyperhelm='$hyperhelm'\" >> ~/.profile"
813 | echo "source ~/.profile"
814 | ;;
815 | repo)
816 | # add remote helm repo
817 | hyperhelm repo add stable https://kubernetes-charts.storage.googleapis.com
818 | hyperhelm repo update
819 |
820 | # prepare secrets for local repo
821 | certs=$WORKDIR/certs
822 | mkdir -p $certs
823 | openssl req -newkey rsa:4096 -nodes -sha256 -subj "/C=/ST=/L=/O=/CN=master" \
824 | -keyout $certs/tls.key -x509 -days 365 -out $certs/tls.cert
825 | hyperctl create secret tls master --cert=$certs/tls.cert --key=$certs/tls.key
826 |
827 | # distribute certs to our nodes
828 | allnodes=( $(get-all-nodes) )
829 | for node in ${allnodes[@]}; do
830 | scp $SSHOPTS $certs/tls.cert $GUESTUSER@$node:
831 | ssh $SSHOPTS $GUESTUSER@$node sudo mkdir -p /etc/docker/certs.d/master:30699/
832 | ssh $SSHOPTS $GUESTUSER@$node sudo mv tls.cert /etc/docker/certs.d/master:30699/ca.crt
833 | done
834 |
835 | # launch local repo on master
836 | hyperhelm install registry stable/docker-registry \
837 | --set tolerations[0].key=node-role.kubernetes.io/master \
838 | --set tolerations[0].operator=Exists \
839 | --set tolerations[0].effect=NoSchedule \
840 | --set nodeSelector.kubernetes\\.io/hostname=master \
841 | --set tlsSecretName=master \
842 | --set service.type=NodePort \
843 | --set service.nodePort=30699
844 |
845 | print-local-repo-tips
846 | ;;
847 | iso)
848 | go-to-scriptdir
849 | write-user-data "${DISTRO}.yaml"
850 | echo "debug cloud-config was written to ./${DISTRO}.yaml"
851 | ;;
852 | help)
853 | help
854 | ;;
855 | *)
856 | echo "unknown command: $arg; try ./hyperctl.sh help"
857 | ;;
858 | esac
859 | done
860 |
861 | echo
862 |
863 | go-to-scriptdir
864 |
--------------------------------------------------------------------------------
/hyperctl.ps1:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env powershell
2 | # SPDX-License-Identifier: Apache-2.0
3 | # For usage overview, read the readme.md at https://github.com/youurayy/hyperctl
4 |
5 | # ---------------------------SETTINGS------------------------------------
6 |
7 | $version = 'v1.0.3'
8 | $workdir = '.\tmp'
9 | $guestuser = $env:USERNAME
10 | $sshpath = "$HOME\.ssh\id_rsa.pub"
11 | if (!(test-path $sshpath)) {
12 | write-host "`n please configure `$sshpath or place a pubkey at $sshpath `n"
13 | exit
14 | }
15 | $sshpub = $(get-content $sshpath -raw).trim()
16 |
17 | $config = $(get-content -path .\.distro -ea silentlycontinue | out-string).trim()
18 | if(!$config) {
19 | $config = 'centos'
20 | }
21 |
22 | switch ($config) {
23 | 'bionic' {
24 | $distro = 'ubuntu'
25 | $generation = 2
26 | $imgvers="18.04"
27 | $imagebase = "https://cloud-images.ubuntu.com/releases/server/$imgvers/release"
28 | $sha256file = 'SHA256SUMS'
29 | $image = "ubuntu-$imgvers-server-cloudimg-amd64.img"
30 | $archive = ""
31 | }
32 | 'disco' {
33 | $distro = 'ubuntu'
34 | $generation = 2
35 | $imgvers="19.04"
36 | $imagebase = "https://cloud-images.ubuntu.com/releases/server/$imgvers/release"
37 | $sha256file = 'SHA256SUMS'
38 | $image = "ubuntu-$imgvers-server-cloudimg-amd64.img"
39 | $archive = ""
40 | }
41 | 'centos' {
42 | $distro = 'centos'
43 | $generation = 1
44 | $imagebase = "https://cloud.centos.org/centos/7/images"
45 | $sha256file = 'sha256sum.txt'
46 | $imgvers = "1907"
47 | $image = "CentOS-7-x86_64-GenericCloud-$imgvers.raw"
48 | $archive = ".tar.gz"
49 | }
50 | }
51 |
52 | $nettype = 'private' # private/public
53 | $zwitch = 'switch' # private or public switch name
54 | $natnet = 'natnet' # private net nat net name (privnet only)
55 | $adapter = 'Wi-Fi' # public net adapter name (pubnet only)
56 |
57 | $cpus = 4
58 | $ram = '4GB'
59 | $hdd = '40GB'
60 |
61 | $cidr = switch ($nettype) {
62 | 'private' { '10.10.0' }
63 | 'public' { $null }
64 | }
65 |
66 | $macs = @(
67 | '0225EA2C9AE7', # master
68 | '02A254C4612F', # node1
69 | '02FBB5136210', # node2
70 | '02FE66735ED6', # node3
71 | '021349558DC7', # node4
72 | '0288F589DCC3', # node5
73 | '02EF3D3E1283', # node6
74 | '0225849ADCBB', # node7
75 | '02E0B0026505', # node8
76 | '02069FBFC2B0', # node9
77 | '02F7E0C904D0' # node10
78 | )
79 |
80 | # https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64/repodata/filelists.xml
81 | # https://packages.cloud.google.com/apt/dists/kubernetes-xenial/main/binary-amd64/Packages
82 | # ctrl+f "kubeadm"
83 | # $kubeversion = '1.15.11'
84 | $kubeversion = '1.16.9'
85 | # $kubeversion = '1.17.5'
86 | # $kubeversion = '1.18.2'
87 |
88 | $kubepackages = @"
89 | - docker-ce
90 | - docker-ce-cli
91 | - containerd.io
92 | - [ kubelet, $kubeversion ]
93 | - [ kubeadm, $kubeversion ]
94 | - [ kubectl, $kubeversion ]
95 | "@
96 |
97 | $cni = 'flannel'
98 |
99 | switch ($cni) {
100 | 'flannel' {
101 | $cniyaml = 'https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml'
102 | $cninet = '10.244.0.0/16'
103 | }
104 | 'weave' {
105 | $cniyaml = 'https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d "\n")'
106 | $cninet = '10.32.0.0/12'
107 | }
108 | 'calico' {
109 | $cniyaml = 'https://docs.projectcalico.org/v3.7/manifests/calico.yaml'
110 | $cninet = '192.168.0.0/16'
111 | }
112 | }
113 |
114 | $sshopts = @('-o LogLevel=ERROR', '-o StrictHostKeyChecking=no', '-o UserKnownHostsFile=/dev/null')
115 |
116 | $dockercli = 'https://github.com/StefanScherer/docker-cli-builder/releases/download/19.03.1/docker.exe'
117 |
118 | $helmurl = 'https://get.helm.sh/helm-v3.1.2-windows-amd64.zip'
119 |
120 | # ----------------------------------------------------------------------
121 |
122 | $imageurl = "$imagebase/$image$archive"
123 | $srcimg = "$workdir\$image"
124 | $vhdxtmpl = "$workdir\$($image -replace '^(.+)\.[^.]+$', '$1').vhdx"
125 |
126 |
127 | # switch to the script directory
128 | cd $PSScriptRoot | out-null
129 |
130 | # stop on any error
131 | $ErrorActionPreference = "Stop"
132 | $PSDefaultParameterValues['*:ErrorAction']='Stop'
133 |
134 | $etchosts = "$env:windir\System32\drivers\etc\hosts"
135 |
136 | # note: network configs version 1 and 2 didn't work
137 | function get-metadata($vmname, $cblock, $ip) {
138 | if(!$cblock) {
139 | return @"
140 | instance-id: id-$($vmname)
141 | local-hostname: $($vmname)
142 | "@
143 | } else {
144 | return @"
145 | instance-id: id-$vmname
146 | network-interfaces: |
147 | auto eth0
148 | iface eth0 inet static
149 | address $($cblock).$($ip)
150 | network $($cblock).0
151 | netmask 255.255.255.0
152 | broadcast $($cblock).255
153 | gateway $($cblock).1
154 | local-hostname: $vmname
155 | "@
156 | }
157 | }
158 |
159 | function get-userdata-shared($cblock) {
160 | return @"
161 | #cloud-config
162 |
163 | mounts:
164 | - [ swap ]
165 |
166 | groups:
167 | - docker
168 |
169 | users:
170 | - name: $guestuser
171 | ssh_authorized_keys:
172 | - $($sshpub)
173 | sudo: [ 'ALL=(ALL) NOPASSWD:ALL' ]
174 | groups: [ sudo, docker ]
175 | shell: /bin/bash
176 | # lock_passwd: false # passwd won't work without this
177 | # passwd: '`$6`$rounds=4096`$byY3nxArmvpvOrpV`$2M4C8fh3ZXx10v91yzipFRng1EFXTRNDE3q9PvxiPc3kC7N/NHG8HiwAvhd7QjMgZAXOsuBD5nOs0AJkByYmf/' # 'test'
178 |
179 | write_files:
180 | # resolv.conf hard-set is a workaround for initial setup
181 | - path: /etc/resolv.conf
182 | content: |
183 | nameserver 8.8.4.4
184 | nameserver 8.8.8.8
185 | - path: /etc/systemd/resolved.conf
186 | content: |
187 | [Resolve]
188 | DNS=8.8.4.4
189 | FallbackDNS=8.8.8.8
190 | - path: /tmp/append-etc-hosts
191 | content: |
192 | $(produce-etc-hosts -cblock $cblock -prefix ' ')
193 | - path: /etc/modules-load.d/k8s.conf
194 | content: |
195 | br_netfilter
196 | - path: /etc/sysctl.d/k8s.conf
197 | content: |
198 | net.bridge.bridge-nf-call-ip6tables = 1
199 | net.bridge.bridge-nf-call-iptables = 1
200 | net.bridge.bridge-nf-call-arptables = 1
201 | net.ipv4.ip_forward = 1
202 | - path: /etc/docker/daemon.json
203 | content: |
204 | {
205 | "exec-opts": ["native.cgroupdriver=systemd"],
206 | "log-driver": "json-file",
207 | "log-opts": {
208 | "max-size": "100m"
209 | },
210 | "storage-driver": "overlay2",
211 | "storage-opts": [
212 | "overlay2.override_kernel_check=true"
213 | ]
214 | }
215 | "@
216 | }
217 |
218 | function get-userdata-centos($cblock) {
219 | return @"
220 | $(get-userdata-shared -cblock $cblock)
221 | # https://github.com/kubernetes/kubernetes/issues/56850
222 | - path: /usr/lib/systemd/system/kubelet.service.d/12-after-docker.conf
223 | content: |
224 | [Unit]
225 | After=docker.service
226 | # https://github.com/clearlinux/distribution/issues/39
227 | - path: /etc/chrony.conf
228 | content: |
229 | refclock PHC /dev/ptp0 trust poll 2
230 | makestep 1 -1
231 | maxdistance 16.0
232 | #pool pool.ntp.org iburst
233 | driftfile /var/lib/chrony/drift
234 | logdir /var/log/chrony
235 |
236 | package_upgrade: true
237 |
238 | yum_repos:
239 | docker-ce-stable:
240 | name: Docker CE Stable - `$basearch
241 | baseurl: https://download.docker.com/linux/centos/7/`$basearch/stable
242 | enabled: 1
243 | gpgcheck: 1
244 | gpgkey: https://download.docker.com/linux/centos/gpg
245 | priority: 1
246 | kubernetes:
247 | name: Kubernetes
248 | baseurl: https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
249 | enabled: 1
250 | gpgcheck: 1
251 | repo_gpgcheck: 1
252 | gpgkey: https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
253 | priority: 1
254 |
255 | packages:
256 | - hyperv-daemons
257 | - yum-utils
258 | - cifs-utils
259 | - device-mapper-persistent-data
260 | - lvm2
261 | $kubepackages
262 |
263 | runcmd:
264 | - echo "sudo tail -f /var/log/messages" > /home/$guestuser/log
265 | - systemctl restart chronyd
266 | - cat /tmp/append-etc-hosts >> /etc/hosts
267 | # https://docs.docker.com/install/linux/docker-ce/centos/
268 | - setenforce 0
269 | - sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
270 | - mkdir -p /etc/systemd/system/docker.service.d
271 | - systemctl mask --now firewalld
272 | - systemctl daemon-reload
273 | - systemctl enable docker
274 | - systemctl enable kubelet
275 | # https://github.com/kubernetes/kubeadm/issues/954
276 | - echo "exclude=kube*" >> /etc/yum.repos.d/kubernetes.repo
277 | - systemctl start docker
278 | - touch /home/$guestuser/.init-completed
279 | "@
280 | }
281 |
282 | function get-userdata-ubuntu($cblock) {
283 | return @"
284 | $(get-userdata-shared -cblock $cblock)
285 | # https://github.com/kubernetes/kubernetes/issues/56850
286 | - path: /etc/systemd/system/kubelet.service.d/12-after-docker.conf
287 | content: |
288 | [Unit]
289 | After=docker.service
290 | - path: /etc/apt/preferences.d/docker-pin
291 | content: |
292 | Package: *
293 | Pin: origin download.docker.com
294 | Pin-Priority: 600
295 | - path: /etc/systemd/network/99-default.link
296 | content: |
297 | [Match]
298 | Path=/devices/virtual/net/*
299 | [Link]
300 | NamePolicy=kernel database onboard slot path
301 | MACAddressPolicy=none
302 | # https://github.com/clearlinux/distribution/issues/39
303 | - path: /etc/chrony/chrony.conf
304 | content: |
305 | refclock PHC /dev/ptp0 trust poll 2
306 | makestep 1 -1
307 | maxdistance 16.0
308 | #pool pool.ntp.org iburst
309 | driftfile /var/lib/chrony/chrony.drift
310 | logdir /var/log/chrony
311 | apt:
312 | sources:
313 | kubernetes:
314 | source: "deb http://apt.kubernetes.io/ kubernetes-xenial main"
315 | keyserver: "hkp://keyserver.ubuntu.com:80"
316 | keyid: BA07F4FB
317 | docker:
318 | arches: amd64
319 | source: "deb https://download.docker.com/linux/ubuntu bionic stable"
320 | keyserver: "hkp://keyserver.ubuntu.com:80"
321 | keyid: 0EBFCD88
322 |
323 | package_upgrade: true
324 |
325 | packages:
326 | - linux-tools-virtual
327 | - linux-cloud-tools-virtual
328 | - cifs-utils
329 | - chrony
330 | $kubepackages
331 |
332 | runcmd:
333 | - echo "sudo tail -f /var/log/syslog" > /home/$guestuser/log
334 | - systemctl mask --now systemd-timesyncd
335 | - systemctl enable --now chrony
336 | - systemctl stop kubelet
337 | - cat /tmp/append-etc-hosts >> /etc/hosts
338 | - mkdir -p /usr/libexec/hypervkvpd && ln -s /usr/sbin/hv_get_dns_info /usr/sbin/hv_get_dhcp_info /usr/libexec/hypervkvpd
339 | - chmod o+r /lib/systemd/system/kubelet.service
340 | - chmod o+r /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
341 | # https://github.com/kubernetes/kubeadm/issues/954
342 | - apt-mark hold kubeadm kubelet
343 | - touch /home/$guestuser/.init-completed
344 | "@
345 | }
346 |
347 | function create-public-net($zwitch, $adapter) {
348 | new-vmswitch -name $zwitch -allowmanagementos $true -netadaptername $adapter | format-list
349 | }
350 |
351 | function create-private-net($natnet, $zwitch, $cblock) {
352 | new-vmswitch -name $zwitch -switchtype internal | format-list
353 | new-netipaddress -ipaddress "$($cblock).1" -prefixlength 24 -interfacealias "vEthernet ($zwitch)" | format-list
354 | new-netnat -name $natnet -internalipinterfaceaddressprefix "$($cblock).0/24" | format-list
355 | }
356 |
357 | function produce-yaml-contents($path, $cblock) {
358 | set-content $path ([byte[]][char[]] `
359 | "$(&"get-userdata-$distro" -cblock $cblock)`n") -encoding byte
360 | }
361 |
362 | function produce-iso-contents($vmname, $cblock, $ip) {
363 | md $workdir\$vmname\cidata -ea 0 | out-null
364 | set-content $workdir\$vmname\cidata\meta-data ([byte[]][char[]] `
365 | "$(get-metadata -vmname $vmname -cblock $cblock -ip $ip)") -encoding byte
366 | produce-yaml-contents -path $workdir\$vmname\cidata\user-data -cblock $cblock
367 | }
368 |
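# builds the cidata ISO from the generated cloud-init files using the IMAPI2FS COM object;
# the inline C# helper streams the resulting image to disk block by block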
369 | function make-iso($vmname) {
370 | $fsi = new-object -ComObject IMAPI2FS.MsftFileSystemImage
371 | $fsi.FileSystemsToCreate = 3
372 | $fsi.VolumeName = 'cidata'
373 | $vmdir = (resolve-path -path "$workdir\$vmname").path
374 | $path = "$vmdir\cidata"
375 | $fsi.Root.AddTreeWithNamedStreams($path, $false)
376 | $isopath = "$vmdir\$vmname.iso"
377 | $res = $fsi.CreateResultImage()
378 | $cp = New-Object CodeDom.Compiler.CompilerParameters
379 | $cp.CompilerOptions = "/unsafe"
380 | if (!('ISOFile' -as [type])) {
381 | Add-Type -CompilerParameters $cp -TypeDefinition @"
382 | public class ISOFile {
383 | public unsafe static void Create(string iso, object stream, int blkSz, int blkCnt) {
384 | int bytes = 0; byte[] buf = new byte[blkSz];
385 | var ptr = (System.IntPtr)(&bytes); var o = System.IO.File.OpenWrite(iso);
386 | var i = stream as System.Runtime.InteropServices.ComTypes.IStream;
387 | if (o != null) { while (blkCnt-- > 0) { i.Read(buf, blkSz, ptr); o.Write(buf, 0, bytes); }
388 | o.Flush(); o.Close(); }}}
389 | "@ }
390 | [ISOFile]::Create($isopath, $res.ImageStream, $res.BlockSize, $res.TotalBlocks)
391 | }
392 |
393 | function create-machine($zwitch, $vmname, $cpus, $mem, $hdd, $vhdxtmpl, $cblock, $ip, $mac) {
394 | $vmdir = "$workdir\$vmname"
395 | $vhdx = "$workdir\$vmname\$vmname.vhdx"
396 |
397 | new-item -itemtype directory -force -path $vmdir | out-null
398 |
399 | if (!(test-path $vhdx)) {
400 | copy-item -path $vhdxtmpl -destination $vhdx -force
401 | resize-vhd -path $vhdx -sizebytes $hdd
402 |
403 | produce-iso-contents -vmname $vmname -cblock $cblock -ip $ip
404 | make-iso -vmname $vmname
405 |
406 | $vm = new-vm -name $vmname -memorystartupbytes $mem -generation $generation `
407 | -switchname $zwitch -vhdpath $vhdx -path $workdir
408 |
409 | if($generation -eq 2) {
410 | set-vmfirmware -vm $vm -enablesecureboot off
411 | }
412 |
413 | set-vmprocessor -vm $vm -count $cpus
414 | add-vmdvddrive -vmname $vmname -path $workdir\$vmname\$vmname.iso
415 |
416 | if(!$mac) { $mac = create-mac-address }
417 |
418 | get-vmnetworkadapter -vm $vm | set-vmnetworkadapter -staticmacaddress $mac
419 | set-vmcomport -vmname $vmname -number 2 -path \\.\pipe\$vmname
420 | }
421 | start-vm -name $vmname
422 | }
423 |
424 | function delete-machine($name) {
425 | stop-vm $name -turnoff -confirm:$false -ea silentlycontinue
426 | remove-vm $name -force -ea silentlycontinue
427 | remove-item -recurse -force $workdir\$name
428 | }
429 |
430 | function delete-public-net($zwitch) {
431 | remove-vmswitch -name $zwitch -force -confirm:$false
432 | }
433 |
434 | function delete-private-net($zwitch, $natnet) {
435 | remove-vmswitch -name $zwitch -force -confirm:$false
436 | remove-netnat -name $natnet -confirm:$false
437 | }
438 |
439 | function create-mac-address() {
440 | return "02$((1..5 | %{ '{0:X2}' -f (get-random -max 256) }) -join '')"
441 | }
442 |
443 | function basename($path) {
444 | return $path.substring(0, $path.lastindexof('.'))
445 | }
446 |
447 | function prepare-vhdx-tmpl($imageurl, $srcimg, $vhdxtmpl) {
448 | if (!(test-path $workdir)) {
449 | mkdir $workdir | out-null
450 | }
451 | if (!(test-path $srcimg$archive)) {
452 | download-file -url $imageurl -saveto $srcimg$archive
453 | }
454 |
455 | get-item -path $srcimg$archive | %{ write-host 'srcimg:', $_.name, ([math]::round($_.length/1MB, 2)), 'MB' }
456 |
457 | if($sha256file) {
458 | $hash = shasum256 -shaurl "$imagebase/$sha256file" -diskitem $srcimg$archive -item $image$archive
459 | echo "checksum: $hash"
460 | }
461 | else {
462 |     echo "no sha256file specified, skipping integrity check"
463 | }
464 |
465 | if(($archive -eq '.tar.gz') -and (!(test-path $srcimg))) {
466 | tar xzf $srcimg$archive -C $workdir
467 | }
468 | elseif(($archive -eq '.xz') -and (!(test-path $srcimg))) {
469 | 7z e $srcimg$archive "-o$workdir"
470 | }
471 | elseif(($archive -eq '.bz2') -and (!(test-path $srcimg))) {
472 | 7z e $srcimg$archive "-o$workdir"
473 | }
474 |
475 | if (!(test-path $vhdxtmpl)) {
476 | qemu-img.exe convert $srcimg -O vhdx -o subformat=dynamic $vhdxtmpl
477 | }
478 |
479 | echo ''
480 |   get-item -path $vhdxtmpl | %{ write-host 'vhdxtmpl:', $_.name, ([math]::round($_.length/1MB, 2)), 'MB' }
481 | return
482 | }
483 |
484 | function download-file($url, $saveto) {
485 | echo "downloading $url to $saveto"
486 | $progresspreference = 'silentlycontinue'
487 | invoke-webrequest $url -usebasicparsing -outfile $saveto # too slow w/ indicator
488 | $progresspreference = 'continue'
489 | }
490 |
491 | function produce-etc-hosts($cblock, $prefix) {
492 | $ret = switch ($nettype) {
493 | 'private' {
494 | @"
495 | #
496 | $prefix#
497 | $prefix$($cblock).10 master
498 | $prefix$($cblock).11 node1
499 | $prefix$($cblock).12 node2
500 | $prefix$($cblock).13 node3
501 | $prefix$($cblock).14 node4
502 | $prefix$($cblock).15 node5
503 | $prefix$($cblock).16 node6
504 | $prefix$($cblock).17 node7
505 | $prefix$($cblock).18 node8
506 | $prefix$($cblock).19 node9
507 | $prefix#
508 | $prefix#
509 | "@
510 | }
511 | 'public' {
512 | ''
513 | }
514 | }
515 | return $ret
516 | }
517 |
518 | function update-etc-hosts($cblock) {
519 | produce-etc-hosts -cblock $cblock -prefix '' | out-file -encoding utf8 -append $etchosts
520 | get-content $etchosts
521 | }
522 |
523 | function create-nodes($num, $cblock) {
524 | 1..$num | %{
525 |     echo "creating node $_"
526 | create-machine -zwitch $zwitch -vmname "node$_" -cpus 4 -mem 4GB -hdd 40GB `
527 | -vhdxtmpl $vhdxtmpl -cblock $cblock -ip $(10+$_)
528 | }
529 | }
530 |
531 | function delete-nodes($num) {
532 | 1..$num | %{
533 |     echo "deleting node $_"
534 | delete-machine -name "node$_"
535 | }
536 | }
537 |
538 | function get-our-vms() {
539 | return get-vm | where-object { ($_.name -match 'master|node.*') }
540 | }
541 |
542 | function get-our-running-vms() {
543 | return get-vm | where-object { ($_.state -eq 'running') -and ($_.name -match 'master|node.*') }
544 | }
545 |
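    | # note: shasum256 downloads the published checksum list from $shaurl, picks
    | # the entry matching $item, and compares it to the local file's SHA-256;
    | # a mismatch throws, so a corrupted download is never converted or booted.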
546 | function shasum256($shaurl, $diskitem, $item) {
547 | $pat = "^(\S+)\s+\*?$([regex]::escape($item))$"
548 |
549 | $hash = get-filehash -algo sha256 -path $diskitem | %{ $_.hash}
550 |
551 | $webhash = ( invoke-webrequest $shaurl -usebasicparsing ).tostring().split("`n") | `
552 | select-string $pat | %{ $_.matches.groups[1].value }
553 |
554 | if(!($hash -ieq $webhash)) {
555 | throw @"
556 | SHA256 MISMATCH:
557 | shaurl: $shaurl
558 | item: $item
559 | diskitem: $diskitem
560 | diskhash: $hash
561 | webhash: $webhash
562 | "@
563 | }
564 | return $hash
565 | }
566 |
567 | function got-ctrlc() {
568 | if ([console]::KeyAvailable) {
569 | $key = [system.console]::readkey($true)
570 | if (($key.modifiers -band [consolemodifiers]"control") -and ($key.key -eq "C")) {
571 | return $true
572 | }
573 | }
574 | return $false;
575 | }
576 |
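    | # note: wait-for-node-init polls the node over ssh for ~/.init-completed,
    | # a marker presumably written at the end of the node's cloud-init
    | # provisioning; pressing Ctrl+C during the wait aborts the script.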
577 | function wait-for-node-init($opts, $name) {
578 |   while ( ! $(ssh $opts $guestuser@$name 'ls ~/.init-completed 2> /dev/null') ) {
579 | echo "waiting for $name to init..."
580 | start-sleep -seconds 5
581 | if( got-ctrlc ) { exit 1 }
582 | }
583 | }
584 |
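    | # note: the two helpers below rewrite a Windows path into a forward-slash
    | # path for the guest side, e.g. (illustrative) 'C:\Users\me\dev' ->
    | # '/Users/me/dev'; to-unc-path strips the drive root via get-item (the path
    | # must exist locally), while to-unc-path2 uses only a regex and works for any path.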
585 | function to-unc-path($path) {
586 | $item = get-item $path
587 | return $path.replace($item.root, '/').replace('\', '/')
588 | }
589 |
590 | function to-unc-path2($path) {
591 | return ($path -replace '^[^:]*:?(.+)$', "`$1").replace('\', '/')
592 | }
593 |
594 | function hyperctl() {
595 | kubectl --kubeconfig=$HOME/.kube/config.hyperctl $args
596 | }
597 |
598 | function print-aliases($pwsalias, $bashalias) {
599 | echo ""
600 | echo "powershell alias:"
601 | echo " write-output '$pwsalias' | out-file -encoding utf8 -append `$profile"
602 | echo ""
603 | echo "bash alias:"
604 | echo " write-output `"``n$($bashalias.replace('\', '\\'))``n`" | out-file -encoding utf8 -append -nonewline ~\.profile"
605 | echo ""
606 | echo " -> restart your shell after applying the above"
607 | }
608 |
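    | # note: install-kubeconfig copies the cluster admin kubeconfig from the
    | # master into ~\.kube\config.hyperctl (kept separate from any default
    | # ~\.kube\config), clears a stale discovery cache if present, and runs two
    | # smoke tests; afterwards use the printed alias, e.g.: hyperctl get nodes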
609 | function install-kubeconfig() {
610 | new-item -itemtype directory -force -path $HOME\.kube | out-null
611 | scp $sshopts $guestuser@master:.kube/config $HOME\.kube\config.hyperctl
612 |
613 | $pwsalias = "function hyperctl() { kubectl --kubeconfig=$HOME\.kube\config.hyperctl `$args }"
614 | $bashalias = "alias hyperctl='kubectl --kubeconfig=$HOME\.kube\config.hyperctl'"
615 |
616 | $cachedir="$HOME\.kube\cache\discovery\$cidr.10_6443"
617 | if (test-path $cachedir) {
618 | echo ""
619 | echo "deleting previous $cachedir"
620 | echo ""
621 | rmdir $cachedir -recurse
622 | }
623 |
624 | echo "executing: hyperctl get pods --all-namespaces`n"
625 | hyperctl get pods --all-namespaces
626 | echo ""
627 | echo "executing: hyperctl get nodes`n"
628 | hyperctl get nodes
629 |
630 | print-aliases -pwsalias $pwsalias -bashalias $bashalias
631 | }
632 |
633 | function install-helm() {
634 | if (!(get-command "helm" -ea silentlycontinue)) {
635 | choco install -y kubernetes-helm
636 | }
637 | else {
638 | choco upgrade kubernetes-helm
639 | }
640 |
641 | echo ""
642 | echo "helm version: $(helm version)"
643 |
644 | $helm = "helm --kubeconfig $(to-unc-path2 $HOME\.kube\config.hyperctl)"
645 | $pwsalias = "function hyperhelm() { $helm `$args }"
646 | $bashalias = "alias hyperhelm='$helm'"
647 |
648 | print-aliases -pwsalias $pwsalias -bashalias $bashalias
649 | echo " -> then you can use e.g.: hyperhelm version"
650 | }
651 |
652 | function print-local-repo-tips() {
653 | echo @"
654 | # you can now publish your apps, e.g.:
655 |
656 | TAG=master:30699/yourapp:`$(git log --pretty=format:'%h' -n 1)
657 | docker build ../yourapp/image/ --tag `$TAG
658 | docker push `$TAG
659 | hyperhelm install yourapp ../yourapp/chart/ --set image=`$TAG
660 | "@
661 | }
662 |
663 | echo ''
664 |
665 | if($args.count -eq 0) {
666 | $args = @( 'help' )
667 | }
668 |
669 | switch -regex ($args) {
670 | ^help$ {
671 | echo @"
672 | Practice real Kubernetes configurations on a local multi-node cluster.
673 | Inspect and optionally customize this script before use.
674 |
675 | Usage: .\hyperctl.ps1 command+
676 |
677 | Commands:
678 |
679 | (pre-requisites are marked with ->)
680 |
681 | -> install - install basic chocolatey packages
682 | config - show script config vars
683 | print - print etc/hosts, network interfaces and mac addresses
684 | -> net - install private or public host network
685 | -> hosts - append private network node names to etc/hosts
686 | -> image - download the VM image
687 | master - create and launch master node
688 | nodeN - create and launch worker node (node1, node2, ...)
689 | info - display info about nodes
690 | init - initialize k8s and setup host kubectl
691 | reboot - soft-reboot the nodes
692 | shutdown - soft-shutdown the nodes
693 | save - snapshot the VMs
694 | restore - restore VMs from latest snapshots
695 | stop - stop the VMs
696 | start - start the VMs
697 | delete - stop VMs and delete the VM files
698 | delnet - delete the network
699 | iso - write cloud config data into a local yaml
700 | docker - setup local docker with the master node
701 | share - setup local fs sharing with docker on master
702 |    helm       - setup helm 3
703 |    repo       - install local docker repo in k8s
705 |
706 | For more info, see: https://github.com/youurayy/hyperctl
707 | "@
708 | }
709 | ^install$ {
710 | if (!(get-command "7z" -ea silentlycontinue)) {
711 | choco install -y 7zip.commandline
712 | }
713 | if (!(get-command "qemu-img" -ea silentlycontinue)) {
714 | choco install -y qemu-img
715 | }
716 | if (!(get-command "kubectl" -ea silentlycontinue)) {
717 | choco install -y kubernetes-cli
718 | }
719 | else {
720 | choco upgrade kubernetes-cli
721 | }
722 | }
723 | ^config$ {
724 | echo " version: $version"
725 | echo " config: $config"
726 | echo " distro: $distro"
727 | echo " workdir: $workdir"
728 | echo " guestuser: $guestuser"
729 | echo " sshpath: $sshpath"
730 | echo " imageurl: $imageurl"
731 | echo " vhdxtmpl: $vhdxtmpl"
732 | echo " cidr: $cidr.0/24"
733 | echo " switch: $zwitch"
734 | echo " nettype: $nettype"
735 | switch ($nettype) {
736 | 'private' { echo " natnet: $natnet" }
737 | 'public' { echo " adapter: $adapter" }
738 | }
739 | echo " cpus: $cpus"
740 | echo " ram: $ram"
741 | echo " hdd: $hdd"
742 | echo " cni: $cni"
743 | echo " cninet: $cninet"
744 | echo " cniyaml: $cniyaml"
745 | echo " dockercli: $dockercli"
746 | }
747 | ^print$ {
748 | echo "***** $etchosts *****"
749 | get-content $etchosts | select-string -pattern '^#|^\s*$' -notmatch
750 |
751 | echo "`n***** configured mac addresses *****`n"
752 | echo $macs
753 |
754 | echo "`n***** network interfaces *****`n"
755 |     (get-vmswitch $zwitch -ea silentlycontinue | `
756 |       format-list -property name, id, netadapterinterfacedescription | out-string).trim()
757 |
758 |     if ($nettype -eq 'private') {
759 |       echo ''
760 |       (get-netipaddress -interfacealias "vEthernet ($zwitch)" -ea silentlycontinue | `
761 |         format-list -property ipaddress, interfacealias | out-string).trim()
762 |       echo ''
763 |       (get-netnat $natnet -ea silentlycontinue | format-list -property name, internalipinterfaceaddressprefix | out-string).trim()
764 | }
765 | }
766 | ^net$ {
767 | switch ($nettype) {
768 | 'private' { create-private-net -natnet $natnet -zwitch $zwitch -cblock $cidr }
769 | 'public' { create-public-net -zwitch $zwitch -adapter $adapter }
770 | }
771 | }
772 | ^hosts$ {
773 | switch ($nettype) {
774 | 'private' { update-etc-hosts -cblock $cidr }
775 | 'public' { echo "not supported for public net - use dhcp" }
776 | }
777 | }
778 | ^macs$ {
779 | $cnt = 10
780 | 0..$cnt | %{
781 | $comment = switch ($_) {0 {'master'} default {"node$_"}}
782 | $comma = if($_ -eq $cnt) { '' } else { ',' }
783 | echo " '$(create-mac-address)'$comma # $comment"
784 | }
785 | }
786 | ^image$ {
787 | prepare-vhdx-tmpl -imageurl $imageurl -srcimg $srcimg -vhdxtmpl $vhdxtmpl
788 | }
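    | # the 'master' and 'nodeN' commands below derive each node's address and MAC
    | # deterministically: master gets $cidr.10 and $macs[0], nodeN gets
    | # $cidr.(10+N) and $macs[N], matching the etc/hosts entries written by 'hosts'.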
789 | ^master$ {
790 | create-machine -zwitch $zwitch -vmname 'master' -cpus $cpus `
791 | -mem $(Invoke-Expression $ram) -hdd $(Invoke-Expression $hdd) `
792 | -vhdxtmpl $vhdxtmpl -cblock $cidr -ip '10' -mac $macs[0]
793 | }
794 |   '(^node(?<number>\d+)$)' {
795 | $num = [int]$matches.number
796 | $name = "node$($num)"
797 | create-machine -zwitch $zwitch -vmname $name -cpus $cpus `
798 | -mem $(Invoke-Expression $ram) -hdd $(Invoke-Expression $hdd) `
799 | -vhdxtmpl $vhdxtmpl -cblock $cidr -ip "$($num + 10)" -mac $macs[$num]
800 | }
801 | ^info$ {
802 | get-our-vms
803 | }
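    | # the 'init' command below waits for cloud-init to finish on every VM, runs
    | # 'kubeadm init' on the master with the configured pod network CIDR, applies
    | # the CNI manifest from $cniyaml, then either removes the master's NoSchedule
    | # taint (no workers) or joins each worker with a freshly created token, and
    | # finally installs the local kubeconfig.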
804 | ^init$ {
805 | get-our-vms | %{ wait-for-node-init -opts $sshopts -name $_.name }
806 |
807 | $init = "sudo kubeadm init --pod-network-cidr=$cninet && \
808 | mkdir -p `$HOME/.kube && \
809 | sudo cp /etc/kubernetes/admin.conf `$HOME/.kube/config && \
810 | sudo chown `$(id -u):`$(id -g) `$HOME/.kube/config && \
811 | kubectl apply -f `$(eval echo $cniyaml)"
812 |
813 | echo "executing on master: $init"
814 |
815 | ssh $sshopts $guestuser@master $init
816 | if (!$?) {
817 | echo "master init has failed, aborting"
818 | exit 1
819 | }
820 |
821 | if((get-our-vms | where { $_.name -match "node.+" }).count -eq 0) {
822 | echo ""
823 | echo "no worker nodes, removing NoSchedule taint from master..."
824 | ssh $sshopts $guestuser@master 'kubectl taint nodes master node-role.kubernetes.io/master:NoSchedule-'
825 | echo ""
826 | }
827 | else {
828 | $joincmd = $(ssh $sshopts $guestuser@master 'sudo kubeadm token create --print-join-command')
829 | get-our-vms | where { $_.name -match "node.+" } |
830 | %{
831 | $node = $_.name
832 | echo "`nexecuting on $node`: $joincmd"
833 | ssh $sshopts $guestuser@$node sudo $joincmd
834 | if (!$?) {
835 | echo "$node init has failed, aborting"
836 | exit 1
837 | }
838 | }
839 | }
840 |
841 | install-kubeconfig
842 | }
843 | ^reboot$ {
844 | get-our-vms | %{ $node = $_.name; $(ssh $sshopts $guestuser@$node 'sudo reboot') }
845 | }
846 | ^shutdown$ {
847 | get-our-vms | %{ $node = $_.name; $(ssh $sshopts $guestuser@$node 'sudo shutdown -h now') }
848 | }
849 | ^save$ {
850 | get-our-vms | checkpoint-vm
851 | }
852 | ^restore$ {
853 | get-our-vms | foreach-object { $_ | get-vmsnapshot | sort creationtime | `
854 | select -last 1 | restore-vmsnapshot -confirm:$false }
855 | }
856 | ^stop$ {
857 | get-our-vms | stop-vm
858 | }
859 | ^start$ {
860 | get-our-vms | start-vm
861 | }
862 | ^delete$ {
863 | get-our-vms | %{ delete-machine -name $_.name }
864 | }
865 | ^delnet$ {
866 | switch ($nettype) {
867 | 'private' { delete-private-net -zwitch $zwitch -natnet $natnet }
868 | 'public' { delete-public-net -zwitch $zwitch }
869 | }
870 | }
871 | ^time$ {
872 | echo "local: $(date)"
873 | get-our-vms | %{
874 | $node = $_.name
875 | echo ---------------------$node
876 | # ssh $sshopts $guestuser@$node "date ; if which chronyc > /dev/null; then sudo chronyc makestep ; date; fi"
877 | ssh $sshopts $guestuser@$node "date"
878 | }
879 | }
880 | ^track$ {
881 | get-our-vms | %{
882 | $node = $_.name
883 | echo ---------------------$node
884 | ssh $sshopts $guestuser@$node "date ; sudo chronyc tracking"
885 | }
886 | }
887 | ^docker$ {
888 | $saveto = "C:\ProgramData\chocolatey\bin\docker.exe"
889 | if (!(test-path $saveto)) {
890 | echo "installing docker cli..."
891 | download-file -url $dockercli -saveto $saveto
892 | }
893 | echo ""
894 | echo "powershell:"
895 | echo " write-output '`$env:DOCKER_HOST = `"ssh://$guestuser@master`"' | out-file -encoding utf8 -append `$profile"
896 | echo ""
897 | echo "bash:"
898 | echo " write-output `"``nexport DOCKER_HOST='ssh://$guestuser@master'``n`" | out-file -encoding utf8 -append -nonewline ~\.profile"
899 | echo ""
900 | echo ""
901 | echo "(restart your shell after applying the above)"
902 | }
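    | # the 'share' command below exports the host's $HOME as an SMB share named
    | # 'hyperctl', then prints (and copies to the clipboard) the matching cifs
    | # mount command to run on the master; the second clipboard command is a
    | # quick local test that the share is visible from a container.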
903 | ^share$ {
904 | if (!( get-smbshare -name 'hyperctl' -ea silentlycontinue )) {
905 | echo "creating host $HOME -> /hyperctl share..."
906 | new-smbshare -name 'hyperctl' -path $HOME
907 | }
908 | else {
909 | echo "(not creating $HOME -> /hyperctl share, already present...)"
910 | }
911 | echo ""
912 |
913 | $unc = to-unc-path -path $HOME
914 |     $cmd = "sudo mkdir -p $unc && sudo mount -t cifs //$cidr.1/hyperctl $unc -o username=$guestuser,vers=3.0,sec=ntlmv2,noperm"
915 | set-clipboard -value $cmd
916 | echo $cmd
917 | echo " ^ copied to the clipboard, paste & execute on master:"
918 |     echo "   (just right-click (to paste), Enter, Ctrl+D)"
919 | echo ""
920 | ssh $sshopts $guestuser@master
921 |
922 | echo ""
923 | $unc = to-unc-path -path $pwd.path
924 | $cmd = "docker run -it -v $unc`:$unc r-base ls -l $unc"
925 | set-clipboard -value $cmd
926 | echo $cmd
927 | echo " ^ copied to the clipboard, paste & execute locally to test the sharing"
928 | }
929 | ^helm$ {
930 | install-helm
931 | }
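    | # the 'repo' command below creates a self-signed TLS certificate for the
    | # 'master' hostname, stores it as a k8s TLS secret, copies it to
    | # /etc/docker/certs.d/master:30699/ca.crt on every node so dockerd trusts
    | # the registry, and installs the stable/docker-registry chart pinned to the
    | # master and exposed on NodePort 30699.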
932 | ^repo$ {
933 |     # locate an openssl binary: prefer one on the path, fall back to cygwin;
934 |     # don't try to install one because the install is intrusive and not fully automated
935 | $openssl = "openssl.exe"
936 | if(!(get-command "openssl" -ea silentlycontinue)) {
937 | # fall back to cygwin openssl if installed
938 | $openssl = "C:\tools\cygwin\bin\openssl.exe"
939 | if(!(test-path $openssl)) {
940 | echo "error: please make sure 'openssl' command is in the path"
941 | echo "(or install Cygwin so that '$openssl' exists)"
942 | echo ""
943 | exit 1
944 | }
945 | }
946 |
947 |     # add the remote helm repo to your local ~/.helm registry
948 | hyperhelm repo add stable https://kubernetes-charts.storage.googleapis.com
949 | hyperhelm repo update
950 |
951 | # prepare secrets for local repo
952 | $certs="$workdir\certs"
953 | md $certs -ea 0 | out-null
954 | $expr = "$openssl req -newkey rsa:4096 -nodes -sha256 " +
955 | "-subj `"/C=/ST=/L=/O=/CN=master`" -keyout $certs/tls.key -x509 " +
956 | "-days 365 -out $certs/tls.cert"
957 | invoke-expression $expr
958 | hyperctl create secret tls master --cert=$certs/tls.cert --key=$certs/tls.key
959 |
960 | # distribute certs to our nodes
961 | get-our-vms | %{
962 | $node = $_.name
963 | $(scp $sshopts $certs/tls.cert $guestuser@$node`:)
964 | $(ssh $sshopts $guestuser@$node 'sudo mkdir -p /etc/docker/certs.d/master:30699/')
965 | $(ssh $sshopts $guestuser@$node 'sudo mv tls.cert /etc/docker/certs.d/master:30699/ca.crt')
966 | }
967 |
968 | hyperhelm install registry stable/docker-registry `
969 | --set tolerations[0].key=node-role.kubernetes.io/master `
970 | --set tolerations[0].operator=Exists `
971 | --set tolerations[0].effect=NoSchedule `
972 | --set nodeSelector.kubernetes\.io/hostname=master `
973 | --set tlsSecretName=master `
974 | --set service.type=NodePort `
975 | --set service.nodePort=30699
976 |
977 | echo ''
978 | print-local-repo-tips
979 | echo ''
980 | }
981 | ^iso$ {
982 | produce-yaml-contents -path "$($distro).yaml" -cblock $cidr
983 | echo "debug cloud-config was written to .\${distro}.yaml"
984 | }
985 | default {
986 | echo 'invalid command; try: .\hyperctl.ps1 help'
987 | }
988 | }
989 |
990 | echo ''
991 |
--------------------------------------------------------------------------------