├── .gitignore
├── LICENSE
├── Makefile
├── README.md
├── assets
│   └── cluster.png
├── blueprints
│   └── network-bridge.md
├── cloud-init
│   ├── README.md
│   ├── generate.sh
│   └── templates
│       ├── cmdline.txt
│       ├── network-config.yaml
│       ├── ssh
│       └── user-data.yaml
├── compute
│   ├── k3s
│   │   ├── README.md
│   │   ├── agent
│   │   │   ├── main.tf
│   │   │   ├── providers.tf
│   │   │   └── vars.tf
│   │   ├── fetch-config.sh
│   │   ├── main.tf
│   │   ├── providers.tf
│   │   ├── server
│   │   │   ├── main.tf
│   │   │   ├── providers.tf
│   │   │   └── vars.tf
│   │   ├── terraform.tfvars
│   │   ├── vars.tf
│   │   ├── zone-0.tf
│   │   ├── zone-1.tf
│   │   └── zone-2.tf
│   └── workloads
│       ├── aetherfs
│       │   ├── Chart.yaml
│       │   └── values.yaml
│       ├── cert-manager
│       │   ├── Chart.yaml
│       │   └── values.yaml
│       ├── grafana
│       │   ├── Chart.yaml
│       │   ├── templates
│       │   │   └── datasources.yaml
│       │   └── values.yaml
│       ├── prometheus
│       │   ├── Chart.yaml
│       │   └── values.yaml
│       └── services
│           ├── Chart.yaml
│           ├── templates
│           │   └── service.yaml
│           └── values.yaml
├── scripts
│   ├── docker-machine
│   │   ├── README.md
│   │   ├── connect.sh
│   │   ├── disconnect.sh
│   │   ├── purge.sh
│   │   └── reboot.sh
│   └── install.sh
└── storage
    ├── crdb
    │   ├── README.md
    │   ├── main.tf
    │   ├── node
    │   │   ├── main.tf
    │   │   ├── providers.tf
    │   │   └── vars.tf
    │   └── providers.tf
    └── minio
        ├── README.md
        ├── main.tf
        ├── node
        │   ├── main.tf
        │   ├── providers.tf
        │   └── vars.tf
        ├── outputs.tf
        └── providers.tf
/.gitignore:
--------------------------------------------------------------------------------
1 | # cloud-init
2 | cloud-init/generated/
3 |
4 | # terraform
5 | .terraform/
6 | .terraform.lock.hcl
7 | terraform.tfstate
8 | terraform.tfstate.backup
9 |
10 | # k8s
11 | *.tgz
12 | Chart.lock
13 | secrets.yaml
14 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2020 Mya Pitzeruse
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | clean:
2 | rm -rf cloud-init/generated/
3 |
4 | VALUES ?= values
5 | .helm:
6 | @helm upgrade --atomic --create-namespace -i $(NAME) $(CHART) -n $(NAMESPACE) -f $(CHART)/$(VALUES).yaml
7 |
8 | # CLOUD-INIT
9 |
10 | cloud-init: cloud-init/generated
11 | cloud-init/generated: cloud-init/templates/* cloud-init/generate.sh
12 | @bash cloud-init/generate.sh
13 |
14 | # STORAGE
15 |
16 | crdb:
17 | cd storage/crdb && \
18 | terraform init && \
19 | terraform apply
20 |
21 | minio:
22 | cd storage/minio && \
23 | terraform init && \
24 | terraform apply
25 |
26 | # COMPUTE
27 |
28 | k3s:
29 | cd compute/k3s && \
30 | terraform init && \
31 | terraform apply
32 |
33 | # WORKLOADS
34 |
35 | k8s/services:
36 | make .helm NAMESPACE=default NAME=services CHART=./compute/workloads/services
37 |
38 | k8s/aetherfs:
39 | make .helm NAMESPACE=default NAME=aetherfs CHART=./compute/workloads/aetherfs VALUES=secrets
40 |
41 | k8s/cert-manager:
42 | make .helm NAMESPACE=cert-manager NAME=cert-manager CHART=./compute/workloads/cert-manager
43 |
44 | k8s/grafana:
45 | make .helm NAMESPACE=monitoring NAME=grafana CHART=./compute/workloads/grafana VALUES=secrets
46 |
47 | k8s/prometheus:
48 | make .helm NAMESPACE=monitoring NAME=prometheus CHART=./compute/workloads/prometheus
49 |
50 | k8s: k8s/services k8s/cert-manager k8s/prometheus k8s/grafana
51 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # rpi-cloud-init
2 |
3 | This repository supports the initialization of my at-home cloud.
4 |
5 | My goal is to build a modular, relatively low-power system.
6 |
7 | ## Hardware
8 |
9 | - 1x 2013 Apple Mini
10 | - **Operating System / Architecture:** Ubuntu 20.04 / amd64
11 | - **Specs:** 8 CPU, 16GB RAM, 512GB Disk (eventually 1TB)
12 | - **Media:**
13 | - 512GB flash drive (USB 3.0)
14 | - **Services**: MinIO, k3s (server)
15 | - 3x Raspberry Pi 4
16 | - **Operating System / Architecture:** Ubuntu 20.04 / arm64
17 | - **Specs:** 4 CPU, 4GB RAM, 32GB Disk
18 | - **Media:**
19 | - 512GB flash drive (USB 3.0)
20 | - **Services**: CockroachDB
21 | - 8x Raspberry Pi 3b+
22 | - **Operating System / Architecture:** Ubuntu 20.04 / arm64
23 | - **Specs:** 4 CPU, 1GB RAM, 32GB Disk
24 | - **Services**: k3s (agent)
25 |
26 | ## Cluster
27 |
28 | - Provisioning
29 | - [cloud-init](cloud-init) - Machine initialization
30 | - [terraform](https://www.terraform.io/) - Declarative provisioning
31 | - [cert-manager](compute/workloads/cert-manager) - Certificate management
32 | - Storage
33 | - [CockroachDB](storage/crdb) - Relational database management system
34 | - [MinIO](storage/minio) - Erasure coded, small-blob storage
35 | - Compute
36 | - [docker](https://www.docker.com/) / [docker-machine](scripts/docker-machine) - Containerization (deprecated)
37 | - [k3s](compute/k3s) - Extendable compute cluster
38 | - To do:
39 | - containerd - Containerization
40 | - Observability
41 | - [prometheus](compute/workloads/prometheus) - Data collection and storage
42 | - [grafana](compute/workloads/grafana) - Data visualization
43 |
44 |
45 |
46 | ## Workloads
47 |
48 | - [services](compute/workloads/services) - Aliases for out-of-cluster services
49 | - [redis](https://redis.io) - General-purpose caching solution
50 | - [homestead](https://github.com/mjpitz/homestead) - Index builders that help manage my homestead
51 | - [varys](https://github.com/mjpitz/varys) - Secret engine and privileged access management
52 |
--------------------------------------------------------------------------------
/assets/cluster.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mjpitz/rpi-cloud-init/18324be9437e9b11e3d33f0903e53a6b15bb8067/assets/cluster.png
--------------------------------------------------------------------------------
/blueprints/network-bridge.md:
--------------------------------------------------------------------------------
1 | # Network Bridge
2 |
3 | Honestly, I'm not sure if this should be a `README.md` or a blog post.
4 | So I figured why not both?
5 |
6 | It doesn't happen a lot, but every so often I come across a device that doesn't support wi-fi.
7 | This latest case was my security system.
8 | On one hand, I like that my cameras aren't taking up bandwidth on my home network and that the system is largely a closed loop.
9 | On the other hand, not having access to my security system without having it tethered into the router is a bit of a pain.
10 | For one, my home networking setup isn't that elegant (yet).
11 | Second, the last thing I want to do is have more stuff out in the open, co-located with my router.
12 | So I decided to get a little creative.
13 | Sure, I could've bought a wi-fi adapter, but where's the fun in that?
14 | On top of that, I had some other reasons:
15 |
16 | 1. I didn't know if an adapter would work for this system.
17 | 1. I eventually want to do some real-time processing of video feed data and didn't want to stream that over the network.
18 | 1. I already have more than a dozen pis around (I wound up using a 3b+ for this).
19 |
20 | Today, I describe how I set up and configured a Raspberry Pi to act as a WAN client for connected devices.
21 | There are a handful of similar guides out there, but finding one for this specific direction / configuration was difficult.
22 | Similar guides I used as reference:
23 |
24 | - [Setting up a Raspberry Pi as a bridged wireless access point](https://www.raspberrypi.org/documentation/configuration/wireless/access-point-bridged.md)
25 | - [Setting up a Raspberry Pi as a routed wireless access point](https://www.raspberrypi.org/documentation/configuration/wireless/access-point-routed.md)
26 | - [Setting up a Raspberry Pi as an access point](https://raspberrypi.stackexchange.com/questions/88214/setting-up-a-raspberry-pi-as-an-access-point-the-easy-way)
27 | - [Raspberry Pi 4 Model B WiFi Ethernet Bridge](https://willhaley.com/blog/raspberry-pi-wifi-ethernet-bridge/)
28 |
29 | Of these guides, "Raspberry Pi 4 Model B WiFi Ethernet Bridge" is the closest.
30 | Unlike many of these guides, I run an Ubuntu arm64 image instead of Raspbian OS.
31 | The process is largely the same, but some tooling is different.
32 | For example, we need to work with netplan (which can be a pain depending on what you're trying to do).
33 | In addition to that, we have a slightly higher resource footprint, but that's not the biggest concern for what this little one is doing.
34 | To help provide a little context as to what's going on here, I've put together this diagram:
35 |
36 | [](https://mermaid-js.github.io/mermaid-live-editor/edit##eyJjb2RlIjoiZ3JhcGggVERcbiAgICBjMShDYW1lcmEgMSlcbiAgICBjMihDYW1lcmEgMilcbiAgICBjLihDYW1lcmEgLi4uKVxuICAgIGNuKENhbWVyYSBOKVxuICAgIGh1YihTZWN1cml0eSBIdWIgPGJyPiBldGgwIC0gMTkyLjE2OC4xMC4yKVxuICAgIHBpKFJhc3BiZXJyeSBQaSA8YnI-IHdsYW4wIC0gMTkyLjE2OC40LjMwIDxicj4gZXRoMCAtIDE5Mi4xNjguMTAuMSlcbiAgICBtZShNeWEncyBMYXB0b3AgPGJyPiB3bGFuMCAtIDE5Mi4xNjguNC4xMClcbiAgICByb3V0ZXIoV2ktZmkgUm91dGVyKVxuXG4gICAgcm91dGVyIC0uLSBtZVxuICAgIHJvdXRlciAtLi0gcGlcblxuICAgIHBpIC0tLSBodWJcbiAgXG4gICAgaHViIC0tLSBjMVxuICAgIGh1YiAtLS0gYzJcbiAgICBodWIgLS0tIGMuXG4gICAgaHViIC0tLSBjblxuIiwibWVybWFpZCI6IntcbiAgXCJ0aGVtZVwiOiBcImRlZmF1bHRcIlxufSIsInVwZGF0ZUVkaXRvciI6ZmFsc2UsImF1dG9TeW5jIjp0cnVlLCJ1cGRhdGVEaWFncmFtIjpmYWxzZX0)
37 |
38 | Alt-text for diagram:
39 | - At the top, there's a wi-fi router that's connected to the public internet, whose `wlan0` interface holds the `192.168.4.1` IP address.
40 | - Connected to the router over wi-fi are two machines.
41 | - First is Mya's laptop, whose `wlan0` interface holds the `192.168.4.10` IP address.
42 | - Second is a Raspberry Pi board whose `wlan0` interface holds the `192.168.4.30` IP address
43 | and whose `eth0` interface holds the `192.168.10.1` IP address.
44 | - The security hub connects to the Raspberry Pi using an ethernet cable.
45 | - Some number of cameras connect to the security hub using a cable.
46 |
47 | ## Flash Image
48 |
49 | For simplicity, I used my `cloud-init` base from this repo to flash my Raspberry Pi (w/ wi-fi access).
50 | This gives it a similar look and feel to many of the other microcomputers I have around the house.
51 | In addition to that, I get some reasonable default security configurations with that setup (e.g. private key access only, no root login, a custom user).
52 | Note that when flashing this image, we should set up wi-fi so the Raspberry Pi can connect to the internet without ethernet.
53 | To get a machine up and running, follow Steps 1 - 4 in the [cloud-init README](../cloud-init/README.md).
54 |
55 | Once the board is running, we'll need to make some additional modifications to the networking setup.
56 | The `network-config` provided in the `cloud-init` setup generates `/etc/netplan/50-cloud-init.yaml`.
57 | We will be creating a new `/etc/netplan/60-static-eth0.yaml` file with the following contents.
58 |
59 | ```yaml
60 | network:
61 | version: 2
62 | ethernets:
63 | eth0:
64 | dhcp4: false
65 | dhcp6: false
66 | addresses: [192.168.10.1/24]
67 | gateway4: 192.168.10.1
68 | nameservers:
69 | addresses: [192.168.10.1]
70 | ```
71 |
72 | This overrides the configuration we provided initially and sets up a private, static IP block for connected devices to use.
73 | We really only need one here, but allocating a block may be useful at some point later on.
74 | With the new file, we need to generate and apply the changes.
75 |
76 | ```
77 | $ sudo netplan generate
78 | $ sudo netplan apply
79 | ```
80 |
81 | We can verify the changes by inspecting the output of `ifconfig` (you may need to `apt-get install net-tools`).
82 |
83 | ```
84 | $ ifconfig
85 | mjpitz@ip-192-168-4-74:~$ ifconfig
86 | eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
87 | inet 192.168.10.1 netmask 255.255.255.0 broadcast 192.168.10.255 ###### expected change
88 |
89 | lo: flags=73<UP,LOOPBACK,RUNNING> mtu 65536
90 | inet 127.0.0.1 netmask 255.0.0.0
91 |
92 | wlan0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
93 | inet 192.168.4.30 netmask 255.255.255.0 broadcast 192.168.4.255
94 |
95 | ```
96 |
97 | ## Software
98 |
99 | In order to get this to work, I needed to install some basic software components.
100 |
101 | - `isc-dhcp-server` is responsible for providing configuration to machines on the underlying network.
102 | - `dnsmasq` does a lot of the heavy lifting by acting as a DNS cache server and router.
103 | - `netfilter-persistent` helps preserve firewall rules and restore them on reboot.
104 | - `iptables-persistent` helps preserve iptables rules and restore them on reboot.
105 |
106 | ```
107 | $ sudo apt-get update -y && sudo apt-get install -y isc-dhcp-server dnsmasq
108 | $ sudo DEBIAN_FRONTEND=noninteractive apt install -y netfilter-persistent iptables-persistent
109 | ```
110 |
111 | Once installed, we'll need to configure each system.
112 | Note: dnsmasq may have trouble starting up at this point.
113 | That's OK; we'll fix it shortly.
114 |
115 | ### dhcp
116 |
117 | First up, let's configure dhcp.
118 | For some reason, I couldn't get the system to work without this installed.
119 | I know dnsmasq supports dhcp, but without this process running I got the following error and couldn't obtain a network address.
120 |
121 | ```
122 | dnsmasq-dhcp[3685]: DHCP packet received on eth0 which has no address
123 | ```
124 |
125 | First, we'll want to modify `/etc/default/isc-dhcp-server` to set `INTERFACESv4="eth0"`.
126 | This ensures that the dhcp server responds to requests on the wired interface (similar to how your wi-fi router responds to these requests).
127 | Next we need to configure the dhcp server to know about the subnet that we're allocating to it.
128 | To do this, we'll edit `/etc/dhcp/dhcpd.conf` to contain the following contents:
129 |
130 | ```
131 | # communicates that the Raspberry Pi will act as a router for requests.
132 | host router {
133 | hardware ethernet "mac of eth0, obtained from ifconfig.eth0.ether attribute";
134 | fixed-address 192.168.10.1;
135 | }
136 |
137 | # communicates how to manage the associated subnet(s)
138 | subnet 192.168.10.0 netmask 255.255.255.0 {
139 | range 192.168.10.2 192.168.10.254;
140 | option routers 192.168.10.1;
141 | option domain-name-servers 192.168.10.1;
142 | }
143 | ```
144 |
145 | Once dhcp has been configured, you'll need to restart the service.
146 |
147 | ```
148 | $ sudo service isc-dhcp-server restart
149 | ```
150 |
151 | Once the dhcp server has been restarted, it should be operating with your new configuration.
152 | We can verify that it's running properly using `sudo service isc-dhcp-server status` or by tailing logs with `sudo journalctl -u isc-dhcp-server`.
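Active leases handed out by the server are also recorded on disk, so we can see which addresses have been claimed.
A quick way to check (assuming the stock Ubuntu layout for `isc-dhcp-server`):

```
$ sudo cat /var/lib/dhcp/dhcpd.leases
```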
153 |
154 | ### dnsmasq
155 |
156 | Next we're going to configure dnsmasq.
157 | This is responsible for the core configuration logic.
158 | In the default configuration, dnsmasq conflicts with systemd-resolved (which is why the service likely couldn't start).
159 | We'll be modifying its configuration to bind dnsmasq to eth0, allowing it to respond to requests on that interface.
160 | To do so, we'll backup the existing configuration and create a new file.
161 |
162 | ```
163 | $ sudo mv /etc/dnsmasq.conf /etc/dnsmasq.conf.orig
164 | ```
165 |
166 | Next, we'll create a new `/etc/dnsmasq.conf` with the following content.
167 |
168 | ```
169 | interface=eth0 # what interface to bind to.
170 | listen-address=192.168.10.1 # what addresses to listen on.
171 | bind-interfaces # bind only to the interfaces we're listening on, instead of the wildcard address.
172 | server=192.168.4.1 # sets the upstream router as a dns server for delegation.
173 | dhcp-range=192.168.10.2,192.168.10.254,12h # configures the address pool and lease time for dhcp requests.
174 | ```
175 |
176 | Once we configure dnsmasq, we'll need to restart it for our changes to take effect.
177 |
178 | ```
179 | $ sudo service dnsmasq restart
180 | ```
181 |
182 | Once restarted, dnsmasq should come up without an issue.
183 | We can check its status using `sudo service dnsmasq status` or by tailing logs with `sudo journalctl -u dnsmasq`.
184 | Before requests can successfully pass through dnsmasq, we need to configure some lower-level networking.
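As a quick sanity check before moving on, we can ask dnsmasq to resolve a name directly from the Raspberry Pi.
This assumes `dig` is available (it ships in Ubuntu's `dnsutils` package):

```
$ dig @192.168.10.1 ubuntu.com +short
```

If dnsmasq is healthy, it forwards the query to the upstream router (`192.168.4.1`) and returns an answer.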
185 |
186 | ### (GNU/?)Linux networking
187 |
188 | The last part of this configuration requires modifying the (GNU/?)Linux networking components.
189 | I say (GNU/?)Linux because I'm not entirely sure where iptables falls in that delineation.
190 | Gut feeling is Linux, but I've never actually gone down that rabbit hole to verify.
191 |
192 | To do this, we'll first want to configure the kernel to do packet forwarding for IPv4.
193 | Open `/etc/sysctl.conf`, uncomment the line containing `net.ipv4.ip_forward=1`, and save.
194 | To reload the configuration without a full system reboot, run the following command.
195 |
196 | ```
197 | $ sudo sysctl --system
198 | ```
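We can confirm the kernel picked up the change by reading the value back:

```
$ sysctl net.ipv4.ip_forward
net.ipv4.ip_forward = 1
```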
199 |
200 | Next, we'll instruct iptables to masquerade traffic leaving over the wlan0 interface.
201 | This allows requests to pass through the network interfaces with very little software in between.
202 | Once modified, we'll save the rules with `netfilter-persistent` so the changes persist between reboots.
203 |
204 | ```
205 | $ sudo iptables -t nat -A POSTROUTING -o wlan0 -j MASQUERADE
206 | $ sudo netfilter-persistent save
207 | ```
208 |
209 | ### Verifying client
210 |
211 | Once all of this is done, traffic should flow from the connected device through to other devices on the network or internet.
212 | We can verify this by connecting a device to the Raspberry Pi.
213 | It should successfully negotiate an IP address from the dhcp server (likely `192.168.10.2`).
214 | In addition to that, we should be able to make requests from the client (i.e. the security system) to other devices on the network or the internet.
215 |
216 | Once I connected my system, I saw (using the UI they provide) that it successfully obtained an IP from the server.
217 | After, I sent a test email to make sure the request would successfully go through.
218 | While doing this, I brought up a terminal on the Raspberry Pi to watch the traffic flow (`sudo tcpdump -X -i eth0`).
219 | At this point, I'm leagues ahead of where I was before.
220 | When I went to set up cloud backups, I learned that this needed to be done through their mobile app.
221 | As a result, I needed to expose the client application ports through the Raspberry Pi in order to connect from my devices.
222 |
223 | ### Exposing ports
224 |
225 | The system I bought exposed two ports of interest to me.
226 | `9000` provides a media application that can interface with their mobile application.
227 | And `554` exposes a Real Time Streaming Protocol (RTSP) endpoint that allows for remote observation of the cameras.
228 | To quickly proxy these ports, we can use our good ole friend `iptables` again.
229 | By executing the following commands, we can successfully route requests from a port on the Raspberry Pi to a port on the client.
230 | Note: `${CLIENT_IP}` should be the IP your client obtained.
231 | `${PORT}` is the port you want to expose.
232 |
233 | ```
234 | $ sudo iptables -A PREROUTING -t nat -i wlan0 -p tcp --dport ${PORT} -j DNAT --to ${CLIENT_IP}:${PORT}
235 | $ sudo iptables -A FORWARD -p tcp -d ${CLIENT_IP} --dport ${PORT} -j ACCEPT
236 | ```
237 |
238 | Note that in my case, the RTSP port needed to also be exposed over `udp` for better streaming support.
239 | If you're exposing ports through a Raspberry Pi, you'll need to verify what protocols to expose them over.
240 | Also, don't forget to save your updated IP table rules, otherwise changes will be lost on reboot.
241 |
242 | ```
243 | $ sudo netfilter-persistent save
244 | ```
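For the RTSP port in my setup, that meant repeating the same pair of rules with `-p udp` and saving again.
A sketch, assuming port `554` and the same `${CLIENT_IP}` as above:

```
$ sudo iptables -A PREROUTING -t nat -i wlan0 -p udp --dport 554 -j DNAT --to ${CLIENT_IP}:554
$ sudo iptables -A FORWARD -p udp -d ${CLIENT_IP} --dport 554 -j ACCEPT
$ sudo netfilter-persistent save
```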
245 |
246 | ## Conclusion
247 |
248 | That was it.
249 | Once everything was set up, I was able to connect to the real-time stream from my devices and monitor my whole system.
250 | The mobile application also connected to the security system without issue and let me configure cloud backups.
251 | While this is where I stop today, I have some more plans for this in the future.
252 | While I don't expect this to become a regular pattern I'll deploy, it was extremely useful to set up in this case.
253 |
--------------------------------------------------------------------------------
/cloud-init/README.md:
--------------------------------------------------------------------------------
1 | # cloud-init
2 |
3 | Provides machine initialization through cloud-init.
4 |
5 | Ideally, these are relatively small configs to allow for quick starts.
6 | Nothing fancy here, just a little `bash` and `envsubst`.
7 |
8 | ## 1 - Obtain a base image
9 |
10 | I use [Ubuntu 18.04](https://ubuntu.com/download/raspberry-pi).
11 |
12 | ## 2 - Generate cloud-init
13 |
14 | Using `envsubst`, I created a few templates based on files I found in the `system-boot` partition. To generate the
15 | configuration files for each node, simply run the `make` target.
16 |
17 | ```
18 | $ WIFI_SSID=name WIFI_PASSWORD=password make cloud-init
19 | generating 192.168.1.50
20 | generating 192.168.1.51
21 | generating 192.168.1.52
22 | generating 192.168.1.53
23 | generating 192.168.1.54
24 | generating 192.168.1.60
25 | generating 192.168.1.61
26 | generating 192.168.1.62
27 | generating 192.168.1.63
28 | generating 192.168.1.64
29 | generating 192.168.1.70
30 | generating 192.168.1.71
31 | generating 192.168.1.72
32 | generating 192.168.1.73
33 | generating 192.168.1.74
34 | ```
35 |
36 | The output of this script is a directory full of configurations for each host. I compute host names from the
37 | assigned static IP address, prefixed with `ip-`, as shown below.
38 |
39 | ```
40 | $ ls -1 cloud-init/generated/
41 | ip-192-168-1-50
42 | ip-192-168-1-51
43 | ip-192-168-1-52
44 | ip-192-168-1-53
45 | ip-192-168-1-54
46 | ip-192-168-1-60
47 | ip-192-168-1-61
48 | ip-192-168-1-62
49 | ip-192-168-1-63
50 | ip-192-168-1-64
51 | ip-192-168-1-70
52 | ip-192-168-1-71
53 | ip-192-168-1-72
54 | ip-192-168-1-73
55 | ip-192-168-1-74
56 | ```
57 |
58 | Once your configuration has been generated, you can start to work on flashing your base image.
59 |
60 | ## 3 - Flashing image
61 |
62 | 1. Using [balenaEtcher](https://www.balena.io/etcher), flash the base image onto your SD card.
63 | 2. Once complete, copy the files from the generated host directory to the `system-boot` partition (see the sketch below).
64 | 3. After copying the files to `system-boot`, you're safe to eject the SD card.
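For example, on macOS the freshly flashed card typically mounts the boot partition at `/Volumes/system-boot`, so copying
the files for a single host might look like the following (the host directory and mount point here are assumptions; adjust
for your machine):

```
$ cp cloud-init/generated/ip-192-168-1-50/* /Volumes/system-boot/
```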
65 |
66 | ## 4 - Booting up
67 |
68 | Once all the flash drives are configured, you should be able to boot them up. I start wired for the initial boot
69 | because I ran into issues with cloud-init not initializing properly over wifi. I was able to reset cloud-init
70 | and force a rerun, which resolved it. It just required a little more work.
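If a node does come up in a bad state, the reset amounts to clearing cloud-init's state and letting it run again on the
next boot. Roughly (run on the node itself):

```
$ sudo cloud-init clean --logs
$ sudo reboot
```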
71 |
--------------------------------------------------------------------------------
/cloud-init/generate.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | readonly SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd)"
4 |
5 | export NETWORK_IP=${NETWORK_IP:-192.168.4.1}
6 | readonly ip_prefix=${NETWORK_IP%.1}
7 |
8 | export ADMIN_USER=${ADMIN_USER:-mjpitz}
9 |
10 | readonly default_ssh_public_key_path="${HOME}/.ssh/id_rsa.pub"
11 | readonly ssh_public_key_path="${SSH_PUBLIC_KEY_PATH:-"$default_ssh_public_key_path"}"
12 | readonly ssh_public_key=$(cat "${ssh_public_key_path}")
13 |
14 | function generate_host() {
15 | static_ip="${1}"
16 | host="ip-${static_ip//./-}"
17 |
18 | templates="${SCRIPT_DIR}/templates"
19 | generated="${SCRIPT_DIR}/generated/${host}"
20 | mkdir -p "${generated}"
21 |
22 | # WIFI_SSID=""
23 | # WIFI_PASSWORD=""
24 |
25 | export SSH_PUBLIC_KEY="${ssh_public_key}"
26 | export HOST="${host}"
27 | export STATIC_IP="${static_ip}"
28 | export UPTIME='$UPTIME'
29 |
30 | envsubst < ${templates}/network-config.yaml > ${generated}/network-config
31 | envsubst < ${templates}/user-data.yaml > ${generated}/user-data
32 | envsubst < ${templates}/ssh > ${generated}/ssh
33 | envsubst < ${templates}/cmdline.txt > ${generated}/cmdline.txt
34 | }
35 |
36 | function generate_zone() {
37 | prefix=${1}
38 | start=${2}
39 | end=${3}
40 |
41 | for i in $(seq ${start} ${end}); do
42 | static_ip="${prefix}.${i}"
43 | echo "generating ${static_ip}"
44 | generate_host "${static_ip}"
45 | done
46 | }
47 |
48 | generate_zone ${ip_prefix} 30 30
49 | generate_zone ${ip_prefix} 50 54
50 | generate_zone ${ip_prefix} 60 64
51 | generate_zone ${ip_prefix} 70 74
52 |
--------------------------------------------------------------------------------
/cloud-init/templates/cmdline.txt:
--------------------------------------------------------------------------------
1 | net.ifnames=0 dwc_otg.lpm_enable=0 console=serial0,115200 console=tty1 root=LABEL=writable rootfstype=ext4 elevator=deadline rootwait fixrtc cgroup_enable=memory cgroup_memory=1
2 |
--------------------------------------------------------------------------------
/cloud-init/templates/network-config.yaml:
--------------------------------------------------------------------------------
1 | # This file contains a netplan-compatible configuration which cloud-init
2 | # will apply on first-boot. Please refer to the cloud-init documentation and
3 | # the netplan reference for full details:
4 | #
5 | # https://cloudinit.readthedocs.io/
6 | # https://netplan.io/reference
7 | #
8 | # Some additional examples are commented out below
9 |
10 | version: 2
11 | ethernets:
12 | eth0:
13 | dhcp4: true
14 | optional: true
15 | wifis:
16 | wlan0:
17 | dhcp4: false
18 | dhcp6: false
19 | addresses: [${STATIC_IP}/24]
20 | gateway4: ${NETWORK_IP}
21 | nameservers:
22 | addresses: [${NETWORK_IP}]
23 | access-points:
24 | "${WIFI_SSID}":
25 | password: "${WIFI_PASSWORD}"
26 |
--------------------------------------------------------------------------------
/cloud-init/templates/ssh:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mjpitz/rpi-cloud-init/18324be9437e9b11e3d33f0903e53a6b15bb8067/cloud-init/templates/ssh
--------------------------------------------------------------------------------
/cloud-init/templates/user-data.yaml:
--------------------------------------------------------------------------------
1 | #cloud-config
2 |
3 | ## TEMPLATE FILE FOR POPULATING USER-DATA
4 | ##
5 | ## VARIABLES
6 | ## - ADMIN_USER=
7 | ## - SSH_PUBLIC_KEY=
8 | ## - HOST=
9 | ## - STATIC_IP=
10 |
11 | ---
12 |
13 | ## Users and groups
14 | chpasswd:
15 | expire: false
16 | list:
17 | - ${ADMIN_USER}:${ADMIN_USER}
18 |
19 | users:
20 | - name: ${ADMIN_USER}
21 | sudo: ALL=(ALL) NOPASSWD:ALL
22 | shell: /bin/bash
23 | lock_passwd: true
24 | ssh_authorized_keys:
25 | - "${SSH_PUBLIC_KEY}"
26 |
27 | ## Configure hostname
28 | preserve_hostname: false
29 | fqdn: ${HOST}
30 | hostname: ${HOST}
31 |
32 | ## Preconfigure docker logging to rotate
33 | write_files:
34 | - path: /etc/docker/daemon.json
35 | content: |
36 | {
37 | "log-driver": "json-file",
38 | "log-opts": {
39 | "max-size": "10m",
40 | "max-file": "3"
41 | }
42 | }
43 |
44 | # Disable until we set up the network
45 | apt_upgrade: false
46 | apt_update: false
47 |
48 | runcmd:
49 | - swapoff -a # kubernetes needs swap off
50 | - netplan apply # force application of network-config
51 | - apt-get update -y # now update
52 | - apt-get upgrade -y # and upgrade
53 |
54 | final_message: "The system is finally up after $UPTIME seconds"
55 |
--------------------------------------------------------------------------------
/compute/k3s/README.md:
--------------------------------------------------------------------------------
1 | # k3s
2 |
3 | Provides the primary compute mechanism for my home cluster.
4 |
5 | This is currently a mix of `amd64` and `arm64` machines that I had around the house.
6 |
7 | ```
8 | cd compute/k3s
9 |
10 | terraform init
11 |
12 | terraform plan
13 |
14 | terraform apply --auto-approve
15 | terraform destroy --auto-approve
16 | ```
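Once the cluster is applied, the kubeconfig can be pulled down from the server node using the `fetch-config.sh` helper in
this directory. A usage sketch, assuming the defaults baked into the script (`REGION=mya`, `SERVER_IP=192.168.4.30`):

```
REGION=mya SERVER_IP=192.168.4.30 bash fetch-config.sh
export KUBECONFIG="${HOME}/.kube/mya.yaml"
```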
17 |
--------------------------------------------------------------------------------
/compute/k3s/agent/main.tf:
--------------------------------------------------------------------------------
1 | data "template_file" "config" {
2 | template = </dev/null\""
14 | }
15 | }
16 |
17 | data "docker_registry_image" "k3s" {
18 | name = var.k3s_image
19 | }
20 |
21 | resource "docker_image" "k3s" {
22 | name = data.docker_registry_image.k3s.name
23 | keep_locally = true
24 | pull_triggers = [data.docker_registry_image.k3s.name]
25 | }
26 |
27 | resource "docker_container" "k3s" {
28 | depends_on = [null_resource.config]
29 |
30 | name = "k3s"
31 | hostname = var.hostname
32 | network_mode = "host"
33 | image = docker_image.k3s.name
34 | restart = "unless-stopped"
35 |
36 | command = [
37 | "agent",
38 | "--node-name",
39 | var.hostname,
40 | ]
41 |
42 | publish_all_ports = true
43 |
44 | volumes {
45 | container_path = "/etc/rancher/k3s/config.yaml"
46 | host_path = "/etc/rancher/k3s/config.yaml"
47 | }
48 |
49 | volumes {
50 | container_path = "/lib/modules"
51 | host_path = "/lib/modules"
52 | }
53 |
54 | privileged = true
55 |
56 | tmpfs = {
57 | "/run" : "",
58 | "/var/run" : "",
59 | }
60 | }
61 |
--------------------------------------------------------------------------------
/compute/k3s/agent/providers.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | docker = {
4 | source = "kreuzwerker/docker"
5 | version = "~> 2.13.0"
6 | }
7 | }
8 | }
9 |
--------------------------------------------------------------------------------
/compute/k3s/agent/vars.tf:
--------------------------------------------------------------------------------
1 | variable "hostname" {
2 | type = string
3 | }
4 |
5 | variable "ip" {
6 | type = string
7 | }
8 |
9 | variable "token" {
10 | type = string
11 | }
12 |
13 | variable "server" {
14 | type = string
15 | }
16 |
17 | variable "region" {
18 | type = string
19 | default = ""
20 | }
21 |
22 | variable "zone" {
23 | type = string
24 | default = ""
25 | }
26 |
27 | variable "k3s_image" {
28 | type = string
29 | }
30 |
--------------------------------------------------------------------------------
/compute/k3s/fetch-config.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | which_sed="sed"
4 | if [[ "$(uname -s)" == "Darwin" ]]; then
5 | echo "on darwin, using gsed"
6 | which_sed="gsed"
7 | fi
8 |
9 | readonly REGION="${REGION:-mya}"
10 | readonly SERVER_IP="${SERVER_IP:-192.168.4.30}"
11 | readonly SERVER_HOST="ip-${SERVER_IP//./-}"
12 |
13 |
14 | config_file="${HOME}/.kube/${REGION}.yaml"
15 | rm -rf ${config_file}
16 |
17 | eval $(docker-machine env ${SERVER_HOST})
18 | docker cp k3s:/etc/rancher/k3s/k3s.yaml "${config_file}"
19 |
20 | ${which_sed} -i "s/127.0.0.1/${SERVER_IP}/g" "${config_file}"
21 | ${which_sed} -i "s/localhost/${SERVER_IP}/g" "${config_file}"
22 | ${which_sed} -i "s/default/${REGION}/g" "${config_file}"
23 |
--------------------------------------------------------------------------------
/compute/k3s/main.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | backend "s3" {
3 | skip_credentials_validation = true
4 | skip_metadata_api_check = true
5 | endpoint = "https://nyc3.digitaloceanspaces.com"
6 | region = "us-east-1"
7 | bucket = "mya-tfstate"
8 | key = "infra/home/compute/k3s/terraform.tfstate"
9 | }
10 | }
11 |
12 | resource "random_password" "token" {
13 | length = 32
14 | special = false
15 | upper = true
16 | lower = true
17 | number = true
18 | }
19 |
20 | locals {
21 | server = "https://192.168.4.30:6443"
22 |
23 | k3s_server_image = "${var.k3s_image}:${var.k3s_server_tag}"
24 | k3s_agent_image = "${var.k3s_image}:${var.k3s_agent_tag}"
25 |
26 | region = "mya"
27 | zone_0 = "mya-0"
28 | zone_1 = "mya-1"
29 | zone_2 = "mya-2"
30 | }
31 |
--------------------------------------------------------------------------------
/compute/k3s/providers.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | docker = {
4 | source = "kreuzwerker/docker"
5 | version = "~> 2.13.0"
6 | }
7 | }
8 | }
9 |
--------------------------------------------------------------------------------
/compute/k3s/server/main.tf:
--------------------------------------------------------------------------------
1 | data "template_file" "config" {
2 | template = </dev/null\""
20 | }
21 | }
22 |
23 | data "docker_registry_image" "k3s" {
24 | name = var.k3s_image
25 | }
26 |
27 | resource "docker_image" "k3s" {
28 | name = data.docker_registry_image.k3s.name
29 | keep_locally = true
30 | pull_triggers = [data.docker_registry_image.k3s.name]
31 | }
32 |
33 | resource "docker_container" "k3s" {
34 | depends_on = [null_resource.config]
35 |
36 | name = "k3s"
37 | hostname = var.hostname
38 | network_mode = "host"
39 | image = docker_image.k3s.name
40 | restart = "unless-stopped"
41 |
42 | command = [
43 | "server",
44 | "--node-name",
45 | var.hostname,
46 | ]
47 |
48 | publish_all_ports = true
49 |
50 | volumes {
51 | container_path = "/etc/rancher/k3s"
52 | host_path = "/etc/rancher/k3s"
53 | }
54 |
55 | volumes {
56 | container_path = "/lib/modules"
57 | host_path = "/lib/modules"
58 | }
59 |
60 | volumes {
61 | container_path = "/var/lib/rancher/k3s"
62 | host_path = "/var/lib/rancher/k3s"
63 | }
64 |
65 | privileged = true
66 |
67 | tmpfs = {
68 | "/run" : "",
69 | "/var/run" : "",
70 | }
71 | }
72 |
--------------------------------------------------------------------------------
/compute/k3s/server/providers.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | docker = {
4 | source = "kreuzwerker/docker"
5 | version = "~> 2.13.0"
6 | }
7 | }
8 | }
9 |
--------------------------------------------------------------------------------
/compute/k3s/server/vars.tf:
--------------------------------------------------------------------------------
1 | variable "hostname" {
2 | type = string
3 | }
4 |
5 | variable "ip" {
6 | type = string
7 | }
8 |
9 | variable "token" {
10 | type = string
11 | }
12 |
13 | variable "region" {
14 | type = string
15 | default = ""
16 | }
17 |
18 | variable "zone" {
19 | type = string
20 | default = ""
21 | }
22 |
23 | variable "k3s_image" {
24 | type = string
25 | }
26 |
--------------------------------------------------------------------------------
/compute/k3s/terraform.tfvars:
--------------------------------------------------------------------------------
1 | k3s_server_tag = "v1.23.2-k3s1"
2 | k3s_agent_tag = "v1.23.2-k3s1"
3 |
--------------------------------------------------------------------------------
/compute/k3s/vars.tf:
--------------------------------------------------------------------------------
1 | variable "k3s_image" {
2 | type = string
3 | default = "rancher/k3s"
4 | }
5 |
6 | variable "k3s_server_tag" {
7 | type = string
8 | }
9 |
10 | variable "k3s_agent_tag" {
11 | type = string
12 | }
13 |
--------------------------------------------------------------------------------
/compute/k3s/zone-0.tf:
--------------------------------------------------------------------------------
1 | provider "docker" {
2 | alias = "ip-192-168-4-30"
3 | host = "tcp://192.168.4.30:2376"
4 | cert_path = pathexpand("~/.docker/machine/machines/ip-192-168-4-30")
5 | }
6 |
7 | module "ip-192-168-4-30" {
8 | source = "./server"
9 | providers = {
10 | docker = docker.ip-192-168-4-30
11 | }
12 |
13 | hostname = "ip-192-168-4-30"
14 | ip = "192.168.4.30"
15 | token = random_password.token.result
16 | region = local.region
17 | zone = local.zone_0
18 | k3s_image = local.k3s_server_image
19 | }
20 |
--------------------------------------------------------------------------------
/compute/k3s/zone-1.tf:
--------------------------------------------------------------------------------
1 | provider "docker" {
2 | alias = "ip-192-168-4-51"
3 | host = "tcp://192.168.4.51:2376"
4 | cert_path = pathexpand("~/.docker/machine/machines/ip-192-168-4-51")
5 | }
6 |
7 | module "ip-192-168-4-51" {
8 | depends_on = [module.ip-192-168-4-30]
9 |
10 | source = "./agent"
11 | providers = {
12 | docker = docker.ip-192-168-4-51
13 | }
14 |
15 | hostname = "ip-192-168-4-51"
16 | ip = "192.168.4.51"
17 | token = random_password.token.result
18 | server = local.server
19 | region = local.region
20 | zone = local.zone_1
21 | k3s_image = local.k3s_agent_image
22 | }
23 |
24 | provider "docker" {
25 | alias = "ip-192-168-4-52"
26 | host = "tcp://192.168.4.52:2376"
27 | cert_path = pathexpand("~/.docker/machine/machines/ip-192-168-4-52")
28 | }
29 |
30 | module "ip-192-168-4-52" {
31 | depends_on = [module.ip-192-168-4-30]
32 |
33 | source = "./agent"
34 | providers = {
35 | docker = docker.ip-192-168-4-52
36 | }
37 |
38 | hostname = "ip-192-168-4-52"
39 | ip = "192.168.4.52"
40 | token = random_password.token.result
41 | server = local.server
42 | region = local.region
43 | zone = local.zone_1
44 | k3s_image = local.k3s_agent_image
45 | }
46 |
47 | provider "docker" {
48 | alias = "ip-192-168-4-53"
49 | host = "tcp://192.168.4.53:2376"
50 | cert_path = pathexpand("~/.docker/machine/machines/ip-192-168-4-53")
51 | }
52 |
53 | module "ip-192-168-4-53" {
54 | depends_on = [module.ip-192-168-4-30]
55 |
56 | source = "./agent"
57 | providers = {
58 | docker = docker.ip-192-168-4-53
59 | }
60 |
61 | hostname = "ip-192-168-4-53"
62 | ip = "192.168.4.53"
63 | token = random_password.token.result
64 | server = local.server
65 | region = local.region
66 | zone = local.zone_1
67 | k3s_image = local.k3s_agent_image
68 | }
69 |
70 | provider "docker" {
71 | alias = "ip-192-168-4-54"
72 | host = "tcp://192.168.4.54:2376"
73 | cert_path = pathexpand("~/.docker/machine/machines/ip-192-168-4-54")
74 | }
75 |
76 | module "ip-192-168-4-54" {
77 | depends_on = [module.ip-192-168-4-30]
78 |
79 | source = "./agent"
80 | providers = {
81 | docker = docker.ip-192-168-4-54
82 | }
83 |
84 | hostname = "ip-192-168-4-54"
85 | ip = "192.168.4.54"
86 | token = random_password.token.result
87 | server = local.server
88 | region = local.region
89 | zone = local.zone_1
90 | k3s_image = local.k3s_agent_image
91 | }
92 |
--------------------------------------------------------------------------------
/compute/k3s/zone-2.tf:
--------------------------------------------------------------------------------
1 | provider "docker" {
2 | alias = "ip-192-168-4-61"
3 | host = "tcp://192.168.4.61:2376"
4 | cert_path = pathexpand("~/.docker/machine/machines/ip-192-168-4-61")
5 | }
6 |
7 | module "ip-192-168-4-61" {
8 | depends_on = [module.ip-192-168-4-30]
9 |
10 | source = "./agent"
11 | providers = {
12 | docker = docker.ip-192-168-4-61
13 | }
14 |
15 | hostname = "ip-192-168-4-61"
16 | ip = "192.168.4.61"
17 | token = random_password.token.result
18 | server = local.server
19 | region = local.region
20 | zone = local.zone_2
21 | k3s_image = local.k3s_agent_image
22 | }
23 |
24 | provider "docker" {
25 | alias = "ip-192-168-4-62"
26 | host = "tcp://192.168.4.62:2376"
27 | cert_path = pathexpand("~/.docker/machine/machines/ip-192-168-4-62")
28 | }
29 |
30 | module "ip-192-168-4-62" {
31 | depends_on = [module.ip-192-168-4-30]
32 |
33 | source = "./agent"
34 | providers = {
35 | docker = docker.ip-192-168-4-62
36 | }
37 |
38 | hostname = "ip-192-168-4-62"
39 | ip = "192.168.4.62"
40 | token = random_password.token.result
41 | server = local.server
42 | region = local.region
43 | zone = local.zone_2
44 | k3s_image = local.k3s_agent_image
45 | }
46 |
47 | provider "docker" {
48 | alias = "ip-192-168-4-63"
49 | host = "tcp://192.168.4.63:2376"
50 | cert_path = pathexpand("~/.docker/machine/machines/ip-192-168-4-63")
51 | }
52 |
53 | module "ip-192-168-4-63" {
54 | depends_on = [module.ip-192-168-4-30]
55 |
56 | source = "./agent"
57 | providers = {
58 | docker = docker.ip-192-168-4-63
59 | }
60 |
61 | hostname = "ip-192-168-4-63"
62 | ip = "192.168.4.63"
63 | token = random_password.token.result
64 | server = local.server
65 | region = local.region
66 | zone = local.zone_2
67 | k3s_image = local.k3s_agent_image
68 | }
69 |
70 | provider "docker" {
71 | alias = "ip-192-168-4-64"
72 | host = "tcp://192.168.4.64:2376"
73 | cert_path = pathexpand("~/.docker/machine/machines/ip-192-168-4-64")
74 | }
75 |
76 | module "ip-192-168-4-64" {
77 | depends_on = [module.ip-192-168-4-30]
78 |
79 | source = "./agent"
80 | providers = {
81 | docker = docker.ip-192-168-4-64
82 | }
83 |
84 | hostname = "ip-192-168-4-64"
85 | ip = "192.168.4.64"
86 | token = random_password.token.result
87 | server = local.server
88 | region = local.region
89 | zone = local.zone_2
90 | k3s_image = local.k3s_agent_image
91 | }
92 |
--------------------------------------------------------------------------------
/compute/workloads/aetherfs/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v2
2 | type: application
3 | name: aetherfs
4 | version: 0.0.0
5 | dependencies:
6 | - repository: file://../../../aetherfs/deploy/helm/aetherfs
7 | name: aetherfs
8 | version: 0.0.0
9 |
--------------------------------------------------------------------------------
/compute/workloads/aetherfs/values.yaml:
--------------------------------------------------------------------------------
1 | aetherfs:
2 | kind: DaemonSet
3 | image:
4 | repository: mjpitz/aetherfs
5 | tag: 22.02-rc1
6 |
7 | service:
8 | type: ClusterIP
9 | clusterIP: 10.43.7.37
10 |
11 | topologyConstraints:
12 | enabled: true
13 |
14 | nfs:
15 | enabled: true
16 | ui:
17 | enabled: true
18 |
--------------------------------------------------------------------------------
/compute/workloads/cert-manager/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v2
2 | type: application
3 | name: cert-manager
4 | version: 1.6.1
5 | dependencies:
6 | - repository: https://charts.jetstack.io
7 | name: cert-manager
8 | version: v1.6.1
9 |
--------------------------------------------------------------------------------
/compute/workloads/cert-manager/values.yaml:
--------------------------------------------------------------------------------
1 | cert-manager:
2 | installCRDs: true
3 |
--------------------------------------------------------------------------------
/compute/workloads/grafana/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v2
2 | type: application
3 | name: grafana
4 | version: 0.0.0
5 | dependencies:
6 | - repository: https://grafana.github.io/helm-charts
7 | name: grafana
8 | version: 6.20.5
9 |
--------------------------------------------------------------------------------
/compute/workloads/grafana/templates/datasources.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.datasources }}
2 | apiVersion: v1
3 | kind: ConfigMap
4 | metadata:
5 | name: datasources
6 | labels:
7 | grafana_datasource: "1"
8 | data:
9 | datasource.yaml: |-
10 | apiVersion: 1
11 | datasources:
12 | {{- .Values.datasources | toYaml | nindent 6 }}
13 |
14 | {{- end }}
15 |
--------------------------------------------------------------------------------
/compute/workloads/grafana/values.yaml:
--------------------------------------------------------------------------------
1 | datasources: []
2 |
3 | grafana:
4 | replicas: 1
5 | rbac:
6 | pspEnabled: false
7 | plugins:
8 | - frser-sqlite-datasource
9 | service:
10 | type: "NodePort"
11 | port: 3000
12 | nodePort: 31814
13 | adminUser: admin
14 | adminPassword: admin
15 | securityContext:
16 | runAsUser: 0
17 | runAsGroup: 0
18 | fsGroup: 0
19 | grafana.ini:
20 | server:
21 | root_url: http://192.168.4.51:31814/
22 | disable_initial_admin_creation: true
23 | auth:
24 | disable_login_form: true
25 | auth.github:
26 | enabled: true
27 | allow_sign_up: true
28 | scopes: user:email,read:org
29 | auth_url: https://github.com/login/oauth/authorize
30 | token_url: https://github.com/login/oauth/access_token
31 | api_url: https://api.github.com/user
32 | users:
33 | auto_assign_org_role: Admin
34 | sidecar:
35 | dashboards:
36 | enabled: true
37 | labelValue: "1"
38 | searchNamespace: "ALL"
39 | datasources:
40 | enabled: true
41 | labelValue: "1"
42 | searchNamespace: "ALL"
43 |
--------------------------------------------------------------------------------
/compute/workloads/prometheus/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v2
2 | type: application
3 | name: prometheus
4 | version: 0.0.0
5 | dependencies:
6 | - name: prometheus
7 | version: 15.1.3
8 | repository: https://prometheus-community.github.io/helm-charts
9 |
--------------------------------------------------------------------------------
/compute/workloads/prometheus/values.yaml:
--------------------------------------------------------------------------------
1 | prometheus:
2 | alertmanager:
3 | enabled: false
4 | pushgateway:
5 | enabled: false
6 | serverFiles:
7 | prometheus.yml:
8 | scrape_configs:
9 | extraScrapeConfigs: []
--------------------------------------------------------------------------------
/compute/workloads/services/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v2
2 | type: application
3 | name: services
4 | version: 0.0.0
5 |
--------------------------------------------------------------------------------
/compute/workloads/services/templates/service.yaml:
--------------------------------------------------------------------------------
1 | {{- range $service := .Values.services }}
2 | ---
3 | apiVersion: v1
4 | kind: Service
5 | metadata:
6 | name: {{ $service.name }}
7 | {{- if $service.prometheus.enabled }}
8 | annotations:
9 | prometheus.io/scrape: "true"
10 | prometheus.io/path: {{ $service.prometheus.path | default "/metrics" }}
11 | prometheus.io/port: {{ $service.prometheus.port | quote }}
12 | {{- end }}
13 | spec:
14 | ports:
15 | {{- range $port := $service.ports }}
16 | - protocol: {{ $port.protocol | default "TCP" }}
17 | port: {{ $port.port }}
18 | targetPort: {{ $port.port }}
19 | name: {{ $port.name }}
20 | {{- end }}
21 | ---
22 | apiVersion: v1
23 | kind: Endpoints
24 | metadata:
25 | name: {{ $service.name }}
26 | {{- if $service.prometheus.enabled }}
27 | annotations:
28 | prometheus.io/scrape: "true"
29 | prometheus.io/path: {{ $service.prometheus.path | default "/metrics" }}
30 | prometheus.io/port: {{ $service.prometheus.port | quote }}
31 | {{- end }}
32 | subsets:
33 | - addresses:
34 | {{- range $ip := $service.ips }}
35 | - ip: {{ $ip }}
36 | {{- end }}
37 | ports:
38 | {{- range $port := $service.ports }}
39 | - port: {{ $port.port }}
40 | name: {{ $port.name }}
41 | {{- end }}
42 | {{- end }}
43 |
--------------------------------------------------------------------------------
/compute/workloads/services/values.yaml:
--------------------------------------------------------------------------------
1 | services:
2 | - name: crdb
3 | ips:
4 | - 192.168.4.50
5 | - 192.168.4.60
6 | - 192.168.4.70
7 | ports:
8 | - name: http
9 | port: 8080
10 | - name: crdb
11 | port: 26257
12 | prometheus:
13 | enabled: true
14 | path: "/_status/vars"
15 | port: 8080
16 |
17 | - name: minio
18 | ips:
19 | - 192.168.4.30
20 | ports:
21 | - name: http
22 | port: 9090
23 | - name: s3
24 | port: 9000
25 | prometheus:
26 | enabled: true
27 | path: "/minio/v2/metrics/cluster"
28 | port: 9000
29 |
--------------------------------------------------------------------------------
/scripts/docker-machine/README.md:
--------------------------------------------------------------------------------
1 | # docker-machine
2 |
3 | I treat my laptop as an orchestrator for the nodes of my cluster. To do this, I use docker-machine and connect each
4 | machine. This provides a few benefits for my home lab, the biggest of which is convenience. I can easily introspect
5 | remote machines and docker processes from a single terminal.
6 |
7 | ```bash
8 | # run to purge the unattended-upgrades script
9 | $ ./docker-machine/purge.sh
10 |
11 | $ ./docker-machine/connect.sh
12 | # ...
13 |
14 | $ docker-machine ls
15 | NAME ACTIVE DRIVER STATE URL SWARM DOCKER ERRORS
16 | ip-192-168-1-50 - generic Running tcp://192.168.1.50:2376 v19.03.8
17 | ip-192-168-1-51 - generic Running tcp://192.168.1.51:2376 v19.03.8
18 | ip-192-168-1-52 - generic Running tcp://192.168.1.52:2376 v19.03.8
19 | ip-192-168-1-53 - generic Running tcp://192.168.1.53:2376 v19.03.8
20 | ip-192-168-1-54 - generic Running tcp://192.168.1.54:2376 v19.03.8
21 | ip-192-168-1-60 - generic Running tcp://192.168.1.60:2376 v19.03.8
22 | ip-192-168-1-61 - generic Running tcp://192.168.1.61:2376 v19.03.8
23 | ip-192-168-1-62 - generic Running tcp://192.168.1.62:2376 v19.03.8
24 | ip-192-168-1-63 - generic Running tcp://192.168.1.63:2376 v19.03.8
25 | ip-192-168-1-64 - generic Running tcp://192.168.1.64:2376 v19.03.8
26 | ip-192-168-1-70 - generic Running tcp://192.168.1.70:2376 v19.03.8
27 | ip-192-168-1-71 - generic Running tcp://192.168.1.71:2376 v19.03.8
28 | ip-192-168-1-72 - generic Running tcp://192.168.1.72:2376 v19.03.8
29 | ip-192-168-1-73 - generic Running tcp://192.168.1.73:2376 v19.03.8
30 | ip-192-168-1-74 - generic Running tcp://192.168.1.74:2376 v19.03.8
31 | ```
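Under the hood, each host is registered with docker-machine's `generic` driver (as the `DRIVER` column above shows). A
rough sketch of what that looks like for a single node; the IP, user, and key path here are assumptions, with the real
values coming from the connect script's environment variables:

```bash
$ docker-machine create \
    --driver generic \
    --generic-ip-address 192.168.1.50 \
    --generic-ssh-user mjpitz \
    --generic-ssh-key ~/.ssh/id_rsa \
    ip-192-168-1-50
```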
32 |
--------------------------------------------------------------------------------
/scripts/docker-machine/connect.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | export NETWORK_IP=${NETWORK_IP:-192.168.4.1}
4 | readonly ip_prefix=${NETWORK_IP%.1}
5 |
6 | export ADMIN_USER=${ADMIN_USER:-mjpitz}
7 |
8 | readonly default_ssh_key_path="${HOME}/.ssh/id_rsa"
9 | readonly ssh_key_path="${SSH_KEY_PATH:-"$default_ssh_key_path"}"
10 |
11 | function connect_host() {
12 | static_ip="${1}"
13 | host="ip-${static_ip//./-}"
14 |
15 | if [[ ! -z "${DEBUG}" ]]; then
16 | cat </dev/null 2>&1 && pwd)"
4 |
5 | export NETWORK_IP=${NETWORK_IP:-192.168.4.1}
6 | readonly ip_prefix=${NETWORK_IP%.1}
7 |
8 | readonly packages="$@"
9 |
10 | function install() {
11 | prefix="${1}"
12 | start=${2}
13 | end=${3}
14 |
15 | for i in $(seq ${start} ${end}); do
16 | ip="${prefix}.${i}"
17 | host="ip-${ip//./-}"
18 |
19 | docker-machine ssh "${host}" "sudo apt-get install -y ${packages}"
20 | done
21 | }
22 |
23 | install ${ip_prefix} 50 54
24 | install ${ip_prefix} 60 64
25 |
--------------------------------------------------------------------------------
/storage/crdb/README.md:
--------------------------------------------------------------------------------
1 | # crdb
2 |
3 | Provides a primary storage mechanism for my homelab.
4 |
5 | Deploys a 3-node CockroachDB cluster on top of Raspberry Pi 4 nodes. Each node runs from within a container, making
6 | it easy to introspect from my laptop. This is managed using terraform, as it provides a convenient way to bring up and
7 | tear down the entire cluster. My home systems can tolerate a little downtime for maintenance.
8 |
9 | ```
10 | cd storage/crdb
11 |
12 | terraform init
13 |
14 | terraform plan
15 |
16 | terraform apply --auto-approve
17 | terraform destroy --auto-approve
18 | ```
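Once the containers are up, the cluster can be spot-checked from any machine with the `cockroach` binary installed, or
through the admin UI on port `8080` of any node. A sketch, assuming the insecure mode configured in `node/main.tf`:

```
cockroach sql --insecure --host=192.168.4.50:26257 -e "SHOW DATABASES;"
```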
19 |
--------------------------------------------------------------------------------
/storage/crdb/main.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | backend "s3" {
3 | skip_credentials_validation = true
4 | skip_metadata_api_check = true
5 | endpoint = "https://nyc3.digitaloceanspaces.com"
6 | region = "us-east-1"
7 | bucket = "mya-tfstate"
8 | key = "infra/home/storage/crdb/terraform.tfstate"
9 | }
10 | }
11 |
12 | locals {
13 | join = [
14 | "192.168.4.50",
15 | "192.168.4.60",
16 | "192.168.4.70",
17 | ]
18 | }
19 |
20 | # ip-192-168-4-50
21 |
22 | provider "docker" {
23 | alias = "ip-192-168-4-50"
24 | host = "tcp://192.168.4.50:2376"
25 | cert_path = pathexpand("~/.docker/machine/machines/ip-192-168-4-50")
26 | }
27 |
28 | module "ip-192-168-4-50" {
29 | source = "./node"
30 | providers = {
31 | docker = docker.ip-192-168-4-50
32 | }
33 |
34 | hostname = "ip-192-168-4-50"
35 | ip = "192.168.4.50"
36 | join = local.join
37 | }
38 |
39 | # ip-192-168-4-60
40 |
41 | provider "docker" {
42 | alias = "ip-192-168-4-60"
43 | host = "tcp://192.168.4.60:2376"
44 | cert_path = pathexpand("~/.docker/machine/machines/ip-192-168-4-60")
45 | }
46 |
47 | module "ip-192-168-4-60" {
48 | source = "./node"
49 | providers = {
50 | docker = docker.ip-192-168-4-60
51 | }
52 |
53 | hostname = "ip-192-168-4-60"
54 | ip = "192.168.4.60"
55 | join = local.join
56 | }
57 |
58 | # ip-192-168-4-70
59 |
60 | provider "docker" {
61 | alias = "ip-192-168-4-70"
62 | host = "tcp://192.168.4.70:2376"
63 | cert_path = pathexpand("~/.docker/machine/machines/ip-192-168-4-70")
64 | }
65 |
66 | module "ip-192-168-4-70" {
67 | source = "./node"
68 | providers = {
69 | docker = docker.ip-192-168-4-70
70 | }
71 |
72 | hostname = "ip-192-168-4-70"
73 | ip = "192.168.4.70"
74 | join = local.join
75 | }
76 |
--------------------------------------------------------------------------------
/storage/crdb/node/main.tf:
--------------------------------------------------------------------------------
1 | resource "docker_container" "crdb" {
2 | name = "crdb"
3 | hostname = var.hostname
4 | network_mode = "host"
5 | image = "christoofar/cockroachdb-arm64:21.2.3"
6 | restart = "unless-stopped"
7 |
8 | command = [
9 | "start",
10 | "--insecure", # fix
11 | "--join=${join(",", var.join)}",
12 | "--listen-addr=0.0.0.0",
13 | "--http-addr=0.0.0.0",
14 | "--advertise-addr=${var.ip}:26257",
15 | "--store=/data/drive-1",
16 | ]
17 |
18 | ports {
19 | internal = 26257
20 | external = 26257
21 | }
22 |
23 | ports {
24 | internal = 8080
25 | external = 8080
26 | }
27 |
28 | volumes {
29 | container_path = "/data/drive-1"
30 | host_path = "/data/drive-1"
31 | }
32 | }
33 |
--------------------------------------------------------------------------------
/storage/crdb/node/providers.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | docker = {
4 | source = "kreuzwerker/docker"
5 | version = "~> 2.13.0"
6 | }
7 | }
8 | }
9 |
--------------------------------------------------------------------------------
/storage/crdb/node/vars.tf:
--------------------------------------------------------------------------------
1 | variable "hostname" {
2 | type = string
3 | }
4 |
5 | variable "ip" {
6 | type = string
7 | }
8 |
9 | variable "join" {
10 | type = list(string)
11 | }
12 |
--------------------------------------------------------------------------------
/storage/crdb/providers.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | docker = {
4 | source = "kreuzwerker/docker"
5 | version = "~> 2.13.0"
6 | }
7 | }
8 | }
9 |
--------------------------------------------------------------------------------
/storage/minio/README.md:
--------------------------------------------------------------------------------
1 | # minio
2 |
3 | Provides an erasure-coded small-blob store backed by MinIO.
4 |
5 | Deploys a single node on top of the Apple Mini, currently backed by two physical drives. I'll likely add more drives over time.
6 |
7 | ```
8 | cd storage/minio
9 |
10 | terraform init
11 |
12 | terraform plan
13 |
14 | terraform apply --auto-approve
15 | terraform destroy --auto-approve
16 | ```
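The root credentials are generated by terraform (`random_string` / `random_password`) and exposed as sensitive outputs,
so they can be recovered after an apply with:

```
terraform output -raw user
terraform output -raw password
```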
17 |
--------------------------------------------------------------------------------
/storage/minio/main.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | backend "s3" {
3 | skip_credentials_validation = true
4 | skip_metadata_api_check = true
5 | endpoint = "https://nyc3.digitaloceanspaces.com"
6 | region = "us-east-1"
7 | bucket = "mya-tfstate"
8 | key = "infra/home/storage/minio/terraform.tfstate"
9 | }
10 | }
11 |
12 | resource "random_string" "user" {
13 | length = 32
14 | special = false
15 | upper = true
16 | lower = true
17 | number = true
18 | }
19 |
20 | resource "random_password" "password" {
21 | length = 32
22 | special = false
23 | upper = true
24 | lower = true
25 | number = true
26 | }
27 |
28 | provider "docker" {
29 | alias = "ip-192-168-4-30"
30 | host = "tcp://192.168.4.30:2376"
31 | cert_path = pathexpand("~/.docker/machine/machines/ip-192-168-4-30")
32 | }
33 |
34 | module "ip-192-168-4-30" {
35 | source = "./node"
36 | providers = {
37 | docker = docker.ip-192-168-4-30
38 | }
39 |
40 | hostname = "ip-192-168-4-30"
41 | ip = "192.168.4.30"
42 | user = random_string.user.result
43 | password = random_password.password.result
44 | }
45 |
--------------------------------------------------------------------------------
/storage/minio/node/main.tf:
--------------------------------------------------------------------------------
1 | resource "docker_container" "minio" {
2 | name = "minio"
3 | hostname = var.hostname
4 | network_mode = "host"
5 | image = "quay.io/minio/minio:RELEASE.2022-02-01T18-00-14Z"
6 | restart = "unless-stopped"
7 |
8 | env = [
9 | "MINIO_PROMETHEUS_AUTH_TYPE=public",
10 | "MINIO_CONSOLE_ADDRESS=:9090",
11 | "MINIO_ROOT_USER=${var.user}",
12 | "MINIO_ROOT_PASSWORD=${var.password}",
13 | ]
14 |
15 | command = [
16 | "server",
17 | "/data/drive-{0...1}/data{0...1}",
18 | ]
19 |
20 | ports {
21 | internal = 9000
22 | external = 9000
23 | }
24 |
25 | ports {
26 | internal = 9090
27 | external = 9090
28 | }
29 |
30 | volumes {
31 | container_path = "/data/drive-0"
32 | host_path = "/data/minio"
33 | }
34 |
35 | volumes {
36 | container_path = "/data/drive-1"
37 | host_path = "/data/drive-1"
38 | }
39 | }
40 |
--------------------------------------------------------------------------------
/storage/minio/node/providers.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | docker = {
4 | source = "kreuzwerker/docker"
5 | version = "~> 2.13.0"
6 | }
7 | }
8 | }
9 |
--------------------------------------------------------------------------------
/storage/minio/node/vars.tf:
--------------------------------------------------------------------------------
1 | variable "hostname" {
2 | type = string
3 | }
4 |
5 | variable "ip" {
6 | type = string
7 | }
8 |
9 | variable "user" {
10 | type = string
11 | }
12 |
13 | variable "password" {
14 | type = string
15 | }
16 |
--------------------------------------------------------------------------------
/storage/minio/outputs.tf:
--------------------------------------------------------------------------------
1 | output "user" {
2 | value = random_string.user.result
3 | sensitive = true
4 | }
5 |
6 | output "password" {
7 | value = random_password.password.result
8 | sensitive = true
9 | }
10 |
--------------------------------------------------------------------------------
/storage/minio/providers.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | docker = {
4 | source = "kreuzwerker/docker"
5 | version = "~> 2.13.0"
6 | }
7 | }
8 | }
9 |
--------------------------------------------------------------------------------