├── .env.tmpl
├── .envrc
├── .gitignore
├── LICENSE
├── README.md
├── ansible
├── README.md
├── apply-production.sh
├── bootstrap.yml
├── group_vars
│ ├── mopidy.yml
│ ├── raspbian.yml
│ ├── rsyslog_fwd.yml
│ └── telegraf.yml
├── hosts.yml
├── lowsec.yml.tmpl
├── requirements.yml
├── roles
│ ├── common
│ │ └── tasks
│ │ │ └── main.yml
│ └── mopidy
│ │ ├── handlers
│ │ └── main.yml
│ │ ├── tasks
│ │ └── main.yml
│ │ └── templates
│ │ └── mopidy.conf.j2
└── site.yml
├── authorized_keys.txt
├── baseimage
├── README.md
├── flake.lock
├── flake.nix
└── pve-cloudinit.sh
├── consul
├── README.md
└── create-agent-poltok
├── esphome
├── .gitignore
├── README.md
├── base
│ ├── mhet-devkit-ble.yaml
│ ├── novostella-20w-flood.yaml
│ └── secrets.yaml.tmpl
├── family_esp32.yaml
├── novostella-flood-1.yaml
├── novostella-flood-2.yaml
├── novostella-flood-3.yaml
├── novostella-flood-4.yaml
└── office_esp32.yaml
├── etc
└── gen_passwords
├── flake.lock
├── flake.nix
├── nixos
├── README.md
├── build-image
├── catalog
│ ├── default.nix
│ ├── monitors.nix
│ ├── nodes.nix
│ └── services.nix
├── common.nix
├── common
│ ├── onprem.nix
│ └── packages.nix
├── confgen
│ ├── default.nix
│ └── octodns
│ │ ├── bytemonkey-ext.nix
│ │ ├── bytemonkey-int.nix
│ │ ├── default.nix
│ │ └── home.nix
├── deploy
├── dns-sync
├── flake.lock
├── flake.nix
├── hosts
│ ├── carbon.nix
│ ├── ci-runner.nix
│ ├── eph.nix
│ ├── fastd.nix
│ ├── game.nix
│ ├── k3s.nix
│ ├── metrics.nix
│ ├── nc-pi3.nix
│ ├── nc-um350.nix
│ ├── nexus.nix
│ ├── ryzen.nix
│ ├── scratch.nix
│ ├── web.nix
│ └── witness.nix
├── hw
│ ├── asus-x570p.nix
│ ├── minis-um350.nix
│ ├── proxmox.nix
│ ├── qemu.nix
│ ├── sd-image-pi3.nix
│ └── tp-x1g3.nix
├── nix
│ ├── nixos-configurations.nix
│ └── util.nix
├── pkgs
│ ├── README.md
│ ├── cfdyndns.nix
│ ├── default.nix
│ ├── nomad-usb-device-plugin.nix
│ └── overlay.nix
├── restart
├── roles
│ ├── cluster-volumes.nix
│ ├── consul.nix
│ ├── default.nix
│ ├── dns.nix
│ ├── files
│ │ ├── consul
│ │ │ ├── consul-agent-ca.pem
│ │ │ └── skynet-server-consul-0.pem
│ │ ├── nomad
│ │ │ ├── nomad-ca.pem
│ │ │ └── server-client.pem
│ │ ├── nut
│ │ │ └── upssched-cmd
│ │ └── telegraf
│ │ │ └── zfs_snap_times.py
│ ├── gateway-online.nix
│ ├── gui-wayland.nix
│ ├── gui-xorg.nix
│ ├── homesite.nix
│ ├── influxdb.nix
│ ├── log-forwarder.nix
│ ├── loki.nix
│ ├── mosquitto.nix
│ ├── nfs-bind.nix
│ ├── nomad.nix
│ ├── tailscale.nix
│ ├── telegraf.nix
│ ├── traefik.nix
│ ├── upsmon.nix
│ ├── websvc.nix
│ └── workstation.nix
├── run
├── secrets
│ ├── cloudflare-dns-api.age
│ ├── consul-agent-token.age
│ ├── consul-encrypt.age
│ ├── gitea-runner-token.age
│ ├── influxdb-admin.age
│ ├── influxdb-homeassistant.age
│ ├── influxdb-telegraf.age
│ ├── k3s-token.age
│ ├── mqtt-admin.age
│ ├── mqtt-clock.age
│ ├── mqtt-sensor.age
│ ├── mqtt-zwave.age
│ ├── nomad-consul-token.age
│ ├── nomad-encrypt.age
│ ├── nomad-server-client-key.age
│ ├── secrets.nix
│ ├── skynet-server-consul-0-key.pem.age
│ ├── tailscale.age
│ ├── traefik-consul-token.age
│ └── wifi-env.age
└── status
├── nomad
├── bin
│ └── alloc-stream
├── consul
│ ├── fabio.policy.hcl
│ └── waypoint-ci-agent.policy.hcl
├── env-prod.fish
├── etc
│ ├── ca-config.json
│ ├── cfssl-config.json
│ ├── init-ca.fish
│ ├── make-certs.fish
│ └── nomad-ca.pem
├── examples
│ ├── linuxvm.nomad
│ ├── whoami-connect.nomad
│ └── whoami.nomad
├── fabio.nomad
├── forgejo.nomad
├── grafana.nomad
├── homeassistant.nomad
├── inbucket.nomad
├── linkwarden.nomad
├── logging.nomad
├── nix-cache.nomad
├── nodered.nomad
├── satisfactory.nomad
├── speedflux.nomad
└── syncthing.nomad
└── pkgs
└── octodns-cloudflare.nix
/.env.tmpl:
--------------------------------------------------------------------------------
1 | BIND_KEY_NAME="@bind_rndc_key_name"
2 | BIND_KEY_SECRET="@bind_rndc_key_secret"
3 |
4 | CLOUDFLARE_TOKEN="@cloudflare_dns_api_token"
5 |
6 | NOMAD_ADDR="https://nomad.service.consul:4646"
7 | NOMAD_CACERT="etc/nomad-ca.pem"
8 | NOMAD_TOKEN="@nomad_manager_token"
9 |
--------------------------------------------------------------------------------
/.envrc:
--------------------------------------------------------------------------------
1 | use flake
2 | dotenv
3 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Direnv
2 | /.direnv
3 | .env
4 |
5 | # Password files
6 | /gen_passwords.json
7 | /ansible/lowsec.yml
8 |
9 | # Certificates
10 | /nomad/etc/ca
11 | /nomad/etc/certs
12 |
13 | # Terraform
14 | /nixos/.terraform*
15 | /nixos/config.tf.json
16 | /nixos/terraform.tfstate*
17 |
18 | /nixos/log.txt
19 |
20 | /.vim
21 | result
22 | *.qcow2
23 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2020 James Hillyerd
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # homelab
2 |
3 | - `ansible`: My ansible config to capture telemetry and syslogs from Raspberry
4 | Pis.
5 | - `baseimage`: Flakes for generating minimal NixOS images for VMs and SD cards.
6 | - `esphome`: My esphome (ESP32 + Home Assistant) configs.
7 | - `nixos`: My NixOS home lab system configs flake.
8 |
9 | ## Diagrams
10 |
11 | ### Machines
12 |
13 | ```mermaid
14 | graph TD
15 | wan((WAN)) --- gw{{Gateway}}
16 | gw --- lan{LAN}
17 | nc1("nc-um350-1
(nomad client)") --- lan
18 | nc2("nc-um350-2
(nomad client)") --- lan
19 | nas("skynas
(NAS)") --- lan
20 | lan --- pve1
21 | lan --- pve2
22 | lan --- pve3
23 | subgraph pve1 ["pve1 (hypervisor)"]
24 | direction RL
25 | nexus("nexus")
26 | witness("witness")
27 | end
28 | subgraph pve2 ["pve2 (hypervisor)"]
29 | direction RL
30 | kube2("kube2")
31 | scratch("scratch")
32 | end
33 | subgraph pve3 ["pve3 (hypervisor)"]
34 | direction RL
35 | ci-runner1("ci-runner1")
36 | eph("eph")
37 | fastd("fastd")
38 | kube1("kube1")
39 | metrics("metrics")
40 | web("web")
41 | end
42 | ```
43 |
44 | ### Monitoring
45 |
46 | ```mermaid
47 | graph LR
48 | ha([home assistant]) ---> influxdb
49 | subgraph nixos ["all nixos nodes"]
50 | nix-telegraf([telegraf])
51 | nix-syslog([syslog])
52 | end
53 | subgraph nomad-nodes ["all nomad client nodes"]
54 | nomad([nomad client]) --> nomad-telegraf([telegraf])
55 | docker([docker logs]) --> nomad-vector([vector])
56 | end
57 | subgraph metrics ["node: metrics"]
58 | metrics-telegraf(["telegraf
- ping checks
- url checks"])
59 | metrics-telegraf --> influxdb([influxdb])
60 | loki([loki])
61 | nix-telegraf ---> influxdb
62 | nix-syslog ---> loki
63 | nomad-telegraf --> influxdb
64 | nomad-vector --> loki
65 | end
66 | influxdb -.-> grafana([grafana])
67 | loki -.-> grafana
68 | ```
69 |
--------------------------------------------------------------------------------
/ansible/README.md:
--------------------------------------------------------------------------------
1 | ## Setup
2 |
3 | Install dependencies:
4 |
5 | ```
6 | ansible-galaxy install -r requirements.yml
7 | ```
8 |
9 | ## Host bootstrapping
10 |
11 | Add the new host(s) to `hosts.yml`
12 |
13 | Create ansible user & group with:
14 |
15 | ```
16 | ansible-playbook bootstrap.yml -i hosts.yml -k -K --extra-vars "host=<host> user=james"
17 | ```
18 |
--------------------------------------------------------------------------------
/ansible/apply-production.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # Usage:
3 | # apply-production.sh [limit host pattern] [playbook tag]
4 |
5 | # Optionally limit applied hosts.
6 | limit=""
7 | if [ -n "$1" ]; then
8 | limit="--limit=$1"
9 | fi
10 |
11 | tags=""
12 | if [ -n "$2" ]; then
13 | tags="--tags=$2"
14 | fi
15 |
16 | set -x
17 |
18 | ansible-playbook site.yml --inventory hosts.yml $limit $tags
19 |
--------------------------------------------------------------------------------
/ansible/bootstrap.yml:
--------------------------------------------------------------------------------
1 | - hosts: '{{ host }}'
2 | become: yes
3 | gather_facts: no
4 |
5 | vars:
6 | ansible_user: '{{ user }}'
7 |
8 | tasks:
9 | - name: ansible group account
10 | group:
11 | name: ansible
12 |
13 | - name: ansible user account
14 | user:
15 | name: ansible
16 | comment: "Ansible Deployer"
17 | password: "*"
18 | group: ansible
19 |
20 | - name: sudo without password for ansible
21 | copy:
22 | content: "ansible ALL=(ALL:ALL) NOPASSWD: ALL"
23 | dest: /etc/sudoers.d/050_ansible-nopasswd
24 | mode: 0440
25 | validate: /usr/sbin/visudo -csf %s
26 |
27 | - name: setup authorized keys for ansible user
28 | authorized_key:
29 | user: ansible
30 | key: "{{ lookup('file', '../authorized_keys.txt') }}"
31 |
--------------------------------------------------------------------------------
/ansible/group_vars/mopidy.yml:
--------------------------------------------------------------------------------
1 | mopidy:
2 | jellyfin_host: "skynas.home.arpa:8096"
3 |
--------------------------------------------------------------------------------
/ansible/group_vars/raspbian.yml:
--------------------------------------------------------------------------------
1 | ansible_user: ansible
2 |
--------------------------------------------------------------------------------
/ansible/group_vars/rsyslog_fwd.yml:
--------------------------------------------------------------------------------
1 | rsyslog_remote: metrics.home.arpa
2 | rsyslog_remote_port: 514
3 |
4 | rsyslog_forward_rule_name: metrics_fwd
5 |
6 | rsyslog_mods:
7 | - imuxsock
8 | - imklog
9 |
10 | rsyslog_deploy_default_config: false
11 |
--------------------------------------------------------------------------------
/ansible/group_vars/telegraf.yml:
--------------------------------------------------------------------------------
1 | telegraf_agent_version: "1.25.3"
2 | telegraf_agent_package_method: repo
3 | telegraf_agent_hostname: "{{ ansible_hostname }}"
4 | telegraf_agent_output:
5 | - type: influxdb
6 | config:
7 | - urls = [ "http://metrics.home.arpa:8086" ]
8 | database = "telegraf-hosts"
9 | username = "{{ lowsec.influxdb.telegraf.user }}"
10 | password = "{{ lowsec.influxdb.telegraf.password }}"
11 |
--------------------------------------------------------------------------------
/ansible/hosts.yml:
--------------------------------------------------------------------------------
1 | all:
2 | children:
3 | # Mopidy nodes
4 | mopidy:
5 | hosts:
6 | theater-mopidy.dyn.home.arpa:
7 | ansible_host: 192.168.1.200
8 | # OctoPi nodes
9 | octopis:
10 | hosts:
11 | octopi.home.arpa:
12 | # Pis running Raspbian
13 | raspbian:
14 | children:
15 | mopidy:
16 | octopis:
17 | # Nodes monitored with telegraf
18 | telegraf:
19 | children:
20 | raspbian:
21 | # Nodes forwarding syslogs
22 | rsyslog_fwd:
23 | children:
24 | raspbian:
25 |
--------------------------------------------------------------------------------
/ansible/lowsec.yml.tmpl:
--------------------------------------------------------------------------------
1 | lowsec:
2 | influxdb:
3 | telegraf:
4 | user: "telegraf"
5 | password: "@influx_telegraf"
6 | mopidy:
7 | jellyfin:
8 | user: "mopidy"
9 | password: "@jellyfin_mopidy"
10 |
--------------------------------------------------------------------------------
/ansible/requirements.yml:
--------------------------------------------------------------------------------
1 | ---
2 | roles:
3 | - name: dj-wasabi.telegraf
4 | version: "0.14.0"
5 | - name: robertdebock.rsyslog
6 | version: "4.6.0"
7 |
--------------------------------------------------------------------------------
/ansible/roles/common/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: setup authorized keys for ansible user
2 | authorized_key:
3 | user: ansible
4 | key: "{{ lookup('file', '../authorized_keys.txt') }}"
5 |
--------------------------------------------------------------------------------
/ansible/roles/mopidy/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: Restart mopidy
2 | service:
3 | name: mopidy
4 | enabled: true
5 | state: restarted
6 |
--------------------------------------------------------------------------------
/ansible/roles/mopidy/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: install mopidy with plugins
2 | package:
3 | name:
4 | - mopidy
5 | - mopidy-mpd
6 | - gstreamer1.0-plugins-bad
7 | - python3-pip
8 | state: latest
9 | notify: "Restart mopidy"
10 |
11 | - name: install mopidy python packages
12 | pip:
13 | name:
14 | - mopidy-jellyfin
15 | - mopidy-iris
16 | state: latest
17 | notify: "Restart mopidy"
18 |
19 | - name: setup ram cache
20 | file:
21 | path: /var/run/mopidy
22 | state: directory
23 | owner: mopidy
24 | group: audio
25 | mode: "0755"
26 | notify: "Restart mopidy"
27 |
28 | - name: install mopidy config
29 | template:
30 | src: templates/mopidy.conf.j2
31 | dest: /etc/mopidy/mopidy.conf
32 | owner: mopidy
33 | group: root
34 | mode: "0640"
35 | notify: "Restart mopidy"
36 |
--------------------------------------------------------------------------------
/ansible/roles/mopidy/templates/mopidy.conf.j2:
--------------------------------------------------------------------------------
1 | [core]
2 | cache_dir = /var/run/mopidy
3 |
4 | [audio]
5 | output = autoaudiosink
6 |
7 | [file]
8 | enabled = false
9 |
10 | [http]
11 | enabled = true
12 | hostname = ::
13 | port = 6680
14 | zeroconf = Mopidy HTTP server on $hostname
15 | allowed_origins =
16 | csrf_protection = true
17 | default_app = mopidy
18 |
19 | [m3u]
20 | enabled = false
21 |
22 | [softwaremixer]
23 | enabled = true
24 |
25 | [stream]
26 | enabled = true
27 | protocols =
28 | http
29 | https
30 | mms
31 | rtmp
32 | rtmps
33 | rtsp
34 | metadata_blacklist =
35 | timeout = 5000
36 |
37 | [jellyfin]
38 | enabled = true
39 | hostname = {{ mopidy.jellyfin_host }}
40 | username = {{ lowsec.mopidy.jellyfin.user }}
41 | password = {{ lowsec.mopidy.jellyfin.password }}
42 |
43 | [mpd]
44 | enabled = true
45 | hostname = ::
46 | port = 6600
47 | password =
48 | max_connections = 20
49 | connection_timeout = 60
50 | zeroconf = Mopidy MPD server on $hostname
51 | command_blacklist =
52 | listall
53 | listallinfo
54 | default_playlist_scheme = m3u
55 |
--------------------------------------------------------------------------------
/ansible/site.yml:
--------------------------------------------------------------------------------
1 | - hosts: all
2 | roles: [ common ]
3 | tags: [ common ]
4 |
5 | - hosts: rsyslog_fwd
6 | vars_files:
7 | - lowsec.yml
8 | roles:
9 | - { role: robertdebock.rsyslog, become: yes }
10 | tags: [ rsyslog ]
11 |
12 | - hosts: telegraf
13 | vars_files:
14 | - lowsec.yml
15 | roles:
16 | - dj-wasabi.telegraf
17 | tags: [ telegraf ]
18 |
19 | - hosts: mopidy
20 | vars_files:
21 | - lowsec.yml
22 | roles:
23 | - { role: mopidy, become: yes }
24 | tags: [ mopidy ]
25 |
--------------------------------------------------------------------------------
/authorized_keys.txt:
--------------------------------------------------------------------------------
1 | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDZj6GV5aC3zX/P/STi0QDDaIUCwyDekLIKtN/L+s2vL8E1KxD69DLQ4DKV1fJUV97oo/Qv8pHUFgCQhEOYm5bchm+0Wc6ZBolcJ6q9KUNGIsaIa8ts6vQEG5k3pRI1E4kMrhggUJFOlSKxmcA9v+tEmZTlAo9wXn2wmqhmaLVfaGORwyMCuUc+2BP4xTwfuc+c0rb+kZOdp6+TuYiIXUOD9OqDrBkhFMe9bqNI0QxryACjid/qJvhjMos/fTeg7CgSsp+jP9ChVWnde0QquUVv5jmkKq2cdN2tfZdmin48cvAKAdtibpi4jQcIeWM7xWfEoE9T1u5tkfQgM8VhiV5EmSQrO/U9PIucKh64Vu+PGvQtbeNUcODd5Zkky0NDK2vrnIZTnGwQcTw4j5nDDUkgBHeW8jxT3Pf9lsCtJJL3edLxKwZA2+Dgf6EX2LxovvZVKYgfONhH1FRtv4V9ahoCPg0l1qdYX996Iihwc9wv8DfXMnWypEcytpKP2sXhUqc= james@Ryzen
2 | ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICAXHtE9NI16ZPNSKF6Cn0JNJS6fTNQYduerVmVa6WKY james@RYZEN
3 | ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIM1Rq9OFHVus5eULteCEGNkHgINch40oPP2LwvlVd6ng james@eph
4 | ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIB5SIs0HmrtQN+W7YFqIPpyTqTbRqW8Kq06h2btmXElG james@fractal
5 | ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICJoH0p+6iSISUAqRO8+6+uvQWpjaP0eQjDeGAXIYUI6 james@nix-ryzen
6 |
--------------------------------------------------------------------------------
/baseimage/README.md:
--------------------------------------------------------------------------------
1 | # baseimage
2 |
3 | This flake creates a base VM image with my public keys installed, ready for
4 | deployment.
5 |
6 | Usage example:
7 |
8 | ```
9 | nix build .#libvirt
10 | ```
11 |
--------------------------------------------------------------------------------
/baseimage/flake.lock:
--------------------------------------------------------------------------------
1 | {
2 | "nodes": {
3 | "nixlib": {
4 | "locked": {
5 | "lastModified": 1693701915,
6 | "narHash": "sha256-waHPLdDYUOHSEtMKKabcKIMhlUOHPOOPQ9UyFeEoovs=",
7 | "owner": "nix-community",
8 | "repo": "nixpkgs.lib",
9 | "rev": "f5af57d3ef9947a70ac86e42695231ac1ad00c25",
10 | "type": "github"
11 | },
12 | "original": {
13 | "owner": "nix-community",
14 | "repo": "nixpkgs.lib",
15 | "type": "github"
16 | }
17 | },
18 | "nixos-generators": {
19 | "inputs": {
20 | "nixlib": "nixlib",
21 | "nixpkgs": [
22 | "nixpkgs"
23 | ]
24 | },
25 | "locked": {
26 | "lastModified": 1705400161,
27 | "narHash": "sha256-0MFaNIwwpVWB1N9m7cfHAM2pSVtYESQ7tlHxnDTOhM4=",
28 | "owner": "nix-community",
29 | "repo": "nixos-generators",
30 | "rev": "521fb4cdd8a2e1a00d1adf0fea7135d1faf04234",
31 | "type": "github"
32 | },
33 | "original": {
34 | "owner": "nix-community",
35 | "repo": "nixos-generators",
36 | "type": "github"
37 | }
38 | },
39 | "nixpkgs": {
40 | "locked": {
41 | "lastModified": 1705331948,
42 | "narHash": "sha256-qjQXfvrAT1/RKDFAMdl8Hw3m4tLVvMCc8fMqzJv0pP4=",
43 | "owner": "nixos",
44 | "repo": "nixpkgs",
45 | "rev": "b8dd8be3c790215716e7c12b247f45ca525867e2",
46 | "type": "github"
47 | },
48 | "original": {
49 | "owner": "nixos",
50 | "ref": "nixos-23.11",
51 | "repo": "nixpkgs",
52 | "type": "github"
53 | }
54 | },
55 | "root": {
56 | "inputs": {
57 | "nixos-generators": "nixos-generators",
58 | "nixpkgs": "nixpkgs"
59 | }
60 | }
61 | },
62 | "root": "root",
63 | "version": 7
64 | }
65 |
--------------------------------------------------------------------------------
/baseimage/flake.nix:
--------------------------------------------------------------------------------
1 | {
2 | description = "VM deployment target base images";
3 |
4 | inputs = {
5 | nixpkgs.url = "github:nixos/nixpkgs/nixos-23.11";
6 |
7 | nixos-generators = {
8 | url = "github:nix-community/nixos-generators";
9 | inputs.nixpkgs.follows = "nixpkgs";
10 | };
11 | };
12 |
13 | outputs =
14 | {
15 | self,
16 | nixpkgs,
17 | nixos-generators,
18 | }:
19 | let
20 | inherit (nixpkgs) lib;
21 |
22 | system = "x86_64-linux";
23 | pkgs = nixpkgs.legacyPackages.${system};
24 |
25 | baseModule =
26 | { ... }:
27 | {
28 | services.openssh = {
29 | enable = true;
30 | settings.PermitRootLogin = "yes";
31 | };
32 |
33 | time.timeZone = "US/Pacific";
34 |
35 | users.users.root.openssh.authorizedKeys.keys = lib.splitString "\n" (
36 | builtins.readFile ../authorized_keys.txt
37 | );
38 |
39 | # Display the IP address at the login prompt.
40 | environment.etc."issue.d/ip.issue".text = ''
41 | This is a base image.
42 | IPv4: \4
43 | '';
44 | networking.dhcpcd.runHook = "${pkgs.utillinux}/bin/agetty --reload";
45 | };
46 |
47 | qemuModule =
48 | { ... }:
49 | {
50 | boot.kernelParams = [ "console=ttyS0" ];
51 |
52 | services.qemuGuest.enable = true;
53 | };
54 |
55 | proxmoxModule =
56 | { modulesPath, ... }:
57 | {
58 | imports = [ (modulesPath + "/profiles/qemu-guest.nix") ];
59 |
60 | boot.kernelParams = [ "console=ttyS0" ];
61 |
62 | networking.useDHCP = false;
63 |
64 | services.cloud-init = {
65 | enable = true;
66 | network.enable = true;
67 |
68 | settings = {
69 | system_info = {
70 | distro = "nixos";
71 | network.renderers = [ "networkd" ];
72 | };
73 |
74 | ssh_pwauth = true;
75 |
76 | # Network stage.
77 | cloud_init_modules = [
78 | "migrator"
79 | "seed_random"
80 | "growpart"
81 | "resizefs"
82 | "set_hostname"
83 | ];
84 |
85 | # Config stage.
86 | cloud_config_modules = [
87 | "disk_setup"
88 | "mounts"
89 | "set-passwords"
90 | "ssh"
91 | ];
92 | };
93 | };
94 |
95 | services.qemuGuest.enable = true;
96 | };
97 | in
98 | {
99 | packages.${system} = {
100 | hyperv = nixos-generators.nixosGenerate {
101 | inherit pkgs;
102 | modules = [ baseModule ];
103 | format = "hyperv";
104 | };
105 |
106 | libvirt = nixos-generators.nixosGenerate {
107 | inherit pkgs;
108 | modules = [
109 | baseModule
110 | qemuModule
111 | ];
112 | format = "qcow";
113 | };
114 |
115 | proxmox = nixos-generators.nixosGenerate {
116 | inherit pkgs;
117 | modules = [
118 | baseModule
119 | proxmoxModule
120 | ];
121 | format = "qcow";
122 | };
123 | };
124 | };
125 | }
126 |
--------------------------------------------------------------------------------
/baseimage/pve-cloudinit.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # Imports a cloud-init disk image into Proxmox VE; must be run on PVE host.
3 | #
4 | # Usage:
5 | # pve-cloudinit.sh [VM image path]
6 |
7 | set -e
8 |
9 | function fail {
10 | echo "$1" >&2
11 | exit 1
12 | }
13 |
14 | id="$1"
15 | image="${2:-/root/nixos.qcow2}"
16 |
17 | test -n "$id" || fail "Missing VM ID (first argument)"
18 | test -r "$image" || fail "Image '$image' image is unreadable"
19 |
20 | set -x
21 |
22 | qm create $id --memory 2048 --net0 virtio,bridge=vmbr0 --scsihw virtio-scsi-pci
23 | qm set $id --scsi0 "local-lvm:0,discard=on,import-from=$image"
24 | qm set $id --ide2 local-lvm:cloudinit
25 | qm set $id --boot order=scsi0 --ostype l26
26 | qm set $id --serial0 socket --vga serial0
27 | qm set $id --ipconfig0 ip=dhcp
28 | qm set $id --ciupgrade 0 --agent 1
29 | qm cloudinit update $id
30 | qm template $id
31 |
--------------------------------------------------------------------------------
/consul/README.md:
--------------------------------------------------------------------------------
1 | In addition to the token and policy created by `create-agent-poltok`,
2 | you'll need to have some nomad specific policies that can be applied to the
3 | tokens. This is easier to do in the consul web UI for a small number of
4 | nodes.
5 |
6 | ## `nomad-server` policy
7 |
8 | ```hcl
9 | agent_prefix "" {
10 | policy = "read"
11 | }
12 |
13 | node_prefix "" {
14 | policy = "read"
15 | }
16 |
17 | service_prefix "" {
18 | policy = "write"
19 | }
20 |
21 | acl = "write"
22 | operator = "write"
23 | ```
24 |
25 | ## `nomad-client` policy
26 |
27 | ```hcl
28 | agent_prefix "" {
29 | policy = "read"
30 | }
31 |
32 | node_prefix "" {
33 | policy = "read"
34 | }
35 |
36 | service_prefix "" {
37 | policy = "write"
38 | }
39 |
40 | key_prefix "" {
41 | policy = "read"
42 | }
43 | ```
44 |
--------------------------------------------------------------------------------
/consul/create-agent-poltok:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | domain="skynet.local"
4 | server="$1"
5 | agent="$2"
6 |
7 | if [ -z "$CONSUL_HTTP_TOKEN" ]; then
8 | echo "\$CONSUL_HTTP_TOKEN must be set"
9 | exit 128
10 | fi
11 |
12 | if [ -z "$server" ]; then
13 | echo "No server specified"
14 | exit 128
15 | fi
16 |
17 | if [ -z "$agent" ]; then
18 | echo "No agent specified"
19 | exit 128
20 | fi
21 |
22 | # Create ACL policy for agent.
23 | consul acl policy create -name "$agent" -http-addr "$server.$domain:8500" \
24 | -rules - <"
36 |
--------------------------------------------------------------------------------
/esphome/.gitignore:
--------------------------------------------------------------------------------
1 | # Gitignore settings for ESPHome
2 | # This is an example and may include too much for your use-case.
3 | # You can modify this file to suit your needs.
4 | /.esphome/
5 | **/.pioenvs/
6 | **/.piolibdeps/
7 | **/lib/
8 | **/src/
9 | **/platformio.ini
10 | **/secrets.yaml
11 |
12 | family_esp32
13 | office_esp32
14 |
--------------------------------------------------------------------------------
/esphome/README.md:
--------------------------------------------------------------------------------
1 | # esphome
2 |
3 | ## Building
4 |
5 | Check, build, upload, start log capture:
6 |
7 |     esphome run <config>.yaml
8 |
--------------------------------------------------------------------------------
/esphome/base/mhet-devkit-ble.yaml:
--------------------------------------------------------------------------------
1 | esphome:
2 | name: ${device_name}
3 |
4 | esp32:
5 | board: mhetesp32devkit
6 |
7 | wifi:
8 | ssid: !secret 'wifi_ssid'
9 | password: !secret 'wifi_pass'
10 | domain: .dyn.home.arpa
11 |
12 | # Enable fallback hotspot (captive portal) in case wifi connection fails
13 | ap:
14 | ssid: "${device_name} Fallback Hotspot"
15 | password: !secret 'wifi_pass'
16 |
17 | captive_portal:
18 |
19 | # Enable logging
20 | logger:
21 |
22 | # Enable Home Assistant API
23 | api:
24 | encryption:
25 | key: !secret 'api_key'
26 |
27 | ota:
28 | password: !secret 'ota_pass'
29 |
30 | esp32_ble_tracker:
31 |
32 | sensor:
33 | - platform: uptime
34 | name: "${device_name} Uptime Sensor"
35 |
36 | - platform: wifi_signal
37 | name: "${device_name} WiFi Signal"
38 | update_interval: 60s
39 |
--------------------------------------------------------------------------------
/esphome/base/novostella-20w-flood.yaml:
--------------------------------------------------------------------------------
1 | esphome:
2 | name: ${device_name}
3 | comment: ${device_description}
4 | friendly_name: ${friendly_name}
5 |
6 | bk72xx:
7 | board: generic-bk7231n-qfn32-tuya
8 |
9 | logger:
10 |
11 | web_server:
12 |
13 | captive_portal:
14 |
15 | mdns:
16 |
17 | api:
18 | encryption:
19 | key: !secret 'api_key'
20 |
21 | ota:
22 | password: !secret 'ota_pass'
23 |
24 | wifi:
25 | ssid: !secret 'wifi_ssid'
26 | password: !secret 'wifi_pass'
27 | domain: .dyn.home.arpa
28 | ap:
29 | ssid: "${device_name} AP"
30 | password: !secret 'wifi_pass'
31 |
32 | button:
33 | - platform: restart
34 | name: Restart
35 |
36 | debug:
37 | update_interval: 30s
38 |
39 | text_sensor:
40 | - platform: debug
41 | reset_reason:
42 | name: Reset Reason
43 | - platform: libretiny
44 | version:
45 | name: LibreTiny Version
46 |
47 | binary_sensor:
48 | # Reports if this device is Connected or not
49 | - platform: status
50 | name: ${friendly_name} Status
51 |
52 | sensor:
53 | # Reports the WiFi signal strength
54 | - platform: wifi_signal
55 | name: ${friendly_name} Signal
56 | update_interval: 60s
57 |
58 | # Reports how long the device has been powered (in minutes)
59 | - platform: uptime
60 | name: ${friendly_name} Uptime
61 | filters:
62 | - lambda: return x / 60.0;
63 | unit_of_measurement: minutes
64 |
65 | output:
66 | - platform: libretiny_pwm
67 | id: red
68 | pin: P6
69 | - platform: libretiny_pwm
70 | id: green
71 | pin: P7
72 | - platform: libretiny_pwm
73 | id: blue
74 | pin: P8
75 | - platform: libretiny_pwm
76 | id: cold_white
77 | pin: P26
78 | - platform: libretiny_pwm
79 | id: warm_white
80 | pin: P24
81 |
82 | light:
83 | - platform: rgbww
84 | name: ${friendly_name}
85 | red: red
86 | green: green
87 | blue: blue
88 | cold_white: cold_white
89 | warm_white: warm_white
90 | cold_white_color_temperature: 6500 K
91 | warm_white_color_temperature: 2700 K
92 | id: thelight
93 | color_interlock: true #Prevent white leds being on at the same time as RGB leds
94 | restore_mode: restore_default_off
95 | effects:
96 | - random:
97 | - strobe:
98 | - flicker:
99 | alpha: 50% #The percentage that the last color value should affect the light. More or less the “forget-factor” of an exponential moving average. Defaults to 95%.
100 | intensity: 50% #The intensity of the flickering, basically the maximum amplitude of the random offsets. Defaults to 1.5%.
101 | - lambda:
102 | name: Throb
103 | update_interval: 1s
104 | lambda: |-
105 | static int state = 0;
106 | auto call = id(thelight).turn_on();
107 |           // Transition of 1000ms = 1s
108 | call.set_transition_length(1000);
109 | if (state == 0) {
110 | call.set_brightness(1.0);
111 | } else {
112 | call.set_brightness(0.01);
113 | }
114 | call.perform();
115 | state += 1;
116 | if (state == 2)
117 | state = 0;
118 |
--------------------------------------------------------------------------------
/esphome/base/secrets.yaml.tmpl:
--------------------------------------------------------------------------------
1 | wifi_ssid: "@not_wifi_ssid"
2 | wifi_pass: "@not_wifi_pass"
3 | api_key: "@esphome_api_key"
4 | ota_pass: "@esphome_ota_pass"
5 |
--------------------------------------------------------------------------------
/esphome/family_esp32.yaml:
--------------------------------------------------------------------------------
1 | substitutions:
2 | device_name: family_esp32
3 |
4 | sensor:
5 | - platform: uptime
6 | name: "${device_name} Uptime Sensor"
7 |
8 | - platform: wifi_signal
9 | name: "${device_name} WiFi Signal"
10 | update_interval: 60s
11 |
12 | - platform: atc_mithermometer
13 | mac_address: "A4:C1:38:1B:55:C5"
14 | temperature:
15 | name: "Dining Mi Temperature"
16 | humidity:
17 | name: "Dining Mi Humidity"
18 | battery_level:
19 | name: "Dining Mi Battery Level"
20 |
21 | - platform: atc_mithermometer
22 | mac_address: "A4:C1:38:C2:FA:A2"
23 | temperature:
24 | name: "Garage Mi Temperature"
25 | humidity:
26 | name: "Garage Mi Humidity"
27 | battery_level:
28 | name: "Garage Mi Battery Level"
29 |
30 | <<: !include base/mhet-devkit-ble.yaml
31 |
--------------------------------------------------------------------------------
/esphome/novostella-flood-1.yaml:
--------------------------------------------------------------------------------
1 | substitutions:
2 | device_name: novostella-flood-light-1
3 | device_description: 20W RGBWW flood light
4 | friendly_name: Novostella Flood Light 1
5 |
6 | <<: !include base/novostella-20w-flood.yaml
7 |
--------------------------------------------------------------------------------
/esphome/novostella-flood-2.yaml:
--------------------------------------------------------------------------------
1 | substitutions:
2 | device_name: novostella-flood-light-2
3 | device_description: 20W RGBWW flood light
4 | friendly_name: Novostella Flood Light 2
5 |
6 | <<: !include base/novostella-20w-flood.yaml
7 |
--------------------------------------------------------------------------------
/esphome/novostella-flood-3.yaml:
--------------------------------------------------------------------------------
# Device-specific identity; all other config comes from the shared base file.
substitutions:
  device_name: novostella-flood-light-3
  device_description: 20W RGBWW flood light
  friendly_name: Novostella Flood Light 3

# Merge in the shared Novostella 20W flood light configuration.
<<: !include base/novostella-20w-flood.yaml
7 |
--------------------------------------------------------------------------------
/esphome/novostella-flood-4.yaml:
--------------------------------------------------------------------------------
# Device-specific identity; all other config comes from the shared base file.
substitutions:
  device_name: novostella-flood-light-4
  device_description: 20W RGBWW flood light
  friendly_name: Novostella Flood Light 4

# Merge in the shared Novostella 20W flood light configuration.
<<: !include base/novostella-20w-flood.yaml
7 |
--------------------------------------------------------------------------------
/esphome/office_esp32.yaml:
--------------------------------------------------------------------------------
# ESP32 BLE bridge in the office; shared board config comes from the
# included base file.
substitutions:
  device_name: office_esp32

sensor:
  # Device health metrics, named after this device.
  - platform: uptime
    name: "${device_name} Uptime Sensor"

  - platform: wifi_signal
    name: "${device_name} WiFi Signal"
    update_interval: 60s

  # Bluetooth LE thermometer/hygrometer sensors, keyed by MAC address.
  - platform: atc_mithermometer
    mac_address: "A4:C1:38:0F:5D:64"
    temperature:
      name: "Master Mi Temperature"
    humidity:
      name: "Master Mi Humidity"
    battery_level:
      name: "Master Mi Battery Level"

  - platform: atc_mithermometer
    mac_address: "A4:C1:38:F0:22:38"
    temperature:
      name: "Office Mi Temperature"
    humidity:
      name: "Office Mi Humidity"
    battery_level:
      name: "Office Mi Battery Level"

# Merge in the shared devkit + BLE base configuration.
<<: !include base/mhet-devkit-ble.yaml
31 |
--------------------------------------------------------------------------------
/etc/gen_passwords:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # gen-passwords:
3 | # Render template files with random passwords.
4 | #
5 | # Replacement delimiter is @, and passwords with the same name will
6 | # be the same for all template files. '@@' renders a single '@'.
7 | #
8 | # Examples: "@password", zz@{zzPassword}zz, alias@@host.email.com
9 |
10 | from collections import defaultdict
11 | import json
12 | import secrets
13 | import string
14 | import sys
15 |
# Template to output mappings: each key is a template file path, rendered
# into the corresponding output file path.
template_outputs = {
    '.env.tmpl': '.env',
    'ansible/lowsec.yml.tmpl': 'ansible/lowsec.yml',
    'esphome/base/secrets.yaml.tmpl': 'esphome/base/secrets.yaml',
}

# JSON cache of already generated passwords; keeps values stable across runs.
password_cache = 'gen_passwords.json'

# Characters used to generate passwords (alphanumerics only).
password_alphabet = string.ascii_letters + string.digits

# Length of each generated password, in characters.
password_length = 16


class PasswordTemplate(string.Template):
    """string.Template variant that uses '@' as the placeholder delimiter."""
    delimiter = '@'
35 |
36 |
def main():
    """Render every template in ``template_outputs``, then update the cache.

    All templates share one password dictionary, so placeholders with the
    same name resolve to the same password across files.  Exits with an
    error message if a template cannot be read, rendered, or written.
    """
    # Use the same dictionary for all templates.
    password_dict = load_cache()

    for in_name, out_name in template_outputs.items():
        print(f'Generating "{out_name}" from template "{in_name}"',
              file=sys.stderr)

        try:
            with open(in_name) as in_file:
                in_template = PasswordTemplate(in_file.read())
            # Missing names are generated on demand by the defaultdict;
            # ValueError is raised for malformed placeholder syntax.
            out_data = in_template.substitute(password_dict)
            with open(out_name, 'w') as out_file:
                # write() instead of print(): the template text already ends
                # with a newline, so print() would append a spurious blank line.
                out_file.write(out_data)
        except (OSError, ValueError) as err:
            # Narrowed from BaseException, which would also swallow
            # KeyboardInterrupt and SystemExit.
            sys.exit(err)

    save_cache(password_dict)
56 |
57 |
def gen_password():
    """Return a random password of ``password_length`` characters.

    Each character is drawn from ``password_alphabet`` using the
    cryptographically secure ``secrets`` RNG.
    """
    chars = [secrets.choice(password_alphabet) for _ in range(password_length)]
    return ''.join(chars)
61 |
def load_cache():
    """Return a defaultdict of passwords seeded from the JSON cache file.

    A missing cache file is not an error: a note is printed and an empty
    dictionary is used.  Unknown names are generated lazily by
    ``gen_password`` via the defaultdict factory.
    """
    cached = {}
    try:
        with open(password_cache, 'r') as in_file:
            cached = json.load(in_file)
    except FileNotFoundError:
        print(f'Cache "{password_cache}" not found, continuing', file=sys.stderr)

    return defaultdict(gen_password, cached)
71 |
72 |
def save_cache(password_dict):
    """Persist ``password_dict`` to the JSON cache, sorted and indented."""
    print(f'Updating cache "{password_cache}"', file=sys.stderr)

    serialized = json.dumps(password_dict, sort_keys=True, indent=2)
    with open(password_cache, 'w') as out_file:
        out_file.write(serialized)
78 |
79 |
80 | if __name__ == "__main__":
81 | main()
82 |
--------------------------------------------------------------------------------
/flake.lock:
--------------------------------------------------------------------------------
1 | {
2 | "nodes": {
3 | "agenix": {
4 | "inputs": {
5 | "darwin": "darwin",
6 | "home-manager": "home-manager",
7 | "nixpkgs": [
8 | "nixpkgs"
9 | ]
10 | },
11 | "locked": {
12 | "lastModified": 1703089996,
13 | "narHash": "sha256-ipqShkBmHKC9ft1ZAsA6aeKps32k7+XZSPwfxeHLsAU=",
14 | "owner": "ryantm",
15 | "repo": "agenix",
16 | "rev": "564595d0ad4be7277e07fa63b5a991b3c645655d",
17 | "type": "github"
18 | },
19 | "original": {
20 | "owner": "ryantm",
21 | "ref": "0.15.0",
22 | "repo": "agenix",
23 | "type": "github"
24 | }
25 | },
26 | "darwin": {
27 | "inputs": {
28 | "nixpkgs": [
29 | "agenix",
30 | "nixpkgs"
31 | ]
32 | },
33 | "locked": {
34 | "lastModified": 1673295039,
35 | "narHash": "sha256-AsdYgE8/GPwcelGgrntlijMg4t3hLFJFCRF3tL5WVjA=",
36 | "owner": "lnl7",
37 | "repo": "nix-darwin",
38 | "rev": "87b9d090ad39b25b2400029c64825fc2a8868943",
39 | "type": "github"
40 | },
41 | "original": {
42 | "owner": "lnl7",
43 | "ref": "master",
44 | "repo": "nix-darwin",
45 | "type": "github"
46 | }
47 | },
48 | "flake-utils": {
49 | "inputs": {
50 | "systems": "systems"
51 | },
52 | "locked": {
53 | "lastModified": 1731533236,
54 | "narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
55 | "owner": "numtide",
56 | "repo": "flake-utils",
57 | "rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
58 | "type": "github"
59 | },
60 | "original": {
61 | "owner": "numtide",
62 | "repo": "flake-utils",
63 | "type": "github"
64 | }
65 | },
66 | "home-manager": {
67 | "inputs": {
68 | "nixpkgs": [
69 | "agenix",
70 | "nixpkgs"
71 | ]
72 | },
73 | "locked": {
74 | "lastModified": 1682203081,
75 | "narHash": "sha256-kRL4ejWDhi0zph/FpebFYhzqlOBrk0Pl3dzGEKSAlEw=",
76 | "owner": "nix-community",
77 | "repo": "home-manager",
78 | "rev": "32d3e39c491e2f91152c84f8ad8b003420eab0a1",
79 | "type": "github"
80 | },
81 | "original": {
82 | "owner": "nix-community",
83 | "repo": "home-manager",
84 | "type": "github"
85 | }
86 | },
87 | "nixpkgs": {
88 | "locked": {
89 | "lastModified": 1747862697,
90 | "narHash": "sha256-U4HaNZ1W26cbOVm0Eb5OdGSnfQVWQKbLSPrSSa78KC0=",
91 | "owner": "nixos",
92 | "repo": "nixpkgs",
93 | "rev": "2baa12ff69913392faf0ace833bc54bba297ea95",
94 | "type": "github"
95 | },
96 | "original": {
97 | "owner": "nixos",
98 | "ref": "nixos-24.11",
99 | "repo": "nixpkgs",
100 | "type": "github"
101 | }
102 | },
103 | "nixpkgs-unstable": {
104 | "locked": {
105 | "lastModified": 1747958103,
106 | "narHash": "sha256-qmmFCrfBwSHoWw7cVK4Aj+fns+c54EBP8cGqp/yK410=",
107 | "owner": "nixos",
108 | "repo": "nixpkgs",
109 | "rev": "fe51d34885f7b5e3e7b59572796e1bcb427eccb1",
110 | "type": "github"
111 | },
112 | "original": {
113 | "owner": "nixos",
114 | "ref": "nixpkgs-unstable",
115 | "repo": "nixpkgs",
116 | "type": "github"
117 | }
118 | },
119 | "root": {
120 | "inputs": {
121 | "agenix": "agenix",
122 | "flake-utils": "flake-utils",
123 | "nixpkgs": "nixpkgs",
124 | "nixpkgs-unstable": "nixpkgs-unstable"
125 | }
126 | },
127 | "systems": {
128 | "locked": {
129 | "lastModified": 1681028828,
130 | "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
131 | "owner": "nix-systems",
132 | "repo": "default",
133 | "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
134 | "type": "github"
135 | },
136 | "original": {
137 | "owner": "nix-systems",
138 | "repo": "default",
139 | "type": "github"
140 | }
141 | }
142 | },
143 | "root": "root",
144 | "version": 7
145 | }
146 |
--------------------------------------------------------------------------------
/flake.nix:
--------------------------------------------------------------------------------
{
  description = "my nixos & ansible configuration";

  inputs = {
    nixpkgs.url = "github:nixos/nixpkgs/nixos-24.11";
    nixpkgs-unstable.url = "github:nixos/nixpkgs/nixpkgs-unstable";

    # Secrets management, pinned to a release and reusing our nixpkgs.
    agenix.url = "github:ryantm/agenix/0.15.0";
    agenix.inputs.nixpkgs.follows = "nixpkgs";

    flake-utils.url = "github:numtide/flake-utils";
  };

  outputs =
    {
      nixpkgs,
      nixpkgs-unstable,
      flake-utils,
      agenix,
      ...
    }:
    flake-utils.lib.eachDefaultSystem (
      system:
      let
        pkgs = import nixpkgs {
          inherit system;
          config.allowUnfree = true;
        };
        # NOTE(review): `unstable` appears unused in this output — confirm
        # whether it can be removed.
        unstable = nixpkgs-unstable.legacyPackages.${system};
      in
      {
        # Development shell carrying the homelab tooling (ansible, esphome,
        # octodns, nomad, ...).
        devShell =
          let
            octodns-cloudflare = pkgs.python3Packages.callPackage ./pkgs/octodns-cloudflare.nix { };
          in
          pkgs.mkShell {
            buildInputs =
              (with pkgs; [
                ansible
                cfssl
                consul
                esphome
                kubectl
                nomad_1_8
                octodns
                octodns-providers.bind
                openssl
                platformio
                sshpass
              ])
              ++ [
                agenix.packages.${system}.default
                octodns-cloudflare
              ];
          };
      }
    );
}
59 |
--------------------------------------------------------------------------------
/nixos/README.md:
--------------------------------------------------------------------------------
1 | # nixos flake
2 |
3 | ## Terminology
4 |
Knowing how I use these terms will help you better understand the layout
of this flake:
7 |
- catalog: The high-level configuration of my homelab:
9 | - common: configuration that can be referenced by any part of this flake.
10 | - nodes: combines _host_, _hw_, and network configuration into a deployable
11 | NixOS configuration.
12 | - services: defines how a particular service may be accessed, including
13 | load-balanced URLs, whether to generate DNS entries, and the icon to display
14 | on my internal homepage.
15 | - host: A grouping of _roles_ and _services_ that will be applied to one or
16 | more _nodes_. A _node_ may only have a single host type.
17 | - hw: Hardware configuration for physical and virtual machines.
18 | - node: A specific VM or machine on my network.
19 | - role: A nix module configuring one or more _services_ for one or more hosts.
20 | - service: Standard nix modules for configuring services.
21 |
22 | ## Ways to use this flake
23 |
24 | ### Update NixOS channel
25 |
26 | ```sh
27 | nix flake update
28 | ```
29 |
30 | ### Push to running libvirtd or Hyper-V *test* VM
31 |
32 | ```sh
33 | ./deploy virt-$host root@$hostIP
34 | ```
35 |
36 | or
37 |
38 | ```sh
39 | ./deploy hyper-$host root@$hostIP
40 | ```
41 |
42 | See `../baseimage` for initial boot base images for VMs.
43 |
44 | ### Push to running bare-metal prod machine
45 |
46 | ```sh
47 | ./deploy $host root@$hostIP
48 | ```
49 |
50 | ### Rebuild localhost with specified host config
51 |
52 | ```sh
53 | sudo nixos-rebuild --flake ".#$host" boot
54 | ```
55 |
56 | ### Build SD card image for host
57 |
58 | ```sh
59 | nix build ".#images.$host"
60 | sudo dd bs=4M conv=fsync if=result/sd-image/*-linux.img of=/dev/sdX
61 | ```
62 |
63 | ## Convenience scripts
64 |
65 | **Note:** These are obsoleted by my deployment TUI:
66 | https://github.com/jhillyerd/labcoat/
67 |
68 | When iterating on my homelab, I often repeat the following steps:
69 |
70 | 1. Check an existing node for failed services, uptime, and disk space
71 | 2. Deploy an updated NixOS config to the host
72 | 3. Check for failed services
73 | 4. Reboot the node
74 | 5. Check for failed services, and confirm the uptime is now zero
75 |
76 | The scripts in this directory simplify the process to:
77 |
78 | ```sh
79 | host=
80 | ./status
81 | ./deploy
82 | ./status
83 | ./restart
84 | ./status
85 | ```
86 |
87 | Additionally, `deploy` adds an entry to log.txt with the git commit deployed,
88 | helping me identify out-of-date nodes. Example:
89 |
90 | ```
91 | web [2024-01-18 13:41:16 -0800] e38c39c proxmox: use serial console
92 | nexus [2024-01-21 16:03:31 -0800] 0769380 dns: add kube records
93 | nc-um350-2 [2024-01-21 16:39:14 -0800] ebece50 dns: forward local zones on nomad clients
94 | nc-um350-1 [2024-01-21 16:39:14 -0800] ebece50 dns: forward local zones on nomad clients
95 | metrics [2024-01-21 16:47:48 -0800] da374b9 k3s: add first node
96 | kube1 [2024-01-22 10:39:26 -0800] 266e67f catalog: organize default.nix
97 | ```
98 |
--------------------------------------------------------------------------------
/nixos/build-image:
--------------------------------------------------------------------------------
#!/bin/sh
# build-image <host>:
#   Builds the image for the specified host name.
#

# Fail fast on errors and unset variables.
set -eu

host="${1:-}"

# Without a host name, `nix build ".#images."` would fail confusingly;
# report proper usage instead.
if [ -z "$host" ]; then
    echo "usage: $0 <host>" >&2
    exit 1
fi

nix build ".#images.$host"
9 |
--------------------------------------------------------------------------------
/nixos/catalog/default.nix:
--------------------------------------------------------------------------------
# Catalog defines the systems & services on my network.
{ system }:
rec {
  # `rec` lets later entries (services, monitors) reference nodes/consul/nomad.
  nodes = import ./nodes.nix { inherit system; };
  services = import ./services.nix {
    inherit
      nodes
      consul
      nomad
      k3s
      ;
  };
  monitors = import ./monitors.nix { inherit consul nomad; };

  # Common config across most machines.
  cf-api.user = "james@hillyerd.com";
  smtp.host = "mail.home.arpa";
  syslog.host = nodes.metrics.ip.priv;
  syslog.port = 1514;
  tailscale.interface = "tailscale0";

  # Role/service specific configuration.
  authelia = {
    host = nodes.web.ip.priv;
    port = 9091;
  };

  # Consul server node private IPs.
  consul = {
    servers = with nodes; [
      nc-um350-1.ip.priv
      nc-um350-2.ip.priv
      witness.ip.priv
    ];
  };

  # Authoritative nameserver addresses.
  dns = with nodes; {
    ns1 = nexus.ip.priv;
    ns2 = nc-um350-1.ip.priv;
    ns3 = nc-um350-2.ip.priv;
  };

  k3s = {
    leader = nodes.kube1;

    workers = with nodes; [
      kube1.ip.priv
      kube2.ip.priv
    ];
  };

  influxdb = rec {
    host = nodes.metrics.ip.priv;
    port = 8086;
    telegraf.user = "telegraf";
    telegraf.database = "telegraf-hosts";
    urls = [ "http://${host}:${toString port}" ];
  };

  nomad = {
    servers = with nodes; [
      nc-um350-1.ip.priv
      nc-um350-2.ip.priv
      witness.ip.priv
    ];

    # Host volume names available to nomad jobs.
    skynas-host-volumes = [
      "forgejo-data"
      "gitea-storage"
      "grafana-storage"
      "homeassistant-data"
      "linkwarden-data"
      "linkwarden-meili"
      "nodered-data"
      "piper-data"
      "satisfactory-data"
      "syncthing-data"
      "whisper-data"
      "zwavejs-data"
    ];
  };

  # Named TCP/UDP load balancer entry points.
  traefik.entrypoints = {
    factorygame = ":7777/udp";
    factorybeacon = ":15000/udp";
    factoryquery = ":15777/udp";

    smtp = ":25/tcp";
    ssh = ":222/tcp";
    websecure = ":443/tcp";
    extweb = ":8443/tcp";
  };

  # Layout of services on the dashboard.
  layout = [
    {
      section = "Services";
      services = [
        "fluidd"
        "forgejo"
        "grafana"
        "homeassistant"
        "inbucket"
        "nodered"
        "syncthing"
      ];
    }
    {
      section = "Cluster";
      services = [
        "consul"
        "nomad"
        "proxmox"
        "dockreg"
        "argocd"
      ];
    }
    {
      section = "Infrastructure";
      services = [
        "modem"
        "skynas"
        "traefik"
        "unifi"
        "zwavejs"
      ];
    }
  ];
}
130 |
--------------------------------------------------------------------------------
/nixos/catalog/monitors.nix:
--------------------------------------------------------------------------------
{ consul, nomad, ... }:
{
  # Telegraf monitoring config.
  # Each entry polls a URL and asserts the expected HTTP status code.
  http_response = [
    {
      urls = [ "https://consul.bytemonkey.org/ui/" ];
      response_status_code = 200;
    }
    {
      urls = [ "http://demo.inbucket.org/status" ];
      response_status_code = 200;
    }
    {
      urls = [ "https://dockreg.bytemonkey.org/v2/" ];
      response_status_code = 200;
    }
    {
      urls = [ "https://forgejo.bytemonkey.org" ];
      response_status_code = 200;
    }
    {
      # 401 expected — presumably Grafana requires auth here; confirm.
      urls = [ "https://grafana.bytemonkey.org/" ];
      response_status_code = 401;
    }
    {
      urls = [ "https://homeassistant.bytemonkey.org/" ];
      response_status_code = 200;
    }
    {
      urls = [ "https://inbucket.bytemonkey.org/" ];
      response_status_code = 200;
    }
    {
      urls = [ "http://msdde3.home.arpa/" ];
      response_status_code = 200;
    }
    {
      urls = [ "https://nodered.bytemonkey.org" ];
      response_status_code = 200;
    }
    {
      urls = [ "https://nomad.bytemonkey.org/ui/" ];
      response_status_code = 200;
    }
    {
      urls = [ "https://zwavejs.bytemonkey.org/" ];
      response_status_code = 200;
    }
  ];

  # Hosts checked for basic reachability.
  ping = [
    "gateway.home.arpa"
    "msdde3.home.arpa"
    "nexus.home.arpa"
    "nc-um350-1.home.arpa"
    "nc-um350-2.home.arpa"
    "pve1.home.arpa"
    "pve2.home.arpa"
    "pve3.home.arpa"
    "skynas.home.arpa"
    "web.home.arpa"
    "witness.home.arpa"
  ];

  # TLS certificate expiry checks: the dashboard plus every consul (8300)
  # and nomad (4646) server endpoint.
  x509_certs =
    [ "https://dash.bytemonkey.org/" ]
    ++ (map (ip: "https://${ip}:8300") consul.servers)
    ++ (map (ip: "https://${ip}:4646") nomad.servers);
}
70 |
--------------------------------------------------------------------------------
/nixos/catalog/nodes.nix:
--------------------------------------------------------------------------------
{ system }:
# Node catalog: one entry per machine/VM, pairing a host config with a
# hardware profile and platform. `ip.priv` is the LAN address and
# `ip.tail` the tailscale address, where present.
{
  # NOTE(review): no ip entry — presumably a roaming machine; confirm.
  carbon = {
    config = ../hosts/carbon.nix;
    hw = ../hw/tp-x1g3.nix;
    system = system.x86_64-linux;
  };

  ci-runner1 = {
    ip.priv = "192.168.131.4";
    config = ../hosts/ci-runner.nix;
    hw = ../hw/proxmox.nix;
    system = system.x86_64-linux;
  };

  eph = {
    ip.priv = "192.168.128.44";
    ip.tail = "100.119.252.34";
    config = ../hosts/eph.nix;
    hw = ../hw/proxmox.nix;
    system = system.x86_64-linux;
  };

  fastd = {
    ip.priv = "192.168.131.5";
    hostId = "f4fa7292";
    config = ../hosts/fastd.nix;
    hw = ../hw/proxmox.nix;
    system = system.x86_64-linux;
  };

  game = {
    ip.priv = "192.168.131.6";
    config = ../hosts/game.nix;
    hw = ../hw/proxmox.nix;
    system = system.x86_64-linux;
  };

  kube1 = {
    ip.priv = "192.168.132.1";
    config = ../hosts/k3s.nix;
    hw = ../hw/proxmox.nix;
    system = system.x86_64-linux;
  };

  kube2 = {
    ip.priv = "192.168.132.2";
    config = ../hosts/k3s.nix;
    hw = ../hw/proxmox.nix;
    system = system.x86_64-linux;
  };

  metrics = {
    ip.priv = "192.168.128.41";
    ip.tail = "100.108.135.101";
    config = ../hosts/metrics.nix;
    hw = ../hw/proxmox.nix;
    system = system.x86_64-linux;
  };

  nc-um350-1 = {
    ip.priv = "192.168.128.36";
    config = ../hosts/nc-um350.nix;
    hw = ../hw/minis-um350.nix;
    system = system.x86_64-linux;
    # Node metadata consumed by nomad job constraints.
    nomad.meta.zwave = "aeotec";
  };

  nc-um350-2 = {
    ip.priv = "192.168.128.37";
    config = ../hosts/nc-um350.nix;
    hw = ../hw/minis-um350.nix;
    system = system.x86_64-linux;
  };

  nexus = {
    ip.priv = "192.168.128.40";
    ip.tail = "100.96.6.112";
    config = ../hosts/nexus.nix;
    hw = ../hw/proxmox.nix;
    system = system.x86_64-linux;
  };

  ryzen = {
    ip.priv = "192.168.1.50";
    ip.tail = "100.112.232.73";
    config = ../hosts/ryzen.nix;
    hw = ../hw/asus-x570p.nix;
    system = system.x86_64-linux;
  };

  scratch = {
    ip.priv = "192.168.131.2";
    config = ../hosts/scratch.nix;
    hw = ../hw/proxmox.nix;
    system = system.x86_64-linux;
  };

  web = {
    ip.priv = "192.168.128.11";
    ip.tail = "100.90.124.31";
    config = ../hosts/web.nix;
    hw = ../hw/proxmox.nix;
    system = system.x86_64-linux;
  };

  witness = {
    ip.priv = "192.168.131.3";
    config = ../hosts/witness.nix;
    hw = ../hw/proxmox.nix;
    system = system.x86_64-linux;
  };
}
114 |
--------------------------------------------------------------------------------
/nixos/catalog/services.nix:
--------------------------------------------------------------------------------
{
  nodes,
  consul,
  nomad,
  k3s,
}:
{
  # The services block populates my dashboard and configures the load balancer.
  #
  # The key-name of each service block is mapped to an internal domain name
  # .bytemonkey.org and an external domain name .x.bytemonkey.org.
  #
  # If the `lb` section is unspecified, then it is assumed the configuration
  # has been provided by tags in consul. Untagged services can be specified
  # using the dash.(host|port|proto|path) attributes.
  #
  # Authelia is configured to deny by default; services will need to be
  # configured there before being available externally.
  #
  # `dash.icon` paths can be found in https://github.com/walkxcode/dashboard-icons
  argocd = {
    title = "ArgoCD";

    dns.intCname = true;

    dash.icon = "svg/argocd.svg";

    # Balance across all k3s worker ingress endpoints.
    lb.backendUrls = map (ip: "https://${ip}:443") k3s.workers;
    lb.checkHost = "argocd.bytemonkey.org";
  };

  auth = {
    title = "Authelia";
    external = true;

    dns.intCname = true;
    dns.extCname = true;

    lb.backendUrls = [ "http://127.0.0.1:9091" ];
    # Authelia itself must not sit behind auth.
    lb.auth = "none";
  };

  consul = {
    title = "Consul";

    dns.intCname = true;

    dash.icon = "svg/consul.svg";

    lb.backendUrls = map (ip: "http://${ip}:8500") consul.servers;
    lb.sticky = true;
    lb.auth = "external";
  };

  dash = {
    title = "Dashboard";
    dns.intCname = true;
    lb.backendUrls = [ "http://127.0.0.1:12701" ];
  };

  dockreg = {
    title = "Docker Registry";

    dns.intCname = true;

    dash.icon = "svg/docker.svg";
    dash.path = "/v2/_catalog";

    lb.backendUrls = [ "http://192.168.1.20:5050" ];
  };

  fluidd = {
    title = "Fluidd";
    dns.intCname = true;
    dash.icon = "svg/fluidd.svg";
    lb.backendUrls = [ "http://msdde3.home.arpa" ];
  };

  forgejo = {
    title = "Forgejo";

    dns.intCname = true;
    dns.extCname = false;

    dash.icon = "svg/forgejo.svg";
    # Note: external + auth handled by labels.
  };

  git = {
    # Used by forgejo SSH.
    title = "Git";
    dns.intCname = true;
  };

  grafana = {
    title = "Grafana";

    dns.intCname = true;
    dns.extCname = true;

    dash.icon = "svg/grafana.svg";
    # Note: external + auth handled by labels.
  };

  homeassistant = {
    title = "Home Assistant";

    dns.intCname = true;

    dash.icon = "svg/home-assistant.svg";
  };

  inbucket = {
    title = "Inbucket";
    dns.intCname = true;
    dash.icon = "svg/gmail.svg";
  };

  links = {
    title = "Linkwarden";
    dns.intCname = true;
    dash.icon = "svg/linkwarden.svg";
  };

  modem = {
    title = "Cable Modem";

    # Dashboard-only entry: direct link, no DNS or load balancing.
    dash.icon = "png/arris.png";
    dash.host = "192.168.100.1";
    dash.proto = "http";
  };

  monolith = {
    title = "Monolith";
    dns.intCname = true;
  };

  nodered = {
    title = "Node-RED";
    dns.intCname = true;
    dash.icon = "svg/node-red.svg";
  };

  nomad = {
    title = "Nomad";
    external = true;

    dns.intCname = true;
    dns.extCname = true;

    dash.icon = "svg/nomad.svg";

    lb.backendUrls = map (ip: "https://${ip}:4646") nomad.servers;
    lb.sticky = true;
    lb.auth = "external";
  };

  proxmox = {
    title = "Proxmox VE";
    dns.intCname = true;
    dash.icon = "png/proxmox.png";

    lb.backendUrls = [
      "https://192.168.128.12:8006"
      "https://192.168.128.13:8006"
    ];
    lb.sticky = true;
  };

  skynas = {
    title = "SkyNAS";
    dash.icon = "png/synology-dsm.png";
    dash.host = "skynas.bytemonkey.org";
    dash.port = 5001;
  };

  syncthing = {
    title = "Syncthing";

    dns.intCname = true;
    dns.extCname = false;

    dash.icon = "svg/syncthing.svg";
    # Note: external + auth handled by labels.
  };

  traefik = {
    title = "Traefik";

    dns.intCname = true;

    dash.icon = "svg/traefik.svg";
    dash.host = "traefik.bytemonkey.org";
    dash.path = "/dashboard/";
  };

  unifi = {
    title = "UniFi";
    external = true;

    dns.intCname = true;
    dns.extCname = true;

    dash.icon = "png/unifi.png";

    lb.backendUrls = [ "https://192.168.1.20:8443" ];
    lb.auth = "external";
  };

  zwavejs = {
    title = "Z-Wave JS";

    dns.intCname = true;

    dash.icon = "png/zwavejs2mqtt.png";
  };
}
218 |
--------------------------------------------------------------------------------
/nixos/common.nix:
--------------------------------------------------------------------------------
# Common config shared among all machines
{
  pkgs,
  authorizedKeys,
  catalog,
  hostName,
  environment,
  ...
}:
{
  system.stateVersion = "24.11";

  imports = [
    ./common/packages.nix
    ./roles
  ];
  nixpkgs.overlays = [ (import ./pkgs/overlay.nix) ];
  nixpkgs.config.allowUnfree = true;

  environment.shellAliases = {
    la = "ls -lAh";
  };

  nix = {
    optimise.automatic = true;

    # Weekly GC of store paths older than a week.
    gc = {
      automatic = true;
      dates = "weekly";
      options = "--delete-older-than 7d";
      randomizedDelaySec = "20min";
    };

    # 128 MiB download buffer.
    settings.download-buffer-size = 134217728;

    # TODO revisit after https://github.com/NixOS/nix/pull/13301
    # settings.substituters = [ "http://nix-cache.service.skynet.consul?priority=10" ];
  };

  # Show the flake node name and environment on the console login prompt.
  services.getty.helpLine = ">>> Flake node: ${hostName}, environment: ${environment}";

  services.openssh = {
    enable = true;
    settings.PermitRootLogin = "yes";
  };

  programs.command-not-found.enable = false; # not flake aware

  time.timeZone = "US/Pacific";

  users.users.root.openssh.authorizedKeys.keys = authorizedKeys;

  # Display the machine's IPv4 address on the console before login.
  environment.etc."issue.d/ip.issue".text = ''
    IPv4: \4
  '';
  # Re-render the getty issue text when dhcpcd updates addresses.
  networking.dhcpcd.runHook = "${pkgs.utillinux}/bin/agetty --reload";
  networking.firewall.checkReversePath = "loose";

  # Exclude the tailscale interface from network wait-online checks.
  systemd.network.wait-online.ignoredInterfaces = [ catalog.tailscale.interface ];

}
62 |
--------------------------------------------------------------------------------
/nixos/common/onprem.nix:
--------------------------------------------------------------------------------
# Setup for on-premises machines
{
  config,
  options,
  catalog,
  ...
}:
{
  networking = {
    search = [
      "home.arpa"
      "dyn.home.arpa"
    ];
    # Prefer the local NTP server, falling back to the NixOS defaults.
    timeServers = [ "ntp.home.arpa" ] ++ options.networking.timeServers.default;
  };

  # Configure telegraf agent.
  roles.telegraf = {
    enable = true;
    influxdb = {
      urls = catalog.influxdb.urls;
      database = catalog.influxdb.telegraf.database;
      user = catalog.influxdb.telegraf.user;
      # Password supplied via agenix-decrypted secret file.
      passwordFile = config.age.secrets.influxdb-telegraf.path;
    };
  };

  # Forward syslogs to promtail/loki.
  roles.log-forwarder = {
    enable = true;
    syslogHost = catalog.syslog.host;
    syslogPort = catalog.syslog.port;
  };

  # Relay outbound mail through the catalog SMTP host, unauthenticated.
  programs.msmtp.accounts.default = {
    auth = false;
    host = catalog.smtp.host;
  };

  age.secrets = {
    influxdb-telegraf.file = ../secrets/influxdb-telegraf.age;
    wifi-env.file = ../secrets/wifi-env.age;
  };
}
45 |
--------------------------------------------------------------------------------
/nixos/common/packages.nix:
--------------------------------------------------------------------------------
# Base package set installed on every machine.
{ pkgs, nixpkgs-unstable, ... }:
{
  environment.systemPackages =
    let
      # Unstable channel packages for the current platform.
      unstable = nixpkgs-unstable.legacyPackages.${pkgs.system};

      # Shell shims remapping legacy command names to preferred tools.
      remaps = [
        (pkgs.writeShellScriptBin "vim" ''
          exec /run/current-system/sw/bin/nvim "$@"
        '')
      ];
    in
    (with pkgs; [
      bat
      bind
      file
      git
      htop
      jq
      lf
      lsof
      mailutils
      nmap
      psmisc
      python3
      smartmontools
      tree
      wget
    ])
    # neovim comes from unstable for a newer version than stable nixpkgs.
    ++ (with unstable; [ neovim ])
    ++ remaps;
}
33 |
--------------------------------------------------------------------------------
/nixos/confgen/default.nix:
--------------------------------------------------------------------------------
# Builds a derivation containing generated config files (octodns, etc).
# Curried: first applied to flake inputs, then to the catalog and system.
{ nixpkgs, ... }:
catalog: system:
let
  pkgs = nixpkgs.legacyPackages.${system};

  # Attrset mapping destination file paths to their config contents.
  octodnsBundle = import ./octodns { inherit pkgs catalog; };
in
pkgs.stdenvNoCC.mkDerivation {
  name = "config-outputs";

  nativeBuildInputs = [
    pkgs.jq
    pkgs.remarshal
  ];

  dontUnpack = true;

  # Write the entire config bundle as a single JSON file.
  jsonBundle = (builtins.toJSON octodnsBundle);
  passAsFile = [ "jsonBundle" ];

  installPhase = ''
    mkdir $out
    cd $out

    # Loop over each destination file name from the JSON bundle.
    jq -r "keys | .[]" $jsonBundlePath | while read fpath; do
      if [[ -z "$fpath" ]]; then
        echo "Empty destination file path for config!" >&2
        exit 1
      fi

      # Create destination directory and convert syntax.
      mkdir -p $(dirname "$fpath")
      if [[ "$fpath" == *.yaml ]]; then
        jq ".[\"$fpath\"]" $jsonBundlePath | json2yaml > "$fpath"
      else
        jq ".[\"$fpath\"]" $jsonBundlePath > "$fpath"
      fi
    done
  '';
}
43 |
--------------------------------------------------------------------------------
/nixos/confgen/octodns/bytemonkey-ext.nix:
--------------------------------------------------------------------------------
{ pkgs, catalog, ... }:
# Records for the external bytemonkey.org zone: static records plus
# CNAMEs generated from the service catalog.
# NOTE(review): `target` is accepted but unused here — presumably kept for a
# uniform signature with sibling zone files; confirm.
target:
let
  inherit (pkgs.lib)
    filterAttrs
    attrByPath
    mapAttrs
    mapAttrs'
    ;

  # Reverse proxy host for internal services.
  intProxy = "web.bytemonkey.org.";

  # Reverse proxy host for external (internet) services.
  extProxy = "x.bytemonkey.org.";

  # Static records for this zone.
  bytemonkeyRecords = {
    skynas = {
      type = "A";
      value = "100.126.1.1";
    };
    tse = {
      type = "TXT";
      value = "google-site-verification=AuTsq7_HTu2uyAM1L-FDshwDdFfzjtUHyQ2lzXr7UOg";
      ttl = 3600;
    };
    web = {
      type = "A";
      value = catalog.nodes.web.ip.tail;
    };
    x = {
      type = "CNAME";
      value = "home.bytemonkey.org.";
    };
  };

  # Services that requested a CNAME.
  internalServices = filterAttrs (
    n: svc:
    attrByPath [
      "dns"
      "intCname"
    ] false svc
  ) catalog.services;

  # Services to expose outside of our tailnet.
  externalServices = filterAttrs (
    n: svc:
    attrByPath [
      "dns"
      "extCname"
    ] false svc
  ) catalog.services;

  # name/svc are ignored: every internal service maps to the same proxy CNAME.
  mkInternalServiceRecord = proxy: name: svc: {
    type = "CNAME";
    value = proxy;
  };

  # External services get a "<name>.x" record pointing at the external proxy.
  mkExternalServiceRecord = proxy: name: svc: {
    name = "${name}.x";
    value = {
      type = "CNAME";
      value = proxy;
    };
  };
in
bytemonkeyRecords
// (mapAttrs (mkInternalServiceRecord intProxy) internalServices)
// (mapAttrs' (mkExternalServiceRecord extProxy) externalServices)
71 |
--------------------------------------------------------------------------------
/nixos/confgen/octodns/bytemonkey-int.nix:
--------------------------------------------------------------------------------
{ pkgs, catalog, ... }:
# Records for the internal bytemonkey.org zone: NS/A glue plus CNAMEs
# generated from the service catalog.
# NOTE(review): `target` is accepted but unused here — presumably kept for a
# uniform signature with sibling zone files; confirm.
target:
let
  inherit (pkgs.lib)
    filterAttrs
    attrByPath
    mapAttrs
    mapAttrs'
    ;

  # Reverse proxy host for internal services.
  intProxy = "web.home.arpa.";

  bytemonkeyRecords = {
    # Zone apex NS records.
    "" = {
      type = "NS";
      values = [
        "ns1.bytemonkey.org."
        "ns2.bytemonkey.org."
        "ns3.bytemonkey.org."
      ];
    };

    ns1 = {
      type = "A";
      value = catalog.dns.ns1;
    };
    ns2 = {
      type = "A";
      value = catalog.dns.ns2;
    };
    ns3 = {
      type = "A";
      value = catalog.dns.ns3;
    };

    skynas = {
      type = "A";
      value = "100.126.1.1";
    };
    x = {
      type = "CNAME";
      value = intProxy;
    };
  };

  # Services that requested a CNAME.
  internalServices = filterAttrs (
    n: svc:
    attrByPath [
      "dns"
      "intCname"
    ] false svc
  ) catalog.services;

  # Services to expose outside of our tailnet.
  externalServices = filterAttrs (
    n: svc:
    attrByPath [
      "dns"
      "extCname"
    ] false svc
  ) catalog.services;

  # name/svc are ignored: every internal service maps to the same proxy CNAME.
  mkInternalServiceRecord = proxy: name: svc: {
    type = "CNAME";
    value = proxy;
  };

  # External services also get a "<name>.x" record; in this internal zone
  # it points at the internal proxy.
  mkExternalServiceRecord = proxy: name: svc: {
    name = "${name}.x";
    value = {
      type = "CNAME";
      value = proxy;
    };
  };
in
bytemonkeyRecords
// (mapAttrs (mkInternalServiceRecord intProxy) internalServices)
// (mapAttrs' (mkExternalServiceRecord intProxy) externalServices)
81 |
--------------------------------------------------------------------------------
/nixos/confgen/octodns/default.nix:
--------------------------------------------------------------------------------
{ catalog, ... }@inputs:
let
  # Nameserver to push records to.
  target = catalog.dns.ns1;

  # octodns MetaProcessor settings shared by both configs.
  metaProcessor = {
    class = "octodns.processor.meta.MetaProcessor";
    record_name = "octodns-meta";
    include_provider = true;
  };

  # YAML zone-file source provider rooted at the given directory.
  mkYamlProvider = directory: {
    class = "octodns.provider.yaml.YamlProvider";
    inherit directory;
    default_ttl = 600;
    enforce_order = true;
  };
in
{
  # Internal octodns config: YAML zones pushed to bind via RFC 2136.
  "octodns/internal-config.yaml" = {
    manager = {
      max_workers = 1;
      enable_checksum = true;
      processors = [ "meta" ];
    };

    providers = {
      zones = mkYamlProvider "./internal-zones";

      nexus_bind = {
        class = "octodns_bind.Rfc2136Provider";
        host = target;
        key_name = "env/BIND_KEY_NAME";
        key_secret = "env/BIND_KEY_SECRET";
      };
    };

    zones."*" = {
      sources = [ "zones" ];
      targets = [ "nexus_bind" ];
    };

    processors.meta = metaProcessor;
  };

  # Cloudflare octodns config: YAML zones pushed to Cloudflare DNS.
  "octodns/cloudflare-config.yaml" = {
    manager = {
      max_workers = 1;
      enable_checksum = true;
      processors = [
        "meta"
        "preserve-names"
      ];
    };

    providers = {
      zones = mkYamlProvider "./external-zones";

      cloudflare = {
        class = "octodns_cloudflare.CloudflareProvider";
        token = "env/CLOUDFLARE_TOKEN";
      };
    };

    zones."*" = {
      sources = [ "zones" ];
      targets = [ "cloudflare" ];
    };

    processors = {
      meta = metaProcessor;

      # Keep octodns from touching the "home" record, which is managed
      # outside of these zone files (presumably by cfdyndns — verify).
      preserve-names = {
        class = "octodns.processor.filter.NameRejectlistFilter";
        rejectlist = [ "home" ];
      };
    };
  };

  "octodns/internal-zones/bytemonkey.org.yaml" = import ./bytemonkey-int.nix inputs target;
  "octodns/internal-zones/home.arpa.yaml" = import ./home.nix inputs target;

  "octodns/external-zones/bytemonkey.org.yaml" = import ./bytemonkey-ext.nix inputs target;
}
98 |
--------------------------------------------------------------------------------
/nixos/confgen/octodns/home.nix:
--------------------------------------------------------------------------------
{ pkgs, catalog, ... }:
target:
let
  inherit (pkgs.lib) filterAttrs mapAttrs;

  # Hand-maintained records for the home.arpa zone.
  homeRecords = {
    # Zone apex nameserver records.
    "" = {
      type = "NS";
      values = [
        "ns1.home.arpa."
        "ns2.home.arpa."
        "ns3.home.arpa."
      ];
    };

    ns1 = {
      type = "A";
      value = catalog.dns.ns1;
    };
    ns2 = {
      type = "A";
      value = catalog.dns.ns2;
    };
    ns3 = {
      type = "A";
      value = catalog.dns.ns3;
    };

    # Sub-zones delegated to the gateway.
    cluster = {
      type = "NS";
      value = "gateway.home.arpa.";
    };
    dyn = {
      type = "NS";
      value = "gateway.home.arpa.";
    };

    # Service aliases.
    mail = {
      type = "CNAME";
      value = "web.home.arpa.";
    };
    mqtt = {
      type = "CNAME";
      value = "metrics.home.arpa.";
    };
    ntp = {
      type = "CNAME";
      value = "skynas.home.arpa.";
    };

    # LAN network.
    gateway = {
      type = "A";
      value = "192.168.1.1";
    };
    printer = {
      type = "A";
      value = "192.168.1.5";
    };
    skynas = {
      type = "A";
      value = "192.168.1.20";
    };
    octopi = {
      type = "A";
      value = "192.168.1.21";
    };

    modem = {
      type = "A";
      value = "192.168.100.1";
    };

    # IoT network.
    msdde3 = {
      type = "A";
      value = "192.168.10.23";
    };

    # Cluster network.
    pve1 = {
      type = "A";
      value = "192.168.128.10";
    };
    pve2 = {
      type = "A";
      value = "192.168.128.12";
    };
    pve3 = {
      type = "A";
      value = "192.168.128.13";
    };
    "*.k" = {
      type = "CNAME";
      value = "kube1.home.arpa.";
    };
  };

  # An A record for every catalog node that declares a private LAN address.
  nodeRecords = mapAttrs (name: node: {
    type = "A";
    value = node.ip.priv;
  }) (filterAttrs (_: node: node ? ip.priv) catalog.nodes);
in
homeRecords // nodeRecords
108 |
--------------------------------------------------------------------------------
/nixos/deploy:
--------------------------------------------------------------------------------
#!/usr/bin/env fish
# deploy [host-name] [root@target-host]:
# Builds the flake for host-name locally and deploys to target-host.

set domain home.arpa
set logfile log.txt

if test -n "$argv[1]"
    set host $argv[1]
end
if test -n "$argv[2]"
    set target $argv[2]
end

if test -z "$host"
    echo "host env or argument required" >&2
    exit 1
end

if test -z "$target"
    set target "root@$host.$domain"
end

# Build flake locally, push to $host.
echo "Building $host and pushing to $target..."
nixos-rebuild \
    --flake ".#$host" \
    --target-host $target \
    --build-host localhost switch
or exit $status

# Log most recent deploy for host w/ git commit, replacing any previous
# entry for this host.
test -e $logfile; or touch $logfile
set commit (git log -n 1 --format="[%ci] %h %s")
set tmpfile (mktemp)
# Escape the hostname so grep does not treat it as a regex, and build the
# new log in a temp file so a failure cannot truncate the existing log.
grep -v "^"(string escape --style=regex $host)" " $logfile > $tmpfile
printf "%-12s %s\n" $host $commit >> $tmpfile
mv $tmpfile $logfile

echo
echo "Deploy of $host --> $target complete"
43 |
--------------------------------------------------------------------------------
/nixos/dns-sync:
--------------------------------------------------------------------------------
#!/usr/bin/env fish
# Regenerates octodns configuration from the flake, then syncs the
# internal (bind) and external (Cloudflare) zones. Extra arguments
# (e.g. --doit) are passed through to octodns-sync.

nix build ".#confgen"; or exit

cd result/octodns; or exit

# Stop before touching Cloudflare if the internal sync fails.
octodns-sync --config-file=./internal-config.yaml $argv
or exit $status

octodns-sync --config-file=./cloudflare-config.yaml $argv
9 |
--------------------------------------------------------------------------------
/nixos/flake.nix:
--------------------------------------------------------------------------------
{
  description = "Home Services";

  inputs = {
    nixpkgs.url = "github:nixos/nixpkgs/nixos-24.11";
    nixpkgs-unstable.url = "github:nixos/nixpkgs/nixpkgs-unstable";
    flake-utils.url = "github:numtide/flake-utils";

    nixd-flake = {
      url = "github:nix-community/nixd/2.5.1";
      inputs.nixpkgs.follows = "nixpkgs-unstable";
    };

    agenix = {
      url = "github:ryantm/agenix/0.15.0";
      inputs.nixpkgs.follows = "nixpkgs";
    };

    agenix-template.url = "github:jhillyerd/agenix-template/main";

    homesite = {
      url = "github:jhillyerd/homesite/main";
      inputs = {
        flake-utils.follows = "flake-utils";
        nixpkgs.follows = "nixpkgs";
      };
    };

    hw-gauge = {
      url = "github:jhillyerd/hw-gauge";
      inputs = {
        flake-utils.follows = "flake-utils";
        nixpkgs.follows = "nixpkgs";
      };
    };
  };

  outputs =
    { nixpkgs, flake-utils, ... }@inputs:
    let
      inherit (nixpkgs.lib) mapAttrs;
      inherit (flake-utils.lib) eachSystemMap system;

      # catalog.nodes defines the systems available in this flake.
      catalog = import ./catalog { inherit system; };
    in
    rec {
      # One NixOS configuration per entry in catalog.nodes.
      nixosConfigurations = import ./nix/nixos-configurations.nix inputs catalog;

      # An SD card image for each node in the catalog.
      images = mapAttrs (
        host: node: nixosConfigurations.${host}.config.system.build.sdImage
      ) catalog.nodes;

      # Configuration generators.
      packages =
        let
          confgen = import ./confgen inputs catalog;
        in
        eachSystemMap [ system.x86_64-linux ] (system: {
          confgen = confgen system;
        });
    };
}
62 |
--------------------------------------------------------------------------------
/nixos/hosts/carbon.nix:
--------------------------------------------------------------------------------
{ ... }:
{
  imports = [ ../common.nix ];

  roles = {
    gui-xorg.enable = true;
    workstation.enable = true;
  };

  networking = {
    networkmanager.enable = true;
    firewall.enable = false;
  };

  services.resolved.enable = true;

  # Backlight controls: bind the brightness keys to `light`.
  programs.light.enable = true;
  services.actkbd = {
    enable = true;
    bindings = [
      # Decrease backlight by 10%.
      {
        keys = [ 224 ];
        events = [ "key" ];
        command = "/run/current-system/sw/bin/light -U 10";
      }
      # Increase backlight by 10%.
      {
        keys = [ 225 ];
        events = [ "key" ];
        command = "/run/current-system/sw/bin/light -A 10";
      }
    ];
  };

  virtualisation.libvirtd.enable = true;
}
33 |
--------------------------------------------------------------------------------
/nixos/hosts/ci-runner.nix:
--------------------------------------------------------------------------------
{
  config,
  pkgs-unstable,
  self,
  util,
  ...
}:
{
  imports = [
    ../common.nix
    ../common/onprem.nix
  ];

  systemd.network.networks = util.mkClusterNetworks self;

  networking.firewall = {
    enable = true;
    # Allow container runners to access cache service.
    trustedInterfaces = [ "br-+" ];
  };

  # Forgejo CI runner, registered against the forgejo.bytemonkey.org
  # instance.
  services.gitea-actions-runner = {
    package = pkgs-unstable.forgejo-actions-runner;

    instances.skynet = {
      enable = true;

      name = config.networking.hostName;
      labels = [
        "nixos_amd64:host"
        "ubuntu-latest:docker://node:20-bookworm"
        "ubuntu-22.04:docker://node:20-bullseye"
      ];

      url = "https://forgejo.bytemonkey.org";
      tokenFile = config.age.secrets.gitea-runner-token.path;

      settings = {
        # One of: trace, debug, info, warn, error, fatal.
        log.level = "info";

        cache = {
          enabled = true;
          dir = "/var/cache/forgejo-runner/actions";
        };

        runner = {
          # Number of tasks executed concurrently.
          capacity = 2;
          # Extra environment variables passed to jobs.
          envs = { };
          # Per-job timeout; the Gitea instance also enforces its own
          # (3h by default), which wins if shorter.
          timeout = "3h";
        };

        container = {
          # Network for task containers: host, bridge, a custom network
          # name, or empty to let act_runner create one automatically.
          network = "";

          # Privileged mode is only required for Docker-in-Docker.
          privileged = false;

          # "automount" locates an available docker host and mounts it
          # into the job container (e.g. /var/run/docker.sock); "-" or ""
          # just locates one; anything else is used as-is.
          docker_host = "automount";
        };

        # systemd will namespace /var/tmp paths.
        host.workdir_parent = "/var/tmp/actwork";
      };
    };
  };

  # Used by for action cache.
  systemd.services.gitea-runner-skynet.serviceConfig.CacheDirectory = "forgejo-runner";

  virtualisation.docker = {
    enable = true;
    # TODO: autoPrune settings.
  };

  age.secrets.gitea-runner-token.file = ../secrets/gitea-runner-token.age;

  roles.upsmon = {
    enable = true;
    wave = 1;
  };
}
98 |
--------------------------------------------------------------------------------
/nixos/hosts/eph.nix:
--------------------------------------------------------------------------------
{
  environment,
  self,
  util,
  ...
}:
{
  imports = [
    ../common.nix
    ../common/onprem.nix
  ];

  roles = {
    tailscale.enable = true;
    workstation.enable = true;

    upsmon = {
      enable = true;
      wave = 1;
    };
  };

  networking.firewall.enable = true;

  systemd.network.networks = util.mkClusterNetworks self;

  # Do not enable libvirtd inside of a test VM: the inner virtual bridge
  # would clash with the outer virtual network, since both use the same
  # IP range.
  virtualisation.libvirtd.enable = environment == "prod";
}
30 |
--------------------------------------------------------------------------------
/nixos/hosts/fastd.nix:
--------------------------------------------------------------------------------
{
  config,
  pkgs,
  self,
  util,
  ...
}:
{
  imports = [
    ../common.nix
    ../common/onprem.nix
  ];

  boot.supportedFilesystems = [ "zfs" ];

  # Listed extra pools must be available during boot.
  boot.zfs.extraPools = [ "fast1" ];

  # PostgreSQL with its data directory on the fast1 ZFS pool, keyed by
  # the package's on-disk schema version.
  services.postgresql = {
    package = pkgs.postgresql_16;

    enable = true;
    enableTCPIP = true;
    dataDir = "/fast1/database/postgresql/${config.services.postgresql.package.psqlSchema}";

    # SCRAM password auth for all hosts/users/databases.
    authentication = ''
      host all all all scram-sha-256
    '';

    ensureDatabases = [
      "root"
      "forgejo"
    ];

    ensureUsers = [
      {
        name = "root";
        ensureDBOwnership = true;
        ensureClauses.superuser = true;
        ensureClauses.login = true;
      }
      {
        name = "forgejo";
        ensureDBOwnership = true;
        ensureClauses."inherit" = true;
        ensureClauses.login = true;
      }
    ];
  };

  # iSCSI initiator with automatic login; backs the backup1 pool
  # imported below.
  services.openiscsi = {
    enable = true;
    enableAutoLoginOut = true;
    name = "iqn.1999-11.org.bytemonkey:fastd";
  };

  # Import iSCSI ZFS pools.
  systemd.services.zfs-import-backup1 =
    let
      zpoolcmd = "/run/current-system/sw/bin/zpool";
      pool = "backup1";

      # Import the pool only if it is not already imported.
      script.start = ''
        if ! ${zpoolcmd} list ${pool} >/dev/null 2>&1; then
          ${zpoolcmd} import ${pool}
        fi
      '';

      # Export the pool on service stop, if currently imported.
      script.stop = ''
        if ${zpoolcmd} list ${pool} >/dev/null 2>&1; then
          ${zpoolcmd} export ${pool}
        fi
      '';
    in
    {
      # Give iSCSI time to login to NAS.
      preStart = "/run/current-system/sw/bin/sleep 5";

      script = script.start;

      wantedBy = [ "multi-user.target" ];
      requires = [ "iscsi.service" ];
      after = [ "iscsi.service" ];
      serviceConfig = {
        Type = "oneshot";
        RemainAfterExit = "yes";
        ExecStop = "${pkgs.writeShellScript "stop-zfs-backup1" script.stop}";
      };
    };

  # Replicate ZFS snapshots of the database volume to the backup pool.
  services.syncoid = {
    enable = true;
    interval = "minutely";

    # ZFS delegated permissions granted on the source dataset.
    localSourceAllow = [
      "bookmark"
      "hold"
      "mount" # added
      "send"
      "snapshot"
      "destroy"
    ];

    # ZFS delegated permissions granted on the target dataset.
    localTargetAllow = [
      "change-key"
      "compression"
      "create"
      "destroy" # added
      "mount"
      "mountpoint"
      "receive"
      "rollback"
    ];

    # Volumes to backup.
    commands."fast1/database".target = "backup1/database";

    # Don't replicate until the backup pool has been imported.
    service.after = [ "zfs-import-backup1.service" ];
  };

  # Collect snapshot stats.
  roles.telegraf.zfs = true;

  systemd.network.networks = util.mkClusterNetworks self;

  networking.firewall.enable = true;
  networking.firewall.allowedTCPPorts = [ config.services.postgresql.settings.port ];

  roles.upsmon = {
    enable = true;
    wave = 2;
  };
}
134 |
--------------------------------------------------------------------------------
/nixos/hosts/game.nix:
--------------------------------------------------------------------------------
{ self, util, ... }:
{
  imports = [
    ../common.nix
    ../common/onprem.nix
  ];

  systemd.network.networks = util.mkClusterNetworks self;
  roles.gateway-online.addr = "192.168.1.1";

  # Enshrouded dedicated game server container.
  virtualisation.oci-containers = {
    # See https://github.com/mornedhels/enshrouded-server/issues/103
    backend = "docker";

    containers.enshrouded = {
      image = "mornedhels/enshrouded-server:latest";
      hostname = "enshrouded";
      ports = [
        "15637:15637/udp" # Enshrouded
      ];
      volumes = [ "/data/enshrouded:/opt/enshrouded" ];
      environment = {
        SERVER_NAME = "Enshrouded by Cuteness_v3_FINAL";
        SERVER_ENABLE_TEXT_CHAT = "true";
        # Hourly update check; half-hourly backups, keeping 48.
        UPDATE_CRON = "37 * * * *";
        UPDATE_CHECK_PLAYERS = "true";
        BACKUP_CRON = "*/30 * * * *";
        BACKUP_MAX_COUNT = "48";
      };
    };
  };

  networking.firewall.enable = true;

  roles.upsmon = {
    enable = true;
    wave = 1;
  };
}
45 |
--------------------------------------------------------------------------------
/nixos/hosts/k3s.nix:
--------------------------------------------------------------------------------
{
  config,
  lib,
  catalog,
  self,
  util,
  ...
}:
let
  # The catalog designates one k3s node as the cluster leader.
  leader = catalog.k3s.leader;
  isLeader = leader == self;
in
{
  imports = [
    ../common.nix
    ../common/onprem.nix
  ];

  systemd.network.networks = util.mkClusterNetworks self;
  roles.gateway-online.addr = "192.168.1.1";

  # NFS mount support.
  boot.supportedFilesystems = [ "nfs" ];
  services.rpcbind.enable = true;

  services.k3s = {
    enable = true;

    # The leader bootstraps embedded etcd; all other nodes join it.
    clusterInit = isLeader;
    serverAddr = lib.mkIf (!isLeader) "https://${leader.ip.priv}:6443";
    extraFlags = "--egress-selector-mode pod";

    tokenFile = config.age.secrets.k3s-token.path;
  };

  networking.firewall.enable = false;

  roles.upsmon = {
    enable = true;
    wave = 1;
  };

  age.secrets.k3s-token.file = ../secrets/k3s-token.age;
}
50 |
--------------------------------------------------------------------------------
/nixos/hosts/metrics.nix:
--------------------------------------------------------------------------------
{
  config,
  catalog,
  self,
  util,
  ...
}:
{
  imports = [
    ../common.nix
    ../common/onprem.nix
  ];

  # Database/log storage lives on its own volume.
  fileSystems."/var" = {
    device = "/dev/disk/by-label/var";
    fsType = "ext4";
  };

  systemd.network.networks = util.mkClusterNetworks self;

  # Telegraf service status goes through tailnet.
  roles.tailscale.enable = true;

  roles.influxdb = {
    enable = true;
    adminUser = "admin";
    adminPasswordFile = config.age.secrets.influxdb-admin.path;

    databases = {
      homeassistant = {
        user = "homeassistant";
        passwordFile = config.age.secrets.influxdb-homeassistant.path;
      };

      # NOTE(review): influxdb-telegraf is referenced here but not
      # declared in age.secrets below — presumably declared by another
      # module; verify.
      telegraf-hosts = {
        user = "telegraf";
        passwordFile = config.age.secrets.influxdb-telegraf.path;
        retention = "26w";
      };
    };
  };

  roles.loki.enable = true;

  # MQTT broker users and their topic ACLs.
  roles.mosquitto = {
    enable = true;

    users = {
      admin = {
        passwordFile = config.age.secrets.mqtt-admin.path;
        acl = [
          "readwrite $SYS/#"
          "readwrite #"
        ];
      };
      clock = {
        passwordFile = config.age.secrets.mqtt-clock.path;
        acl = [ "readwrite clock/#" ];
      };
      sensor = {
        passwordFile = config.age.secrets.mqtt-sensor.path;
        acl = [ ];
      };
      zwave = {
        passwordFile = config.age.secrets.mqtt-zwave.path;
        acl = [ "readwrite zwave/#" ];
      };
    };
  };

  # Forward remote syslogs as well.
  roles.log-forwarder.enableTcpListener = true;

  roles.gateway-online.addr = "192.168.1.1";

  roles.telegraf = {
    inherit (catalog.monitors) http_response ping x509_certs;
  };

  age.secrets = {
    influxdb-admin.file = ../secrets/influxdb-admin.age;
    influxdb-homeassistant.file = ../secrets/influxdb-homeassistant.age;

    # The mosquitto user must be able to read the MQTT password files.
    mqtt-admin = {
      file = ../secrets/mqtt-admin.age;
      owner = "mosquitto";
    };
    mqtt-clock = {
      file = ../secrets/mqtt-clock.age;
      owner = "mosquitto";
    };
    mqtt-sensor = {
      file = ../secrets/mqtt-sensor.age;
      owner = "mosquitto";
    };
    mqtt-zwave = {
      file = ../secrets/mqtt-zwave.age;
      owner = "mosquitto";
    };
  };

  networking.firewall.enable = true;

  roles.upsmon = {
    enable = true;
    wave = 2;
  };
}
106 |
--------------------------------------------------------------------------------
/nixos/hosts/nc-pi3.nix:
--------------------------------------------------------------------------------
{
  lib,
  config,
  catalog,
  ...
}:
{
  imports = [
    ../common.nix
    ../common/onprem.nix
  ];

  roles.cluster-volumes.enable = true;

  # Consul client agent; joins the servers listed in the catalog.
  roles.consul = {
    retryJoin = catalog.consul.servers;

    client = {
      enable = true;
      connect = true;
    };
  };

  roles.nomad = {
    enableClient = true;

    retryJoin = catalog.nomad.servers;

    # One read-write host volume per catalog-declared SkyNAS share, plus
    # a read-only bind of the Docker socket.
    hostVolumes =
      lib.genAttrs catalog.nomad.skynas-host-volumes (name: {
        path = "/mnt/skynas/${name}";
        readOnly = false;
      })
      // {
        "docker-sock-ro" = {
          path = "/var/run/docker.sock";
          readOnly = true;
        };
      };

    # USB plugin doesn't seem to work.
    # usb = {
    #   enable = true;
    #   includedVendorIds = [
    #     1624 # 0x0658, Aeotec
    #   ];
    # };

    client = {
      meta = {
        # Envoy built with gperftools
        # https://github.com/envoyproxy/envoy/issues/23339#issuecomment-1290509732
        "connect.sidecar_image" = "thegrandpkizzle/envoy:1.24.0";
      };
    };
  };

  roles.gateway-online.addr = "192.168.1.1";

  networking.firewall.enable = false;

  # Wifi PSK is substituted from an agenix-managed environment file.
  # NOTE(review): the wifi-env secret is not declared in this file —
  # presumably declared by a shared module; verify.
  networking.wireless = {
    enable = true;
    environmentFile = config.age.secrets.wifi-env.path;
    networks.SKYNET.psk = "@SKYNET_PSK@";
  };

  roles.upsmon = {
    enable = true;
    wave = 1;
  };
}
73 |
--------------------------------------------------------------------------------
/nixos/hosts/nc-um350.nix:
--------------------------------------------------------------------------------
{
  lib,
  catalog,
  self,
  ...
}:
{
  imports = [
    ../common.nix
    ../common/onprem.nix
  ];

  # Runs bind, but does not serve the local zones itself.
  roles.dns.bind = {
    enable = true;
    serveLocalZones = false;
  };

  roles.cluster-volumes.enable = true;

  # Consul server plus local client agent.
  roles.consul = {
    enableServer = true;
    retryJoin = catalog.consul.servers;

    client = {
      enable = true;
      connect = true;
    };
  };

  roles.nomad = {
    enableClient = true;
    enableServer = true;
    allocDir = "/data/nomad-alloc";

    retryJoin = catalog.nomad.servers;

    # One read-write host volume per catalog-declared SkyNAS share, plus
    # a read-only bind of the Docker socket.
    hostVolumes =
      lib.genAttrs catalog.nomad.skynas-host-volumes (name: {
        path = "/mnt/skynas/${name}";
        readOnly = false;
      })
      // {
        "docker-sock-ro" = {
          path = "/var/run/docker.sock";
          readOnly = true;
        };
      };

    # Use node catalog meta tags if defined.
    client.meta = lib.mkIf (self ? nomad.meta) self.nomad.meta;
  };

  roles.telegraf.nomad = true;

  roles.gateway-online.addr = "192.168.1.1";

  # Keep Docker state on the data disk.
  virtualisation.docker.extraOptions = "--data-root /data/docker";

  networking.firewall.enable = false;

  roles.upsmon = {
    enable = true;
    wave = 1;
  };
}
64 |
--------------------------------------------------------------------------------
/nixos/hosts/nexus.nix:
--------------------------------------------------------------------------------
{ self, util, ... }:
{
  imports = [
    ../common.nix
    ../common/onprem.nix
  ];

  systemd.network.networks = util.mkClusterNetworks self;
  roles.gateway-online.addr = "192.168.1.1";

  # Runs bind and serves the local zones directly.
  roles.dns.bind = {
    enable = true;
    serveLocalZones = true;
  };

  roles.tailscale.enable = true;

  networking.firewall.enable = false;

  roles.upsmon = {
    enable = true;
    wave = 3;
  };
}
23 |
--------------------------------------------------------------------------------
/nixos/hosts/ryzen.nix:
--------------------------------------------------------------------------------
{ environment, ... }:
{
  imports = [
    ../common.nix
    ../common/onprem.nix
  ];

  roles = {
    tailscale.enable = true;
    gui-wayland.enable = true;
    workstation.enable = true;
  };

  # Emulate aarch64 for Raspberry Pi builds.
  boot.binfmt.emulatedSystems = [ "aarch64-linux" ];

  networking.firewall.enable = false;

  # Do not enable libvirtd inside of a test VM: the inner virtual bridge
  # would clash with the outer virtual network, since both use the same
  # IP range.
  virtualisation.libvirtd.enable = environment == "prod";
  virtualisation.docker.extraOptions = "--data-root /data/docker";

  # Keep the hardware clock in local time for Windows dual-boot.
  time.hardwareClockInLocalTime = true;
}
26 |
--------------------------------------------------------------------------------
/nixos/hosts/scratch.nix:
--------------------------------------------------------------------------------
# A scratch host for building up new service configurations.
{ self, util, ... }:
{
  imports = [
    ../common.nix
    ../common/onprem.nix
  ];

  systemd.network.networks = util.mkClusterNetworks self;

  networking.firewall.enable = false;

  ### Temporary configuration below.
}
19 |
--------------------------------------------------------------------------------
/nixos/hosts/web.nix:
--------------------------------------------------------------------------------
{
  config,
  lib,
  environment,
  catalog,
  self,
  util,
  ...
}:
{
  imports = [
    ../common.nix
    ../common/onprem.nix
  ];

  systemd.network.networks = util.mkClusterNetworks self;
  roles.gateway-online.addr = "192.168.1.1";

  # Web services accessed via tailnet.
  roles.tailscale = {
    enable = true;
    exitNode = true;
  };

  # Bind-mount service state from the NAS, using a per-environment share.
  roles.nfs-bind = {
    nfsPath = "192.168.1.20:/volume1/web_${environment}";

    binds.authelia = {
      user = "0";
      group = "0";
      mode = "0770";
    };

    before = [ "podman-authelia.service" ];
  };

  # Configures traefik and homesite roles from service catalog.
  roles.websvc = {
    enable = true;

    internalDomain = "bytemonkey.org";
    externalDomain = "x.bytemonkey.org";

    cloudflareDnsApiTokenFile = config.age.secrets.cloudflare-dns-api.path;

    inherit (catalog) services layout;
  };

  virtualisation.oci-containers.containers.authelia = {
    image = "authelia/authelia:4.37.5";
    ports = [ "${toString catalog.authelia.port}:9091" ];
    volumes = [ "/data/authelia:/config" ];
  };

  # Keep home.bytemonkey.org pointed at our dynamic IP.
  services.cfdyndns = {
    enable = true;
    records = [ "home.bytemonkey.org" ];

    email = catalog.cf-api.user;
    apiTokenFile = config.age.secrets.cloudflare-dns-api.path;
  };

  systemd.services.cfdyndns = {
    # Run hourly, at seven past.
    startAt = lib.mkForce "*:07:00";

    after = [ "network-online.target" ];
    wants = [ "network-online.target" ];
  };

  age.secrets.cloudflare-dns-api.file = ../secrets/cloudflare-dns-api.age;

  roles.upsmon = {
    enable = true;
    wave = 1;
  };
}
84 |
--------------------------------------------------------------------------------
/nixos/hosts/witness.nix:
--------------------------------------------------------------------------------
{
  config,
  catalog,
  self,
  util,
  ...
}:
{
  imports = [
    ../common.nix
    ../common/onprem.nix
  ];

  systemd.network.networks = util.mkClusterNetworks self;
  roles.gateway-online.addr = "192.168.1.1";

  # Server-only Consul member.
  roles.consul = {
    enableServer = true;
    retryJoin = catalog.consul.servers;
  };

  # Server-only Nomad member.
  roles.nomad = {
    enableServer = true;
    retryJoin = catalog.nomad.servers;
  };

  # k3s control-plane-only node, joined to the catalog leader.
  services.k3s = {
    enable = true;
    disableAgent = true;

    serverAddr = "https://${catalog.k3s.leader.ip.priv}:6443";
    extraFlags = "--egress-selector-mode pod";

    tokenFile = config.age.secrets.k3s-token.path;
  };

  services.nginx = {
    enable = true;

    # Forward NUT (UPS) traffic to NAS.
    streamConfig = ''
      server {
        listen *:3493;
        proxy_pass skynas.home.arpa:3493;
      }
    '';
  };

  age.secrets.k3s-token.file = ../secrets/k3s-token.age;

  networking.firewall.enable = false;

  roles.upsmon = {
    enable = true;
    wave = 3;
  };
}
60 |
--------------------------------------------------------------------------------
/nixos/hw/asus-x570p.nix:
--------------------------------------------------------------------------------
{ config, lib, ... }:
{
  boot = {
    initrd.availableKernelModules = [
      "nvme"
      "xhci_pci"
      "ahci"
      "usb_storage"
      "usbhid"
      "sd_mod"
    ];
    initrd.kernelModules = [ ];
    kernelModules = [ "kvm-amd" ];
    extraModulePackages = [ ];

    loader = {
      systemd-boot.enable = true;
      efi.canTouchEfiVariables = true;
      efi.efiSysMountPoint = "/boot/efi";
      timeout = 10;
    };
  };

  fileSystems = {
    "/" = {
      device = "/dev/disk/by-uuid/c6fb5461-1de7-4764-b313-2de767ccb836";
      fsType = "ext4";
    };

    "/boot/efi" = {
      device = "/dev/disk/by-uuid/5C34-C3D2";
      fsType = "vfat";
    };
  };

  swapDevices = [ ];

  networking = {
    useDHCP = false;
    interfaces.enp6s0.useDHCP = true;
  };

  services.fstrim.enable = true;
  services.hw-gauge-daemon.enable = true;

  # nvidia graphics card setup.
  hardware.graphics.enable = true;
  hardware.nvidia = {
    open = true;
    powerManagement.enable = true;
  };
  services.xserver.videoDrivers = [ "nvidia" ];

  fonts.fontconfig = {
    antialias = true;
    subpixel.rgba = "rgb";
  };

  nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
  hardware.cpu.amd.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;
}
58 |
--------------------------------------------------------------------------------
/nixos/hw/minis-um350.nix:
--------------------------------------------------------------------------------
# Minisforum UM350 mini PC: AMD CPU, label-based filesystems.
{
  config,
  lib,
  modulesPath,
  ...
}:
{
  imports = [ (modulesPath + "/installer/scan/not-detected.nix") ];

  boot = {
    initrd = {
      availableKernelModules = [
        "nvme"
        "xhci_pci"
        "ahci"
        "usb_storage"
        "sd_mod"
      ];
      kernelModules = [ ];
    };
    kernelModules = [ "kvm-amd" ];
    extraModulePackages = [ ];

    # Use the systemd-boot EFI boot loader.
    loader = {
      systemd-boot.enable = true;
      efi.canTouchEfiVariables = true;
    };
  };

  networking = {
    useDHCP = false;
    interfaces.enp3s0.useDHCP = true;
  };

  fileSystems = {
    "/" = {
      device = "/dev/disk/by-label/nixos";
      fsType = "ext4";
    };
    "/boot" = {
      device = "/dev/disk/by-label/boot";
      fsType = "vfat";
    };
    "/data" = {
      device = "/dev/disk/by-label/data";
      fsType = "ext4";
    };
  };

  swapDevices = [ { device = "/dev/disk/by-label/swap"; } ];

  hardware.cpu.amd.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;
}
47 |
--------------------------------------------------------------------------------
/nixos/hw/proxmox.nix:
--------------------------------------------------------------------------------
# Proxmox VE Guest Hardware
{ lib, modulesPath, ... }:
{
  # Hardware configuration
  imports = [ (modulesPath + "/profiles/qemu-guest.nix") ];

  boot = {
    loader.grub = {
      enable = true;
      device = "/dev/sda";
    };

    initrd = {
      availableKernelModules = [ "uas" ];
      kernelModules = [ ];
    };

    kernelModules = [ ];
    extraModulePackages = [ ];

    # Log to the serial console (visible in the Proxmox VM console).
    kernelParams = [ "console=ttyS0" ];
  };

  fileSystems."/" = {
    device = "/dev/disk/by-label/nixos";
    fsType = "ext4";
  };

  swapDevices = [ ];

  networking.useDHCP = false;

  # Cluster NIC; hosts may override these mkDefault settings with
  # static addressing.
  systemd.network = {
    enable = true;

    networks."10-cluster" = {
      matchConfig.Name = "enp0s18";
      networkConfig = {
        DHCP = lib.mkDefault "ipv4";
        IPv6AcceptRA = lib.mkDefault "no";
        LinkLocalAddressing = lib.mkDefault "no";
      };
    };
  };

  nix.settings.max-jobs = lib.mkDefault 2;

  services.qemuGuest.enable = true;
}
46 |
--------------------------------------------------------------------------------
/nixos/hw/qemu.nix:
--------------------------------------------------------------------------------
# QEMU Guest Hardware
{ lib, modulesPath, ... }:
{
  # Hardware configuration
  imports = [ (modulesPath + "/profiles/qemu-guest.nix") ];

  boot = {
    loader.grub = {
      enable = true;
      device = "/dev/vda";
    };

    initrd = {
      availableKernelModules = [
        "ahci"
        "xhci_pci"
        "virtio_pci"
        "virtio_blk"
      ];
      kernelModules = [ ];
    };

    kernelModules = [ ];
    extraModulePackages = [ ];
  };

  # We don't always know the interface name on QEMU.
  networking.useDHCP = true;

  fileSystems."/" = {
    device = "/dev/disk/by-label/nixos";
    fsType = "ext4";
  };

  swapDevices = [ ];

  nix.settings.max-jobs = lib.mkDefault 2;
}
32 |
--------------------------------------------------------------------------------
/nixos/hw/sd-image-pi3.nix:
--------------------------------------------------------------------------------
# Minimal aarch64 SD-card image for a Raspberry Pi 3.
{
  config,
  lib,
  pkgs,
  modulesPath,
  ...
}:
{
  imports = [
    (modulesPath + "/profiles/minimal.nix")
    (modulesPath + "/installer/sd-card/sd-image-aarch64.nix")
  ];

  # Raspberry Pi 3 vendor kernel.
  boot.kernelPackages = pkgs.linuxKernel.packages.linux_rpi3;
  # Priority 0 (highest) clears the module list forced by the imported
  # sd-image profile.
  boot.initrd.availableKernelModules = lib.mkOverride 0 [ ];
  # Priority 50 wins over the profiles' defaults for this option.
  boot.supportedFilesystems = lib.mkOverride 50 [
    "vfat"
    "f2fs"
    "ntfs"
    "cifs"
    "nfs"
  ];

  # Emit the image uncompressed.
  sdImage.compressImage = false;
}
26 |
--------------------------------------------------------------------------------
/nixos/hw/tp-x1g3.nix:
--------------------------------------------------------------------------------
# Lenovo ThinkPad X1 Extreme Gen 3 (see the DMI match in the libinput
# quirk below): Intel CPU, LUKS root, NVIDIA graphics.
{ config, lib, ... }:
{
  boot.initrd.availableKernelModules = [
    "xhci_pci"
    "nvme"
    "usb_storage"
    "sd_mod"
    "sdhci_pci"
  ];
  boot.initrd.kernelModules = [ ];
  boot.kernelModules = [ "kvm-intel" ];
  boot.extraModulePackages = [ ];

  boot.loader.systemd-boot.enable = true;
  boot.loader.efi.canTouchEfiVariables = true;

  fileSystems."/" = {
    device = "/dev/disk/by-uuid/6dc82f20-f212-4d00-a910-7b75934e7596";
    fsType = "ext4";
  };

  # LUKS container holding the root filesystem; unlocked in the initrd.
  boot.initrd.luks.devices."luks-4923c6cb-e919-458b-bdb9-f972ddd162a6".device =
    "/dev/disk/by-uuid/4923c6cb-e919-458b-bdb9-f972ddd162a6";

  fileSystems."/boot" = {
    device = "/dev/disk/by-uuid/F480-2E4D";
    fsType = "vfat";
    # Strip group/other write bits on the vfat ESP.
    options = [
      "fmask=0022"
      "dmask=0022"
    ];
  };

  swapDevices = [
    {
      device = "/dev/disk/by-partuuid/7adc1774-c80c-4244-8d2c-debceceb34b0";
      # Encrypt swap with a fresh key each boot.
      randomEncryption.enable = true;
    }
  ];

  networking.interfaces.wlp0s20f3.useDHCP = lib.mkDefault true;

  hardware.enableRedistributableFirmware = true;
  hardware.cpu.intel.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;

  # nvidia graphics card setup.
  hardware.graphics.enable = true;
  hardware.nvidia = {
    open = true;
    modesetting.enable = true; # for udev events
    powerManagement.enable = true;
  };
  services.xserver.videoDrivers = [ "nvidia" ];

  hardware.trackpoint.device = "TPPS/2 Elan TrackPoint";

  services.fstrim.enable = true;
  services.power-profiles-daemon.enable = true;
  services.throttled.enable = true;

  services.libinput.touchpad = {
    accelSpeed = "0.3";
    clickMethod = "clickfinger";
    naturalScrolling = true;
    tapping = true;
  };

  # libinput pressure-range quirk for this machine's Synaptics touchpad.
  environment.etc."libinput/local-overrides.quirks".text = ''
    [Touchpad pressure override]
    MatchUdevType=touchpad
    MatchName=Synaptics TM3625-010
    MatchDMIModalias=dmi:*svnLENOVO:*:pvrThinkPadX1ExtremeGen3*
    AttrPressureRange=10:8
  '';

  fonts.fontconfig = {
    antialias = true;
    subpixel.rgba = "rgb";
  };

  nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
}
83 |
--------------------------------------------------------------------------------
/nixos/nix/nixos-configurations.nix:
--------------------------------------------------------------------------------
# Builds nixosConfigurations flake output.
{
  nixpkgs,
  nixpkgs-unstable,
  agenix,
  agenix-template,
  hw-gauge,
  ...
}@inputs:
catalog:
let
  inherit (nixpkgs.lib)
    filter
    mapAttrs
    nixosSystem
    splitString
    ;

  # One SSH public key per line. Drop blank lines — the file's trailing
  # newline would otherwise produce an empty authorized_keys entry.
  authorizedKeys = filter (key: key != "") (
    splitString "\n" (builtins.readFile ../../authorized_keys.txt)
  );

  util = import ./util.nix { lib = nixpkgs.lib; };

  # Creates a nixosSystem attribute set for the specified node, allowing
  # the node config to be overridden.
  mkSystem =
    {
      hostName,
      node,
      hardware ? node.hw,
      modules ? [ ],
      environment ? "test",
    }:
    nixosSystem {
      system = node.system;

      # `specialArgs` allows access to catalog, environment, etc with
      # hosts and roles. `self` lets a host reference aspects of
      # itself.
      specialArgs = inputs // {
        inherit
          authorizedKeys
          catalog
          environment
          hostName
          util
          ;
        pkgs-unstable = nixpkgs-unstable.legacyPackages.${node.system};
        self = node;
      };

      modules = modules ++ [
        (nodeModule node)
        hardware
        node.config
        agenix.nixosModules.default
        agenix-template.nixosModules.default
        hw-gauge.nixosModules.default
      ];
    };

  # Common system config built from node entry.
  nodeModule =
    node:
    { hostName, ... }:
    {
      networking = {
        inherit hostName;
        domain = "home.arpa";
        # hostId is optional in the catalog; null leaves it unset.
        hostId = node.hostId or null;
      };
    };
in
# Every catalog node is built as a production system.
mapAttrs (
  hostName: node:
  mkSystem {
    inherit hostName node;
    environment = "prod";
  }
) catalog.nodes
75 |
--------------------------------------------------------------------------------
/nixos/nix/util.nix:
--------------------------------------------------------------------------------
{ lib, ... }:
{
  # Populate systemd.network.networks given a catalog `self` entry.
  #
  # Returns an attrset for `systemd.network.networks`; the "10-cluster"
  # unit content only applies when the node declares a static private IP
  # (self.ip.priv), otherwise the hardware default (DHCP) stands.
  mkClusterNetworks = self: {
    # Hardware config defaults to DHCP, make static if ip.priv is set.
    "10-cluster" = lib.mkIf (self ? ip.priv) {
      networkConfig.DHCP = "no";
      # Static address on the /18 cluster subnet, with its gateway.
      address = [ (self.ip.priv + "/18") ];
      gateway = [ "192.168.128.1" ];

      # Static resolvers and search domains.
      dns = [
        "192.168.128.36"
        "192.168.128.37"
        "192.168.128.40"
      ];
      domains = [
        "home.arpa"
        "dyn.home.arpa"
      ];
    };
  };
}
23 |
--------------------------------------------------------------------------------
/nixos/pkgs/README.md:
--------------------------------------------------------------------------------
1 | To build one of these packages, you should run nix-build in this (pkgs)
2 | directory.
3 |
4 | Example:
5 |
6 | nix-build -A homesite
7 |
--------------------------------------------------------------------------------
/nixos/pkgs/cfdyndns.nix:
--------------------------------------------------------------------------------
# CloudFlare dynamic DNS client, built from the sysr-q fork.
{
  lib,
  fetchFromGitHub,
  rustPlatform,
  pkg-config,
  openssl,
}:
with rustPlatform;
buildRustPackage rec {
  pname = "cfdyndns";
  version = "0.0.3";

  src = fetchFromGitHub {
    owner = "sysr-q";
    repo = "cfdyndns";
    rev = "4e703506df0298423a79be3e0efec7ecf6ae8680";
    sha256 = "0plijgr5y58ir9mjvxgm6jszz90pd1g0qjf21z0v5xrzg6bs2sy1";
  };

  # `cargoHash` replaces the deprecated `cargoSha256` attribute; the SRI
  # hash value itself is unchanged.
  cargoHash = "sha256-OLxvob50FXumoxVfj97lOwSDaXncWGZXTf0wltnReQo=";

  nativeBuildInputs = [ pkg-config ];
  buildInputs = [ openssl ];
}
25 |
--------------------------------------------------------------------------------
/nixos/pkgs/default.nix:
--------------------------------------------------------------------------------
# Bootstrap the overlay.
{
  # Path to a nixpkgs checkout; defaults to the nixpkgs on NIX_PATH.
  # (The original `pkgsPath ? ,` was a syntax error — the `<nixpkgs>`
  # default had been lost.)
  pkgsPath ? <nixpkgs>,
}:
import pkgsPath { overlays = [ (import ./overlay.nix) ]; }
6 |
--------------------------------------------------------------------------------
/nixos/pkgs/nomad-usb-device-plugin.nix:
--------------------------------------------------------------------------------
# Nomad device plugin exposing USB devices, built from GitLab source.
{
  lib,
  buildGoModule,
  fetchFromGitLab,
  pkg-config,
  libusb1,
}:
buildGoModule rec {
  pname = "nomad-usb-device-plugin";
  version = "0.4.0";

  src = fetchFromGitLab {
    owner = "CarbonCollins";
    repo = pname;
    rev = version;

    hash = "sha256-k5L07CzQkY80kHszCLhqtZ0LfGGuV07LrHjvdgy04bk=";
  };

  vendorHash = "sha256-gf2E7DTAGTjoo3nEjcix3qWjHJHudlR7x9XJODvb2sk=";

  # NOTE(review): libusb1 is a host library and normally belongs only in
  # buildInputs; listing it in nativeBuildInputs too looks redundant —
  # confirm (cross-compilation aside) before removing.
  nativeBuildInputs = [
    pkg-config
    libusb1
  ];
  buildInputs = [ libusb1 ];
}
28 |
--------------------------------------------------------------------------------
/nixos/pkgs/overlay.nix:
--------------------------------------------------------------------------------
# An overlay of packages we want full control (not just overlays) of.
# Package template: x = final.callPackage ./x { };
final: prev: {
  cfdyndns = final.callPackage ./cfdyndns.nix { };
  nomad-usb-device-plugin = final.callPackage ./nomad-usb-device-plugin.nix { };
}
11 |
--------------------------------------------------------------------------------
/nixos/restart:
--------------------------------------------------------------------------------
#!/usr/bin/env fish

# Reboot a node over SSH.
# Usage: restart <host>   (or set $host in the environment)

test -n "$argv[1]"; and set host $argv[1]

if test -z "$host"
    echo "host env or argument required" >&2
    exit 1
end

set target "root@$host.home.arpa"

echo "Restarting $host"

ssh $target "reboot"
16 |
--------------------------------------------------------------------------------
/nixos/roles/cluster-volumes.nix:
--------------------------------------------------------------------------------
# Role: NFS-mount the environment's cluster volume share and create the
# per-volume directories Nomad host volumes expect.
{
  config,
  lib,
  catalog,
  environment,
  ...
}:
let
  cfg = config.roles.cluster-volumes;
in
{
  options.roles.cluster-volumes = {
    enable = lib.mkEnableOption "Enable NFS mount of catalog cluster volumes";
  };

  config = lib.mkIf cfg.enable {
    fileSystems = {
      # Environment-specific share (e.g. cluster_test vs cluster_prod).
      "/mnt/skynas" = {
        device = "192.168.1.20:/volume1/cluster_${environment}";
        fsType = "nfs";
        options = [
          # Mount lazily on first access rather than at boot.
          "x-systemd.automount"
          "noauto"
        ];
      };
    };

    # One-shot unit that prepares host volume directories before nomad
    # starts.
    systemd.services.host-volume-init = {
      # Create host volume dirs.
      script = lib.concatStringsSep "\n" (
        map (name: ''
          path=${lib.escapeShellArg "/mnt/skynas/${name}"}
          if [ ! -e "$path" ]; then
            mkdir -p "$path"
            chmod 770 "$path"
          fi
        '') catalog.nomad.skynas-host-volumes
      );

      # Run only after the network and remote mounts are up.
      wants = [
        "network-online.target"
        "remote-fs.target"
      ];
      after = [
        "network-online.target"
        "remote-fs.target"
      ];
      # Pulled in by nomad and ordered before it.
      wantedBy = [ "nomad.service" ];
      before = [ "nomad.service" ];
      serviceConfig = {
        Type = "oneshot";
      };
    };
  };
}
56 |
--------------------------------------------------------------------------------
/nixos/roles/consul.nix:
--------------------------------------------------------------------------------
# Role: Consul agent (server and/or client) for the "skynet" datacenter,
# with gossip encryption, TLS, and deny-by-default ACLs.
{
  config,
  pkgs,
  lib,
  catalog,
  self,
  ...
}:
with lib;
let
  cfg = config.roles.consul;
  # Single datacenter name shared by every agent.
  datacenter = "skynet";
in
{
  options.roles.consul = with types; {
    enableServer = mkEnableOption "Enable Consul Server";

    retryJoin = mkOption {
      type = listOf str;
      description = "List of server host or IPs to join to datacenter";
    };

    client = mkOption {
      type = submodule {
        options = {
          enable = mkEnableOption "Enable Consul Client";
          connect = mkEnableOption "Enable Consul Connect mesh";
        };
      };
      default = { };
    };
  };

  config = mkMerge [
    # Configure if either client or server is enabled.
    (mkIf (cfg.enableServer || cfg.client.enable) {
      # Consul shared client & server config.
      services.consul = {
        enable = true;

        extraConfig = {
          inherit datacenter;

          # Bind cluster traffic to the private IP; serve the local API
          # on all interfaces.
          bind_addr = self.ip.priv;
          client_addr = "0.0.0.0";

          # Join every configured server except ourselves.
          retry_join = filter (x: x != self.ip.priv) cfg.retryJoin;
          retry_interval = "15s";

          tls = {
            internal_rpc.verify_server_hostname = true;

            # Encrypt and verify outgoing TLS.
            defaults = {
              ca_file = ./files/consul/consul-agent-ca.pem;
              # Servers override this to true in the server section below.
              verify_incoming = mkDefault false;
              verify_outgoing = true;
            };
          };

          # Deny-by-default ACLs; agent tokens are installed separately.
          acl = {
            enabled = true;
            default_policy = "deny";
            enable_token_persistence = true;
          };
        };

        # Install extra HCL file to hold encryption key.
        extraConfigFiles = [ config.age-template.files."consul-encrypt.hcl".path ];
      };

      age.secrets = {
        consul-encrypt.file = ../secrets/consul-encrypt.age;
      };

      # Create envfiles containing encryption keys.
      age-template.files = {
        "consul-encrypt.hcl" = {
          vars.encrypt = config.age.secrets.consul-encrypt.path;
          content = ''encrypt = "$encrypt"'';
          owner = "consul";
        };
      };

      # Don't start consul until the network is actually online.
      systemd.services.consul = {
        after = [ "network-online.target" ];
        wants = [ "network-online.target" ];
      };

      # Consul's standard ports: 8300 server RPC, 8301/8302 LAN/WAN
      # gossip, 8500/8501 HTTP/HTTPS API, 8502/8503 gRPC, 8600 DNS.
      networking.firewall.allowedTCPPorts = [
        8300
        8301
        8302
        8500
        8501
        8502
        8503
        8600
      ];
      networking.firewall.allowedUDPPorts = [
        8301
        8302
        8600
      ];
    })

    (mkIf (cfg.client.enable && cfg.client.connect) {
      # Consul service mesh config.
      services.consul = {
        extraConfig = {
          connect.enabled = true;
          ports.grpc = 8502;
          ports.grpc_tls = 8503;
        };
      };
    })

    (mkIf cfg.enableServer {
      # Consul server config.
      services.consul = {
        webUi = true;

        extraConfig = {
          server = true;

          # Wait for three servers before electing a leader.
          bootstrap_expect = 3;

          # Encrypt and verify TLS.
          tls.defaults = {
            cert_file = ./files/consul/skynet-server-consul-0.pem;
            key_file = config.age.secrets."skynet-server-consul-0-key.pem".path;

            verify_incoming = true;
          };

          # Create certs for clients.
          connect.enabled = true;
          auto_encrypt.allow_tls = true;
        };
      };

      age.secrets = {
        "skynet-server-consul-0-key.pem" = {
          file = ../secrets/skynet-server-consul-0-key.pem.age;
          owner = "consul";
        };
      };
    })

    (mkIf (cfg.client.enable && !cfg.enableServer) {
      # Consul client only config.
      services.consul = {
        extraConfig = {
          # Get our certificate from the server.
          auto_encrypt.tls = true;
        };

        # Install extra HCL file to hold encryption key.
        extraConfigFiles = [ config.age-template.files."consul-agent-token.hcl".path ];
      };

      # Template config file for agent token.
      age-template.files = {
        "consul-agent-token.hcl" = {
          vars.token = config.age.secrets.consul-agent-token.path;
          content = ''acl { tokens { default = "$token" } }'';
          owner = "consul";
        };
      };

      age.secrets = {
        "consul-agent-token" = {
          file = ../secrets/consul-agent-token.age;
          owner = "consul";
        };
      };
    })
  ];
}
180 |
--------------------------------------------------------------------------------
/nixos/roles/default.nix:
--------------------------------------------------------------------------------
{ ... }:
{
  # Aggregate every role module (alphabetized). Each role gates its
  # config behind its own roles.<name> options, so importing all of
  # them unconditionally is safe.
  imports = [
    ./cluster-volumes.nix
    ./consul.nix
    ./dns.nix
    ./gateway-online.nix
    ./gui-wayland.nix
    ./gui-xorg.nix
    ./homesite.nix
    ./influxdb.nix
    ./log-forwarder.nix
    ./loki.nix
    ./mosquitto.nix
    ./nfs-bind.nix
    ./nomad.nix
    ./tailscale.nix
    ./telegraf.nix
    ./traefik.nix
    ./upsmon.nix
    ./websvc.nix
    ./workstation.nix
  ];
}
25 |
--------------------------------------------------------------------------------
/nixos/roles/dns.nix:
--------------------------------------------------------------------------------
# Role: BIND resolver/authoritative server for the home network zones.
{
  config,
  pkgs,
  lib,
  self,
  catalog,
  ...
}:
with lib;
let
  cfg = config.roles.dns;
in
{
  options.roles.dns = with lib.types; {
    bind = mkOption {
      type = submodule {
        options = {
          enable = mkEnableOption "Run bind DNS server";
          serveLocalZones = mkEnableOption "Serve local zone files directly";
        };
      };
      default = { };
    };
  };

  config =
    let
      # Writable directory for transferred/updated zone files.
      namedWorkDir = "/var/lib/named";

      # Networks allowed to receive zone transfers.
      transferAddrs = [
        "192.168.1.0/24"
        "192.168.128.0/18"
      ];

      # Zones owned by the UniFi gateway; forwarded, not served.
      unifiZones = [
        "dyn.home.arpa."
        "cluster.home.arpa."
      ];

      # Derive name/file/path plus a bootstrap zone file for `name`.
      mkZone = name: rec {
        inherit name;
        file = "${name}.zone";
        path = "${namedWorkDir}/${file}";

        # Barebones zone file that will be overwritten by transfers.
        emptyZone = pkgs.writeText file ''
          $ORIGIN ${name}.
          @ 3600 SOA ns1.${name}. (
            zone-admin.home.arpa.
            1 ; serial number
            3600 ; refresh period
            600 ; retry period
            604800 ; expire time
            1800 ; min TTL
          )

          @ 600 IN NS ns1
          ns1 600 IN A ${catalog.dns.ns1}
        '';
      };

      bytemonkeyZone = mkZone "bytemonkey.org";
      homeZone = mkZone "home.arpa";
    in
    mkIf cfg.bind.enable {
      networking.resolvconf = {
        # 127.0.0.1 is not useful in containers, instead we will use our
        # private IP.
        useLocalResolver = false;
        extraConfig = ''
          name_servers='${self.ip.priv} ${catalog.dns.ns1}'
        '';
      };

      services.bind = mkIf cfg.bind.enable {
        enable = true;

        # Recursive resolution for all clients; forward misses upstream.
        cacheNetworks = [ "0.0.0.0/0" ];
        forwarders = [
          "1.1.1.1"
          "8.8.8.8"
        ];

        # Dynamic updates via rndc-key; skip DNSSEC validation for the
        # consul pseudo-TLD.
        extraOptions = ''
          allow-update { key "rndc-key"; };

          dnssec-validation auto;

          validate-except { "consul"; };
        '';

        # Both zones: master (transfers allowed to transferAddrs) when
        # serveLocalZones is set, otherwise slaved from ns1.
        zones = builtins.listToAttrs (
          map
            (zone: {
              name = zone.name + ".";
              value =
                if cfg.bind.serveLocalZones then
                  {
                    master = true;
                    slaves = transferAddrs;
                    file = zone.path;
                  }
                else
                  {
                    master = false;
                    masters = [ "${catalog.dns.ns1}" ];
                    file = zone.path;
                  };
            })
            [
              bytemonkeyZone
              homeZone
            ]
        );

        extraConfig =
          let
            # Forward the UniFi-owned zones to the gateway.
            unifiForwardZones = concatMapStrings (zone: ''
              zone "${zone}" {
                type forward;
                forward only;
                forwarders { 192.168.1.1; };
              };
            '') unifiZones;
          in
          # The consul. zone is answered by the local consul agent's DNS
          # interface on port 8600.
          ''
            zone "consul." IN {
              type forward;
              forward only;
              forwarders { 127.0.0.1 port 8600; };
            };

            ${unifiForwardZones}
          '';
      };

      # Setup named work directory during activation.
      system.activationScripts.init-named-zones =
        let
          copyZone = zone: ''
            # Copy zone file if it does not already exist.
            if [[ ! -e "${zone.path}" ]]; then
              cp "${zone.emptyZone}" "${zone.path}"
              chown named: "${zone.path}"
            fi
          '';
        in
        ''
          mkdir -p ${namedWorkDir}
          chown named: ${namedWorkDir}
        ''
        + (
          if cfg.bind.serveLocalZones then
            ''
              ${copyZone bytemonkeyZone}
              ${copyZone homeZone}
            ''
          else
            ""
        );

      networking.firewall.allowedTCPPorts = [ 53 ];
      networking.firewall.allowedUDPPorts = [ 53 ];
    };
}
166 |
--------------------------------------------------------------------------------
/nixos/roles/files/consul/consul-agent-ca.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIC7TCCApSgAwIBAgIRAMVr8frvRQlVhvHIO7SthBowCgYIKoZIzj0EAwIwgbkx
3 | CzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNj
4 | bzEaMBgGA1UECRMRMTAxIFNlY29uZCBTdHJlZXQxDjAMBgNVBBETBTk0MTA1MRcw
5 | FQYDVQQKEw5IYXNoaUNvcnAgSW5jLjFAMD4GA1UEAxM3Q29uc3VsIEFnZW50IENB
6 | IDI2MjQxODM5ODg3NTI5NzEzOTQ0NDgwOTU2OTgxNTYyNzY2MjM2MjAeFw0yMjA1
7 | MTUyMzQ1NDhaFw0yNzA1MTQyMzQ1NDhaMIG5MQswCQYDVQQGEwJVUzELMAkGA1UE
8 | CBMCQ0ExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xGjAYBgNVBAkTETEwMSBTZWNv
9 | bmQgU3RyZWV0MQ4wDAYDVQQREwU5NDEwNTEXMBUGA1UEChMOSGFzaGlDb3JwIElu
10 | Yy4xQDA+BgNVBAMTN0NvbnN1bCBBZ2VudCBDQSAyNjI0MTgzOTg4NzUyOTcxMzk0
11 | NDQ4MDk1Njk4MTU2Mjc2NjIzNjIwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAARO
12 | /rMdj/SyG9Ai8yMx7Ap8d/9HA4BUHMsMlDQ04Lz6Cau6uQUzLLSt77xFTtE+bPnQ
13 | 4IADaROa/Wew0rfsDB40o3sweTAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUw
14 | AwEB/zApBgNVHQ4EIgQgSqBK+951pBNp/96hj1zM4rXlB8jptCqPE/4XPfuP4dAw
15 | KwYDVR0jBCQwIoAgSqBK+951pBNp/96hj1zM4rXlB8jptCqPE/4XPfuP4dAwCgYI
16 | KoZIzj0EAwIDRwAwRAIgFf7/3okJAjEnWh1hLWEicF2N7RzZhBHYXE7s6nBBqusC
17 | IHp0cuElTeSXhrtN6ENizXWgMuwvxQ89lLG9xon0bAsr
18 | -----END CERTIFICATE-----
19 |
--------------------------------------------------------------------------------
/nixos/roles/files/consul/skynet-server-consul-0.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIICozCCAkmgAwIBAgIRAO4vKHJPq0+wqIKrv+AlTsIwCgYIKoZIzj0EAwIwgbkx
3 | CzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNj
4 | bzEaMBgGA1UECRMRMTAxIFNlY29uZCBTdHJlZXQxDjAMBgNVBBETBTk0MTA1MRcw
5 | FQYDVQQKEw5IYXNoaUNvcnAgSW5jLjFAMD4GA1UEAxM3Q29uc3VsIEFnZW50IENB
6 | IDI2MjQxODM5ODg3NTI5NzEzOTQ0NDgwOTU2OTgxNTYyNzY2MjM2MjAeFw0yNDA2
7 | MDExOTQzMDRaFw0yNTA2MDExOTQzMDRaMB8xHTAbBgNVBAMTFHNlcnZlci5za3lu
8 | ZXQuY29uc3VsMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEK+QbLk4hBxXVwSKK
9 | Lm4MIbrQQ3nxs2mnKlAReOxYJte2QxiVwMWkYxAIXQvKZKy5fcdDVvs0xBklANxY
10 | Nd+pL6OByjCBxzAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEG
11 | CCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwKQYDVR0OBCIEIDmBY9SowebA0tJ6cCJv
12 | EUw2PaNonfnUSofCzjyIejvVMCsGA1UdIwQkMCKAIEqgSvvedaQTaf/eoY9czOK1
13 | 5QfI6bQqjxP+Fz37j+HQMDAGA1UdEQQpMCeCFHNlcnZlci5za3luZXQuY29uc3Vs
14 | gglsb2NhbGhvc3SHBH8AAAEwCgYIKoZIzj0EAwIDSAAwRQIhAIA38xujlApmDDGU
15 | PRKuUrXfhAnLgcTXkatRECAVRew8AiBomZr+3fnpET50uodytoftWwto8MLq70yt
16 | Xsb+KamKOA==
17 | -----END CERTIFICATE-----
18 |
--------------------------------------------------------------------------------
/nixos/roles/files/nomad/nomad-ca.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIB5zCCAY6gAwIBAgIUJ/EnXzDrwIrtUhnDp1SxalAh7DAwCgYIKoZIzj0EAwIw
3 | UjELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAldBMRAwDgYDVQQHEwdSZWRtb25kMSQw
4 | IgYDVQQDExtub21hZC1jZnNzbC1jYS5za3luZXQubG9jYWwwHhcNMjIwNTIxMTcx
5 | OTAwWhcNMjcwNTIwMTcxOTAwWjBSMQswCQYDVQQGEwJVUzELMAkGA1UECBMCV0Ex
6 | EDAOBgNVBAcTB1JlZG1vbmQxJDAiBgNVBAMTG25vbWFkLWNmc3NsLWNhLnNreW5l
7 | dC5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABD0+QB3FVvKx7tSkviJF
8 | pByMB+s5e3QkYGemoQPQY7XLwVyoLaimP4Xm859TaaUePDxuTXKvY7CSBrB5u7ST
9 | zgqjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW
10 | BBRnH99g5gDZGtXTj7NWmjVFbbYVaDAKBggqhkjOPQQDAgNHADBEAiBikM9KBCGG
11 | Ytrokzmp/ki4qGaGsTyfPEnLB2XwXCGeMgIgPFzTWo5tLZqocSOaln4gt3ixOQQ2
12 | G7iJlyx6HJkM+h4=
13 | -----END CERTIFICATE-----
14 |
--------------------------------------------------------------------------------
/nixos/roles/files/nomad/server-client.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIICXjCCAgOgAwIBAgIUON7RkH9P60JEOC7Z+J3GQnGE6SQwCgYIKoZIzj0EAwIw
3 | UjELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAldBMRAwDgYDVQQHEwdSZWRtb25kMSQw
4 | IgYDVQQDExtub21hZC1jZnNzbC1jYS5za3luZXQubG9jYWwwHhcNMjIwNTMxMDQw
5 | MjAwWhcNMjcwNTMwMDQwMjAwWjAfMR0wGwYDVQQDExRub21hZC5zZXJ2aWNlLmNv
6 | bnN1bDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABDZzm35TrzosGWxfpTU7qDun
7 | dfkIv+d7kXkZTeA2KtNjSGUfJD7nzxFctDtFBhEeNJ/Qnd98/teOOQWh+zxHiBqj
8 | gekwgeYwDgYDVR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEF
9 | BQcDAjAMBgNVHRMBAf8EAjAAMB0GA1UdDgQWBBTzpt/xUz/vygKmN2KH+V9YwOD0
10 | yDAfBgNVHSMEGDAWgBRnH99g5gDZGtXTj7NWmjVFbbYVaDBnBgNVHREEYDBeghNz
11 | ZXJ2ZXIuZ2xvYmFsLm5vbWFkghNjbGllbnQuZ2xvYmFsLm5vbWFkggsqLmhvbWUu
12 | YXJwYYIUbm9tYWQuc2VydmljZS5jb25zdWyCCWxvY2FsaG9zdIcEfwAAATAKBggq
13 | hkjOPQQDAgNJADBGAiEAqnCM1one2m72zZ2wxEaK1LYx2Tu1ONDXkyWlxTXMkzkC
14 | IQDOOLuNGgdB7EqyEH6THifBpeQTfhAF40zF/qBCAFHGng==
15 | -----END CERTIFICATE-----
16 |
--------------------------------------------------------------------------------
/nixos/roles/files/nut/upssched-cmd:
--------------------------------------------------------------------------------
#!/bin/sh

# upssched dispatch script: logs UPS power events and forces a
# shutdown (upsmon -c fsd) when battery state becomes critical.
# Invoked with a single event-name argument.

swbin=/run/current-system/sw/bin

# Log one message under the upssched-cmd tag.
log() {
    $swbin/logger -t upssched-cmd "$1"
}

case $1 in
    onbatt)
        log "UPS running on battery"
        ;;
    earlyshutdown)
        log "UPS on battery too long, early shutdown"
        $swbin/upsmon -c fsd
        ;;
    shutdowncritical)
        log "UPS on battery critical, forced shutdown"
        $swbin/upsmon -c fsd
        ;;
    upsgone)
        log "UPS has been gone too long, can't reach"
        ;;
    *)
        log "Unrecognized command: $1"
        ;;
esac
24 |
--------------------------------------------------------------------------------
/nixos/roles/files/telegraf/zfs_snap_times.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import subprocess
4 | import time
5 | import re
6 |
# zfs invocation: every snapshot, newest first, as tab-separated
# "name<TAB>creation" lines (-H no header, -p exact epoch numbers).
cmd = [ "zfs", "list", "-H", "-p", "-t", "snapshot", "-o", "name,creation", "-S", "creation" ]
8 |
def sanitize_tag(value):
    """Backslash-escape influx tag-value special characters (comma, equals, space)."""
    return "".join("\\" + ch if ch in ",= " else ch for ch in value)
11 |
# Run zfs; stdout and stderr are both captured (stderr is ignored, and a
# non-zero exit simply yields no output lines).
result = subprocess.run(cmd, capture_output=True)
now_ns = time.time_ns()
now_s = int(time.time())

# Loop over snapshots, one per line: "<volume>@<snapname>\t<creation-epoch>"
for line in result.stdout.decode("utf-8").split("\n"):
    if line == "":
        continue
    (fullname, created) = line.split("\t")
    (volume, name) = fullname.split("@")

    created_s = int(created)
    age_s = now_s - created_s

    # Classify the snapshot by its name prefix into the tool that made it.
    creator = "other"
    if name.startswith("sanoid"):
        creator = "sanoid"
    elif name.startswith("syncoid"):
        creator = "syncoid"

    # Output metric in influx format.
    # NOTE(review): the trailing "u" marks unsigned integers in influx
    # line protocol — confirm the consuming parser accepts it (older
    # parsers only accept the "i" integer suffix).
    print('zfs.snapshot,volume=%s,creator=%s created=%su,age_seconds=%du %d' % (
        sanitize_tag(volume), creator, created, age_s, now_ns))
35 |
--------------------------------------------------------------------------------
/nixos/roles/gateway-online.nix:
--------------------------------------------------------------------------------
# Role: hold back network-online.target until the LAN gateway answers
# pings, so dependent services don't start on a half-up network.
{
  config,
  pkgs,
  lib,
  ...
}:
with lib;
let
  cfg = config.roles.gateway-online;
in
{
  options.roles.gateway-online = {
    addr = mkOption {
      type = types.nullOr types.str;
      description = "Name or address of gateway to ping";
      default = null;
    };
  };

  # Only active when a gateway address is configured.
  config = mkIf (cfg.addr != null) {
    # Delay network-online.target until gateway address is pingable.
    systemd.services."gateway-online" = {
      enable = true;
      before = [ "network-online.target" ];
      after = [ "nss-lookup.target" ];
      wantedBy = [ "network-online.target" ];

      unitConfig = {
        # Drop implicit ordering dependencies so this can run early in
        # boot.
        DefaultDependencies = "no";
      };

      serviceConfig = {
        # Poll once a second until a single ping succeeds.
        ExecStart = ''
          /bin/sh -c "while ! ${pkgs.iputils}/bin/ping -c 1 \
            ${escapeShellArg cfg.addr}; do sleep 1; done"
        '';
      };
    };
  };
}
41 |
--------------------------------------------------------------------------------
/nixos/roles/gui-wayland.nix:
--------------------------------------------------------------------------------
# Role: Wayland desktop (sway + greetd) with desktop applications,
# audio, bluetooth, and printing.
{
  config,
  lib,
  pkgs,
  ...
}:
let
  inherit (lib) mkEnableOption mkIf;
  cfg = config.roles.gui-wayland;
in
{
  options.roles.gui-wayland = {
    enable = mkEnableOption "Wayland GUI";
  };

  config = mkIf cfg.enable {
    environment.systemPackages =
      let
        # Wrapper scripts that point generic command names at our
        # preferred applications.
        remaps = [
          (pkgs.writeShellScriptBin "x-www-browser" ''
            exec ${pkgs.firefox}/bin/firefox "$@"
          '')
        ];
      in
      (with pkgs; [
        alsa-utils
        audacity
        clipman
        dunst
        firefox
        gedit
        gimp
        google-chrome
        i3-balance-workspace
        libnotify # for notify-send
        lxappearance
        obs-studio
        pantheon.elementary-icon-theme
        pavucontrol
        rofi-wayland
        slurp # region selector
        virt-manager
        wl-clipboard # clipboard commands
        xfce.ristretto # image viewer
        yambar
      ])
      ++ remaps;

    # Enable Ozone Wayland support in Chromium and Electron based applications
    # Still breaks camera in Chrome.
    # environment.sessionVariables.NIXOS_OZONE_WL = "1";

    programs.dconf.enable = true;

    programs.thunar = {
      enable = true;
      plugins = with pkgs.xfce; [ thunar-volman ];
    };

    programs.sway = {
      enable = true;
      wrapperFeatures.gtk = true;
      # --unsupported-gpu: presumably for the nvidia driver used on
      # these hosts — TODO confirm still required.
      extraOptions = [ "--unsupported-gpu" ];
    };

    # NOTE(review): gdm and greetd are both enabled here — confirm the
    # two login managers are intended to coexist.
    services.xserver.displayManager.gdm.enable = true;
    services.greetd = {
      enable = true;
      settings = {
        default_session = {
          # Blank/powersave the console, then run tuigreet, which
          # launches sway on login.
          command = pkgs.writeShellScript "start-tuigreet" ''
            setterm --blank=10
            setterm --powersave on
            ${pkgs.greetd.tuigreet}/bin/tuigreet --time --cmd sway
          '';
          user = "greeter";
        };
      };

      # Avoid kernel messages.
      vt = 7;
    };

    # Used by thunar.
    services.gvfs.enable = true;
    services.tumbler.enable = true;

    services.libinput.enable = true;
    services.libinput.mouse.accelProfile = "flat";

    fonts.packages = with pkgs; [
      font-awesome
      fira-code
      inconsolata
      noto-fonts
      terminus_font
    ];

    # Enable sound.
    security.rtkit.enable = true;
    services.pipewire = {
      enable = true;
      alsa.enable = true;
      alsa.support32Bit = true;
      pulse.enable = true;
    };

    hardware.bluetooth.enable = true;
    hardware.bluetooth.powerOnBoot = true;
    services.blueman.enable = true;

    # IPP Printer support.
    services.printing.enable = true;
    services.avahi = {
      enable = true;
      nssmdns4 = true;
      openFirewall = true;
    };
  };
}
121 |
--------------------------------------------------------------------------------
/nixos/roles/gui-xorg.nix:
--------------------------------------------------------------------------------
# Role: gui-xorg - an i3/Xorg desktop environment with audio, bluetooth,
# and printing support.
{
  config,
  lib,
  pkgs,
  ...
}:
let
  inherit (lib) mkEnableOption mkIf;
  cfg = config.roles.gui-xorg;
in
{
  options.roles.gui-xorg = {
    enable = mkEnableOption "Xorg GUI";
  };

  config = mkIf cfg.enable {
    environment.systemPackages =
      let
        # Wrapper scripts remapping well-known command names to our
        # preferred implementations.
        remaps = [
          (pkgs.writeShellScriptBin "x-www-browser" ''
            exec ${pkgs.firefox}/bin/firefox "$@"
          '')
        ];
      in
      (with pkgs; [
        audacity
        dmenu
        dunst
        firefox
        gedit
        gimp
        google-chrome
        i3-balance-workspace
        libnotify # for notify-send
        lxappearance
        maim # takes screenshots
        obs-studio
        pantheon.elementary-icon-theme
        polybarFull
        pavucontrol
        rofi
        rxvt-unicode
        sxhkd
        virt-manager
        xclip
        xfce.ristretto # image viewer
        xorg.xdpyinfo
        xorg.xev
        xsecurelock
        xss-lock
      ])
      ++ remaps;

    programs.dconf.enable = true;

    # Thunar file manager, with removable media support.
    programs.thunar = {
      enable = true;
      plugins = with pkgs.xfce; [ thunar-volman ];
    };

    # Used by thunar.
    services.gvfs.enable = true;
    services.tumbler.enable = true;

    services.xserver = {
      enable = true;
      xkb.layout = "us";

      windowManager.i3.enable = true;
    };

    services.libinput.enable = true;
    services.libinput.mouse.accelProfile = "flat";

    fonts.packages = with pkgs; [
      fira-code
      inconsolata
      noto-fonts
      siji
      terminus_font
      unifont
    ];

    # Enable sound.
    security.rtkit.enable = true;
    services.pipewire = {
      enable = true;
      alsa.enable = true;
      alsa.support32Bit = true;
      pulse.enable = true;
    };

    hardware.bluetooth.enable = true;
    hardware.bluetooth.powerOnBoot = true;
    services.blueman.enable = true;

    # IPP Printer support.
    services.printing.enable = true;
    services.avahi = {
      enable = true;
      nssmdns4 = true;
      openFirewall = true;
    };

    # Mouse button mappings.
    environment.etc."X11/xorg.conf.d/99-mouse-buttons.conf".text = ''
      Section "InputClass"
        Identifier "SONiX Evoluent VerticalMouse D"
        Option "ButtonMapping" "1 2 3 4 5 6 7 10 9 8"
      EndSection
    '';
  };
}
114 |
--------------------------------------------------------------------------------
/nixos/roles/homesite.nix:
--------------------------------------------------------------------------------
# Role: homesite - serves the home dashboard site plus its JSON config.
{
  config,
  pkgs,
  lib,
  homesite,
  ...
}:
let
  inherit (lib) mkEnableOption mkIf mkOption types;

  cfg = config.roles.homesite;

  # Schema for a single service link displayed on the dashboard.
  serviceModule = types.submodule {
    options = {
      name = mkOption { type = types.str; };
      host = mkOption { type = types.str; };
      port = mkOption {
        type = types.nullOr types.port;
        default = null;
      };
      path = mkOption {
        type = types.path;
        default = "/";
      };
      proto = mkOption {
        type = types.enum [
          "http"
          "https"
        ];
        default = "https";
      };
      icon = mkOption { type = types.str; };
    };
  };

  # Schema for a titled group of service links.
  sectionModule = types.submodule {
    options = {
      title = mkOption { type = types.str; };
      services = mkOption {
        type = types.listOf serviceModule;
        description = "Service links";
        default = [ ];
      };
    };
  };
in
{
  options.roles.homesite = {
    enable = mkEnableOption "Enable home website";

    sections = mkOption {
      type = types.listOf sectionModule;
      default = [ ];
    };
  };

  config =
    let
      # JSON payload the site fetches from /config/data.json.
      configDir = pkgs.writeTextDir "data.json" (builtins.toJSON { sections = cfg.sections; });
    in
    mkIf cfg.enable {
      services.nginx = {
        enable = true;
        virtualHosts."homesite" = {
          # Static site contents come from the homesite flake input.
          root = "${homesite.defaultPackage.x86_64-linux}";

          locations."/config/".alias = "${configDir}/";

          listen = [
            {
              addr = "0.0.0.0";
              port = 12701;
              ssl = false;
            }
          ];
        };
      };

      networking.firewall.allowedTCPPorts = [ 12701 ];
    };
}
87 |
--------------------------------------------------------------------------------
/nixos/roles/influxdb.nix:
--------------------------------------------------------------------------------
1 | {
2 | config,
3 | pkgs,
4 | lib,
5 | ...
6 | }:
7 | let
8 | inherit (lib)
9 | mapAttrsToList
10 | mkEnableOption
11 | mkIf
12 | mkOption
13 | ;
14 | inherit (lib.types)
15 | attrsOf
16 | path
17 | port
18 | str
19 | submodule
20 | ;
21 | cfg = config.roles.influxdb;
22 | in
23 | {
24 | options.roles.influxdb = {
25 | enable = mkEnableOption "Enable InfluxDB role";
26 |
27 | port = mkOption {
28 | type = port;
29 | description = "API port. Do not change, for reference only";
30 | default = 8086;
31 | };
32 |
33 | adminUser = mkOption {
34 | type = str;
35 | description = "Database admin username";
36 | default = "admin";
37 | };
38 |
39 | adminPasswordFile = mkOption {
40 | type = path;
41 | description = "Database admin password file";
42 | };
43 |
44 | databases = mkOption {
45 | type = attrsOf (submodule {
46 | options = {
47 | user = mkOption { type = str; };
48 | passwordFile = mkOption { type = str; };
49 | retention = mkOption {
50 | type = str;
51 | default = "104w";
52 | };
53 | };
54 | });
55 | description = "Influx databases";
56 | default = { };
57 | };
58 | };
59 |
60 | config =
61 | let
62 | createDb = name: db: ''
63 | CREATE DATABASE "${name}";
64 | CREATE USER "${db.user}" WITH PASSWORD '$(< ${db.passwordFile})';
65 | GRANT ALL ON "${name}" TO "${db.user}";
66 | ALTER RETENTION POLICY "autogen" ON "${name}" DURATION ${db.retention};
67 | '';
68 |
69 | initSql = lib.concatStringsSep "\n" (mapAttrsToList createDb cfg.databases);
70 | in
71 | mkIf cfg.enable {
72 | environment.systemPackages = [ pkgs.influxdb ]; # for diagnostics
73 |
74 | services.influxdb = {
75 | enable = true;
76 | extraConfig.http.auth-enabled = true;
77 | };
78 |
79 | systemd.services.influxdb-init = {
80 | enable = true;
81 | description = "Configure influxdb at first boot";
82 | wantedBy = [ "multi-user.target" ];
83 | after = [ "influxdb.service" ];
84 |
85 | serviceConfig = {
86 | Type = "oneshot";
87 | RemainAfterExit = true;
88 | };
89 |
90 | script = ''
91 | export INFLUX_USERNAME=${lib.escapeShellArg cfg.adminUser}
92 | export INFLUX_PASSWORD="$(< ${cfg.adminPasswordFile})"
93 | lockfile=/var/db/influxdb-init-completed
94 | set -eo pipefail
95 |
96 | if [ -f "$lockfile" ]; then
97 | exit
98 | fi
99 |
100 | touch "$lockfile"
101 | ${pkgs.influxdb}/bin/influx < 0) {
91 | systemd.services.nfs-bind-init = {
92 | script = lib.concatStringsSep "\n" (mapAttrsToList setupDir cfg.binds);
93 | wantedBy = [ "multi-user.target" ];
94 | after = [ "remote-fs.target" ];
95 | before = cfg.before;
96 | serviceConfig = {
97 | Type = "oneshot";
98 | };
99 | };
100 |
101 | # Create fstab bindings; e.g. mount /data/grafana at /var/lib/grafana
102 | fileSystems = (mapAttrs' fsBindEntry (filterAttrs (name: bind: bind.path != null) cfg.binds)) // {
103 | # Mount NFS volume
104 | "${cfg.mountPoint}" = {
105 | device = cfg.nfsPath;
106 | fsType = "nfs";
107 | };
108 | };
109 | };
110 | }
111 |
--------------------------------------------------------------------------------
/nixos/roles/tailscale.nix:
--------------------------------------------------------------------------------
# Role: tailscale - join this node to the tailnet.
{
  config,
  pkgs,
  lib,
  catalog,
  ...
}:
let
  inherit (lib) mkEnableOption mkIf mkOption types;

  cfg = config.roles.tailscale;

  # Tailnet interface name shared across the fleet via the catalog.
  tailnetIf = catalog.tailscale.interface;
in
{
  options.roles.tailscale = {
    enable = mkEnableOption "Enable Tailscale daemon";

    exitNode = mkEnableOption "Register as an exit node";

    useAuthKey = mkOption {
      type = types.bool;
      description = "Use secrets/tailscale.age for auto-join key";
      default = true;
    };
  };

  config = mkIf cfg.enable {
    # The tailscale daemon itself; joins automatically when an auth key
    # file is configured below.
    services.tailscale = {
      enable = true;
      interfaceName = tailnetIf;

      authKeyFile = mkIf cfg.useAuthKey config.age.secrets.tailscale.path;
      extraUpFlags = mkIf cfg.exitNode [ "--advertise-exit-node" ];
    };

    # Decrypt the auth key only when auto-join is requested.
    age.secrets.tailscale.file = mkIf cfg.useAuthKey ../secrets/tailscale.age;

    networking.firewall = {
      # Trust inbound tailnet traffic.
      trustedInterfaces = [ tailnetIf ];

      # Allow tailscale through firewall.
      allowedUDPPorts = [ config.services.tailscale.port ];
    };
  };
}
46 |
--------------------------------------------------------------------------------
/nixos/roles/telegraf.nix:
--------------------------------------------------------------------------------
# Role: telegraf - host metrics collection shipped to InfluxDB.
{
  config,
  pkgs,
  lib,
  ...
}:
let
  inherit (lib) mkEnableOption mkIf mkOption;
  inherit (lib.types)
    attrs
    bool
    listOf
    str
    ;

  cfg = config.roles.telegraf;

  # True when the given list option has at least one entry.
  nonEmpty = xs: xs != [ ];
in
{
  options.roles.telegraf = {
    enable = mkEnableOption "Telegraf node";

    influxdb = mkOption {
      type = attrs;
      description = "Influxdb output options";
    };

    http_response = mkOption {
      type = listOf attrs;
      description = "Telegraf http_response monitoring input config";
      default = [ ];
    };

    ping = mkOption {
      type = listOf str;
      description = "List of hosts for telegraf to ping";
      default = [ ];
    };

    nomad = mkOption {
      type = bool;
      description = "Scrape local nomad metrics exposed via prometheus";
      default = false;
    };

    x509_certs = mkOption {
      type = listOf str;
      description = "List of URLs to monitor for certificate expiration";
      default = [ ];
    };

    zfs = mkOption {
      type = bool;
      description = "Collect ZFS snapshot metrics";
      default = false;
    };
  };

  config = mkIf cfg.enable {
    services.telegraf = {
      enable = true;

      extraConfig = {
        inputs = {
          # Baseline host metrics, always collected.
          cpu.percpu = true;
          disk = { };
          kernel = { };
          mem = { };
          net = { };
          netstat = { };
          processes = { };
          swap = { };
          system = { };

          # Optional HTTP endpoint probes.
          http_response = mkIf (nonEmpty cfg.http_response) (
            map (probe: probe // { interval = "30s"; }) cfg.http_response
          );

          # Optional ICMP reachability probes, one input per host.
          ping = mkIf (nonEmpty cfg.ping) (
            map (host: {
              urls = [ host ];
              interval = "30s";
              binary = "${pkgs.iputils}/bin/ping";
            }) cfg.ping
          );

          # Scrape the local Nomad agent's metrics endpoint.
          nomad = mkIf cfg.nomad {
            url = "https://127.0.0.1:4646";
            insecure_skip_verify = true;
          };

          # TLS certificate expiry monitoring.
          x509_cert = mkIf (nonEmpty cfg.x509_certs) { sources = cfg.x509_certs; };

          # ZFS snapshot ages, emitted in influx line format by a script.
          exec = mkIf cfg.zfs {
            commands = [ ./files/telegraf/zfs_snap_times.py ];
            timeout = "5s";
            data_format = "influx";
            environment = [ "PATH=/run/current-system/sw/bin" ];
          };
        };

        outputs.influxdb = {
          inherit (cfg.influxdb) urls database;
          username = cfg.influxdb.user;
          # Substituted at service start from the environment file below.
          password = "$PASSWORD";
        };
      };

      environmentFiles = [ config.age-template.files."telegraf-influx.env".path ];
    };

    # Create an environment file containing the influxdb password.
    age-template.files."telegraf-influx.env" = {
      vars.password = cfg.influxdb.passwordFile;
      content = ''PASSWORD=$password'';
    };
  };
}
125 |
--------------------------------------------------------------------------------
/nixos/roles/traefik.nix:
--------------------------------------------------------------------------------
# Role: traefik - reverse proxy with Let's Encrypt DNS-01 certificates,
# consul catalog service discovery, and authelia forward-auth for
# externally exposed services.
{
  config,
  pkgs,
  lib,
  catalog,
  ...
}:
with lib;
let
  cfg = config.roles.traefik;
in
{
  options.roles.traefik = {
    enable = mkEnableOption "Enable traefik daemon";

    autheliaUrl = mkOption {
      type = types.str;
      description = "forwardAuth URL for authelia service";
      example = "http://localhost:9091/api/verify?rd=https://login.example.com/";
    };

    # TODO: Too much abstraction, let websvc role handle this.
    services = mkOption {
      type =
        with types;
        attrsOf (submodule {
          options = {
            # Hostname the router matches on.
            domainName = mkOption { type = str; };
            # One or more upstream URLs to balance across.
            backendUrls = mkOption { type = listOf str; };
            # Enable cookie-based session affinity.
            sticky = mkOption {
              type = bool;
              default = false;
            };
            # Host header override for health checks (null = none).
            checkHost = mkOption {
              type = nullOr str;
              default = null;
            };
            checkPath = mkOption {
              type = str;
              default = "/";
            };
            checkInterval = mkOption {
              type = str;
              default = "15s";
            };
            # Also expose on the external entry point.
            external = mkOption {
              type = bool;
              default = false;
            };
            # Require authelia auth for external requests.
            externalAuth = mkOption {
              type = bool;
              default = true;
            };
          };
        });
      description = "Services to proxy";
      default = { };
    };

    certificateEmail = mkOption {
      type = types.str;
      description = "Email passed to Let's Encrypt";
    };

    cloudflareDnsApiTokenFile = mkOption {
      type = types.path;
      description = "File containing API token with DNS:Edit permission";
    };
  };

  config = mkIf cfg.enable {
    services.traefik = {
      enable = true;

      staticConfigOptions = {
        api.dashboard = true;

        entryPoints =
          let
            catalogEntrypoints =
              # Convert catalog name=addr to name.address=addr for traefik.
              mapAttrs (name: address: { inherit address; }) catalog.traefik.entrypoints;
          in
          {
            web = {
              address = ":80/tcp";

              # Always redirect to HTTPS.
              http.redirections.entryPoint.to = "websecure";
            };
          }
          // catalogEntrypoints;

        # ACME via Cloudflare DNS-01 challenge; token supplied through the
        # CF_DNS_API_TOKEN environment variable set below.
        certificatesResolvers.letsencrypt.acme = {
          email = cfg.certificateEmail;
          storage = "/var/lib/traefik/letsencrypt-certs.json";
          caServer = "https://acme-v02.api.letsencrypt.org/directory";
          dnsChallenge = {
            provider = "cloudflare";
            delayBeforeCheck = "0";
            resolvers = [ "1.1.1.1:53" ];
          };
        };

        serversTransport = {
          # Disable backend certificate verification.
          insecureSkipVerify = true;
        };

        # Discover additional routes from services tagged "traefik" in the
        # consul catalog; CONSUL_HTTP_TOKEN comes from the env file below.
        providers.consulCatalog = {
          prefix = "traefik";
          exposedByDefault = false;
          connectAware = true;

          endpoint = {
            address = "consul.service.skynet.consul:8500";
            scheme = "http";
            datacenter = "skynet";
          };
        };

        accessLog = { }; # enabled
        log.level = "info";
      };

      dynamicConfigOptions =
        let
          # Build one traefik router per cfg.services entry.
          # NOTE(review): docs spell this key "certResolver" -- confirm the
          # lowercase "certresolver" form used throughout is accepted.
          routerEntry = name: opt: {
            # Always allow internal entry points, external is optional.
            entryPoints = [
              "web"
              "websecure"
            ] ++ (if opt.external then [ "extweb" ] else [ ]);

            rule = "Host(`" + opt.domainName + "`)";
            service = name;
            tls.certresolver = "letsencrypt";
            middlewares = mkIf (opt.external && opt.externalAuth) [ "authelia@file" ];
          };

          # Build one load-balanced traefik service per cfg.services entry.
          serviceEntry = name: opt: {
            loadBalancer = {
              # Map list of urls to individual url= attributes.
              servers = map (url: { url = url; }) opt.backendUrls;
              sticky = mkIf opt.sticky { cookie = { }; };
              healthCheck = {
                hostname = mkIf (opt.checkHost != null) opt.checkHost;
                path = opt.checkPath;
                interval = opt.checkInterval;
              };
            };
          };
        in
        {
          http = {
            # Combine static routes with cfg.services entries.
            routers = {
              # Router for built-in traefik API.
              api = {
                entryPoints = [
                  "web"
                  "websecure"
                ];
                rule = "Host(`traefik.bytemonkey.org`)";
                service = "api@internal";
                tls.certresolver = "letsencrypt";
              };
            } // mapAttrs routerEntry cfg.services;

            services = mapAttrs serviceEntry cfg.services;

            middlewares.authelia = {
              # Forward requests w/ middlewares=authelia@file to authelia.
              forwardAuth = {
                address = cfg.autheliaUrl;
                trustForwardHeader = true;
                authResponseHeaders = [
                  "Remote-User"
                  "Remote-Name"
                  "Remote-Email"
                  "Remote-Groups"
                ];
              };
            };
          };
        };
    };

    # Setup secrets.
    age.secrets = {
      traefik-consul-token.file = ../secrets/traefik-consul-token.age;
    };

    # Render secrets into an environment file consumed by traefik.service.
    age-template.files."traefik.env" = {
      vars = {
        cfDnsToken = cfg.cloudflareDnsApiTokenFile;
        consulToken = config.age.secrets.traefik-consul-token.path;
      };
      content = ''
        CF_DNS_API_TOKEN=$cfDnsToken
        CONSUL_HTTP_TOKEN=$consulToken
      '';
    };

    systemd.services.traefik.serviceConfig.EnvironmentFile = [
      config.age-template.files."traefik.env".path
    ];

    # TODO: autogenerate this list from catalog entrypoints
    networking.firewall.allowedTCPPorts = [
      25
      80
      222
      443
      8443
    ];
    networking.firewall.allowedUDPPorts = [
      7777
      15000
      15777
    ];
  };
}
224 |
--------------------------------------------------------------------------------
/nixos/roles/upsmon.nix:
--------------------------------------------------------------------------------
# Role: upsmon - follow a remote NUT UPS and shut this host down after a
# wave-dependent delay on battery power.
{
  config,
  lib,
  pkgs,
  ...
}:
let
  cfg = config.roles.upsmon;
in
{
  options.roles.upsmon = {
    enable = lib.mkEnableOption "Monitor remote UPS status and shutdown after a delay";

    wave = lib.mkOption {
      description = "Shutdown ordering, lower values shutdown earlier";
      type = lib.types.enum [
        1
        2
        3
      ];
    };
  };

  config = lib.mkIf cfg.enable {
    power.ups =
      let
        # Seconds on battery before each wave starts its early shutdown.
        waveDelays = {
          "1" = 120;
          "2" = 180;
          "3" = 240;
        };

        # NOTE(review): placeholder monitor password stored world-readable
        # in the nix store; consider moving it to an agenix secret like the
        # other credentials in this repo.
        secret = pkgs.writeText "upsmon" "secret";

        # upssched rules: notify shortly after going on battery, schedule
        # an early shutdown per wave, and handle comm loss / forced
        # shutdown events via upssched-cmd.
        # Fix: the "AT SHUTDOWN" directive was previously duplicated,
        # executing the powerdown handler twice.
        rules = pkgs.writeText "upssched" ''
          CMDSCRIPT ${../roles/files/nut/upssched-cmd}
          PIPEFN /etc/nut/upssched.pipe
          LOCKFN /etc/nut/upssched.lock

          AT ONBATT * START-TIMER onbatt 15
          AT ONLINE * CANCEL-TIMER onbatt online
          AT ONBATT * START-TIMER earlyshutdown ${toString waveDelays."${toString cfg.wave}"}
          AT ONLINE * CANCEL-TIMER earlyshutdown online
          AT LOWBATT * EXECUTE onbatt
          AT COMMBAD * START-TIMER commbad 30
          AT COMMOK * CANCEL-TIMER commbad commok
          AT NOCOMM * EXECUTE commbad
          AT SHUTDOWN * EXECUTE powerdown
        '';
      in
      {
        enable = true;
        mode = "netclient";
        schedulerRules = "${rules}";

        upsmon.settings = {
          # Log and run upssched-cmd for these UPS state transitions.
          NOTIFYFLAG = [
            [
              "ONLINE"
              "SYSLOG+EXEC"
            ]
            [
              "ONBATT"
              "SYSLOG+EXEC"
            ]
            [
              "LOWBATT"
              "SYSLOG+EXEC"
            ]
          ];
        };

        # Follow the UPS attached to the witness host as a secondary client.
        upsmon.monitor.skynas = {
          system = "ups@witness.home.arpa";
          type = "secondary";
          user = "monuser";
          passwordFile = "${secret}";
        };
      };
  };
}
83 |
--------------------------------------------------------------------------------
/nixos/roles/workstation.nix:
--------------------------------------------------------------------------------
# Role: workstation - base CLI development workstation: dev toolchains,
# docker, NFS automounts, and udev rules for embedded debug probes.
{
  config,
  lib,
  pkgs,
  authorizedKeys,
  nixpkgs-unstable,
  nixd-flake,
  ...
}:
let
  inherit (lib) mkEnableOption mkIf;
  cfg = config.roles.workstation;
in
{
  options.roles.workstation = {
    enable = mkEnableOption "Base CLI workstation";
  };

  config = mkIf cfg.enable {
    environment.systemPackages =
      let
        inherit (pkgs) system;
        # Packages tracked from the unstable channel for this system.
        unstable = nixpkgs-unstable.legacyPackages.${system};
      in
      (with pkgs; [
        bashmount
        cachix
        chezmoi
        docker-compose
        fzf
        gitAndTools.gh
        gcc
        gnumake
        kitty # always install for terminfo
        lazydocker
        lazygit
        lua51Packages.luarocks-nix # for rest.nvim
        lynx
        mqttui
        nfs-utils
        nixfmt-rfc-style
        nixpkgs-review
        nodejs
        openssl
        patchelf
        postgresql_14
        python311Packages.python-lsp-server
        ripgrep
        sshfs
        starship
        sumneko-lua-language-server
        tmux
        universal-ctags
        unzip
        usbutils
        watchexec
        yaml-language-server
        zip
      ])
      ++ [
        # Tools taken from flake inputs rather than stable nixpkgs.
        nixd-flake.packages.${system}.nixd
        unstable.devenv
        unstable.rust-analyzer
      ];

    # Programs and services
    programs.direnv.enable = true;
    programs.fish.enable = true;
    programs.mosh.enable = true;

    # NFS mount support
    boot.supportedFilesystems = [ "nfs" ];
    services.rpcbind.enable = true;

    services.udisks2.enable = true;

    # Setup ST-Link MCU probe.
    services.udev.extraRules = ''
      ACTION!="add|change", GOTO="probe_rs_rules_end"
      SUBSYSTEM=="gpio", MODE="0660", GROUP="dialout", TAG+="uaccess"
      SUBSYSTEM!="usb|tty|hidraw", GOTO="probe_rs_rules_end"

      # STMicroelectronics ST-LINK/V2
      ATTRS{idVendor}=="0483", ATTRS{idProduct}=="3748", MODE="660", GROUP="dialout", TAG+="uaccess"
      # DAP42 Bluepill CMSIS-DAP Debug Probe
      ATTRS{idVendor}=="1209", ATTRS{idProduct}=="da42", MODE="660", GROUP="dialout", TAG+="uaccess"
      # WeACT Blackbill CMSIS-DAP Debug Probe
      ATTRS{idVendor}=="c251", ATTRS{idProduct}=="f001", MODE="660", GROUP="dialout", TAG+="uaccess"

      LABEL="probe_rs_rules_end"
    '';

    virtualisation.docker.enable = true;

    # Environment
    environment.sessionVariables = {
      # Workaround for fish: https://github.com/NixOS/nixpkgs/issues/36146
      TERMINFO_DIRS = "/run/current-system/sw/share/terminfo";
    };

    # Users
    users.mutableUsers = true;

    # Extend common.nix user configuration
    users.users.james = {
      uid = 1026;
      isNormalUser = true;
      home = "/home/james";
      description = "James Hillyerd";
      shell = pkgs.fish;
      # Placeholder only; users are mutable, so this can be changed after
      # first login.
      initialPassword = "hello github";

      extraGroups = [
        "audio"
        "dialout"
        "docker"
        "libvirtd"
        "networkmanager"
        "vboxsf"
        "video"
        "wheel"
      ];

      openssh.authorizedKeys.keys = authorizedKeys;
    };

    # Automount NAS home directories under /net via autofs.
    services.autofs = {
      enable = true;
      debug = true;
      autoMaster =
        let
          netConf = pkgs.writeText "auto" ''
            skynas -rw,fstype=nfs4 /home skynas.home.arpa:/volume1/homes
          '';
        in
        ''
          /net file:${netConf}
        '';
    };

    security.sudo.wheelNeedsPassword = false;

    nix = {
      settings = {
        connect-timeout = 5;
        keep-derivations = true;
        keep-outputs = true;
        log-lines = 25;
        trusted-users = [
          "root"
          "james"
        ];
      };

      # Enable nix flakes, not yet stable.
      extraOptions = ''
        experimental-features = nix-command flakes
      '';
    };
  };
}
162 |
--------------------------------------------------------------------------------
/nixos/run:
--------------------------------------------------------------------------------
#!/usr/bin/env fish
#
# Run a command on a remote host over SSH as root.
#
# Usage: host=<name> ./run <command...>
#
# The target is addressed as root@<host>.home.arpa; the ssh exit status
# becomes the script's exit status.

# Fix: the previous message claimed an argument could supply the host,
# but only the $host environment variable is ever read.
if test -z "$host"
    echo "host env variable required" >&2
    exit 1
end

set target "root@$host.home.arpa"

echo "Running on $host"

ssh $target $argv
13 |
--------------------------------------------------------------------------------
/nixos/secrets/cloudflare-dns-api.age:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jhillyerd/homelab/1793c709211e098dc2f9687c99c9d01687a550c8/nixos/secrets/cloudflare-dns-api.age
--------------------------------------------------------------------------------
/nixos/secrets/consul-agent-token.age:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jhillyerd/homelab/1793c709211e098dc2f9687c99c9d01687a550c8/nixos/secrets/consul-agent-token.age
--------------------------------------------------------------------------------
/nixos/secrets/consul-encrypt.age:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jhillyerd/homelab/1793c709211e098dc2f9687c99c9d01687a550c8/nixos/secrets/consul-encrypt.age
--------------------------------------------------------------------------------
/nixos/secrets/gitea-runner-token.age:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jhillyerd/homelab/1793c709211e098dc2f9687c99c9d01687a550c8/nixos/secrets/gitea-runner-token.age
--------------------------------------------------------------------------------
/nixos/secrets/influxdb-admin.age:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jhillyerd/homelab/1793c709211e098dc2f9687c99c9d01687a550c8/nixos/secrets/influxdb-admin.age
--------------------------------------------------------------------------------
/nixos/secrets/influxdb-homeassistant.age:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jhillyerd/homelab/1793c709211e098dc2f9687c99c9d01687a550c8/nixos/secrets/influxdb-homeassistant.age
--------------------------------------------------------------------------------
/nixos/secrets/influxdb-telegraf.age:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jhillyerd/homelab/1793c709211e098dc2f9687c99c9d01687a550c8/nixos/secrets/influxdb-telegraf.age
--------------------------------------------------------------------------------
/nixos/secrets/k3s-token.age:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jhillyerd/homelab/1793c709211e098dc2f9687c99c9d01687a550c8/nixos/secrets/k3s-token.age
--------------------------------------------------------------------------------
/nixos/secrets/mqtt-admin.age:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jhillyerd/homelab/1793c709211e098dc2f9687c99c9d01687a550c8/nixos/secrets/mqtt-admin.age
--------------------------------------------------------------------------------
/nixos/secrets/mqtt-clock.age:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jhillyerd/homelab/1793c709211e098dc2f9687c99c9d01687a550c8/nixos/secrets/mqtt-clock.age
--------------------------------------------------------------------------------
/nixos/secrets/mqtt-sensor.age:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jhillyerd/homelab/1793c709211e098dc2f9687c99c9d01687a550c8/nixos/secrets/mqtt-sensor.age
--------------------------------------------------------------------------------
/nixos/secrets/mqtt-zwave.age:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jhillyerd/homelab/1793c709211e098dc2f9687c99c9d01687a550c8/nixos/secrets/mqtt-zwave.age
--------------------------------------------------------------------------------
/nixos/secrets/nomad-consul-token.age:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jhillyerd/homelab/1793c709211e098dc2f9687c99c9d01687a550c8/nixos/secrets/nomad-consul-token.age
--------------------------------------------------------------------------------
/nixos/secrets/nomad-encrypt.age:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jhillyerd/homelab/1793c709211e098dc2f9687c99c9d01687a550c8/nixos/secrets/nomad-encrypt.age
--------------------------------------------------------------------------------
/nixos/secrets/nomad-server-client-key.age:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jhillyerd/homelab/1793c709211e098dc2f9687c99c9d01687a550c8/nixos/secrets/nomad-server-client-key.age
--------------------------------------------------------------------------------
/nixos/secrets/secrets.nix:
--------------------------------------------------------------------------------
# agenix secret declarations: maps each .age file to the SSH public keys
# allowed to decrypt it.
let
  # A secret entry decryptable by the given list of SSH public keys.
  forKeys = keys: { publicKeys = keys; };

  # Users
  james-eph = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIM1Rq9OFHVus5eULteCEGNkHgINch40oPP2LwvlVd6ng";
  james-ryzen = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICAXHtE9NI16ZPNSKF6Cn0JNJS6fTNQYduerVmVa6WKY";
  james-nix-ryzen = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICJoH0p+6iSISUAqRO8+6+uvQWpjaP0eQjDeGAXIYUI6";
  users = [
    james-eph
    james-ryzen
    james-nix-ryzen
  ];

  # Nodes
  eph = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJy9svdsaaW77i5QPwiNLDSN+jJmFvkK5NisZzbm7Lro";
  fastd = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEhFOgRRiQp/P/amORoCK7woLM8koTmDCCNA+9+/ThrY";
  game = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINwJjb9823qVwZPp95MrfTekFoMtHPeybTRbogwi6B24";
  metrics = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAthVAxIOvyRWkUlxH19erBZGNC6LCW1IAFE+1T4AxGL";
  nexus = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEuqgUtpyOVfnxa1oKLdjN4AIN5piKHfdumQHonqjH4P";
  nix-ryzen = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPO18qRQvPfbyWYkG5J5K1T1NbCw4Y7QeeRhdQG8CzI5";
  nixtarget1-virtd = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILozTQNcPY2BNQZNW+F29M2euRzD7wZ1XtsKsWFjzpeJ";
  scratch = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIO9E9qftUIsznkjQXN9Bwov9bme0ZPD9fd704XwChrtV";
  web = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICHzyS01Xs/BFkkwlIa+F3K/23yw/9GE/NFcachriRgl";
  home-nodes = [
    eph
    fastd
    game
    metrics
    nexus
    nix-ryzen
    nixtarget1-virtd
    scratch
    web
  ];

  # Runners
  ci-runner1 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJnOeW75UezreS51pqSHleYjx7tNg67Nv34rh5/dJLiZ";
  runner-nodes = [
    ci-runner1
    scratch
  ];

  # Cluster nodes
  witness = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAXoEYcViLYLHXZRThjTh61ZA43DS2lCCbJa5EXbFAwc";

  kube1 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIB7K81sGBvuRcbOaQpippdNHhCRL2eDfmsJ1BNosZ8+o";
  kube2 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIziR7mI9vwr2/qHYx89GDJh95oQkZmbfb5AdDePXUtZ";
  kube-cluster = [
    kube1
    kube2
    scratch
    witness
  ];

  nc-um350-1 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMY7Sz0qZCTg2rJNZ1SX61eMosZwPyh0Mq8+kxp5AB31";
  nc-um350-2 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKmHTTSRM1PuZ45KXJACZhJc1GAgcT9i+QCClo6sV88R";
  nomad-cluster = [
    nexus
    nc-um350-1
    nc-um350-2
    scratch
    web
    witness
  ];

  # Named groups of recipients; users are included everywhere so secrets
  # can be rekeyed from a workstation.
  group = {
    common = users ++ home-nodes ++ nomad-cluster ++ kube-cluster ++ runner-nodes;
    home = users ++ home-nodes;
    kube = users ++ kube-cluster;
    nomad = users ++ nomad-cluster;
    runners = users ++ runner-nodes;
  };
in
{
  # Common
  "influxdb-telegraf.age" = forKeys group.common;
  "tailscale.age" = forKeys group.common;
  "wifi-env.age" = forKeys group.common;

  # Home
  "cloudflare-dns-api.age" = forKeys group.home;
  "influxdb-admin.age" = forKeys group.home;
  "influxdb-homeassistant.age" = forKeys group.home;
  "mqtt-admin.age" = forKeys group.home;
  "mqtt-clock.age" = forKeys group.home;
  "mqtt-sensor.age" = forKeys group.home;
  "mqtt-zwave.age" = forKeys group.home;

  # Kube cluster
  "k3s-token.age" = forKeys group.kube;

  # Nomad cluster
  "consul-encrypt.age" = forKeys group.nomad;
  "consul-agent-token.age" = forKeys group.nomad;
  "nomad-encrypt.age" = forKeys group.nomad;
  "nomad-consul-token.age" = forKeys group.nomad;
  "nomad-server-client-key.age" = forKeys group.nomad;
  "skynet-server-consul-0-key.pem.age" = forKeys group.nomad;
  "traefik-consul-token.age" = forKeys group.nomad;

  # Runners.
  "gitea-runner-token.age" = forKeys group.runners;
}
102 |
--------------------------------------------------------------------------------
/nixos/secrets/skynet-server-consul-0-key.pem.age:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jhillyerd/homelab/1793c709211e098dc2f9687c99c9d01687a550c8/nixos/secrets/skynet-server-consul-0-key.pem.age
--------------------------------------------------------------------------------
/nixos/secrets/tailscale.age:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jhillyerd/homelab/1793c709211e098dc2f9687c99c9d01687a550c8/nixos/secrets/tailscale.age
--------------------------------------------------------------------------------
/nixos/secrets/traefik-consul-token.age:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jhillyerd/homelab/1793c709211e098dc2f9687c99c9d01687a550c8/nixos/secrets/traefik-consul-token.age
--------------------------------------------------------------------------------
/nixos/secrets/wifi-env.age:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jhillyerd/homelab/1793c709211e098dc2f9687c99c9d01687a550c8/nixos/secrets/wifi-env.age
--------------------------------------------------------------------------------
/nixos/status:
--------------------------------------------------------------------------------
#!/usr/bin/env fish

# Print a quick health summary for one machine: failed systemd units,
# kernel version, uptime, and disk usage.  The host name comes from the
# first argument, or from a pre-set $host variable when no argument is
# given.

# An explicit argument overrides any inherited $host.
test -n "$argv[1]"; and set host $argv[1]

if test -z "$host"
    echo "host env or argument required" >&2
    exit 1
end

set target "root@$host.home.arpa"

echo "Status for $host"

# "set -x" makes the remote shell echo each command as "+ cmd"; the sed
# highlights those echoed lines in reverse video to separate the sections.
ssh $target "set -x; systemctl --failed; uname -a; uptime; df -h -x tmpfs -x overlay" 2>&1 \
    | sed -E -e "s/^(\+ .*)/\n$(set_color -r)\1$(set_color normal)/"
17 |
--------------------------------------------------------------------------------
/nomad/bin/alloc-stream:
--------------------------------------------------------------------------------
#!/bin/sh

# Follow the Nomad event stream, printing a compact JSON summary of each
# Allocation event as it arrives.
#
# Requires: $NOMAD_TOKEN set in the environment, and the Nomad CA cert at
# ~/secrets/nomad/ca/nomad-ca.pem.
#
# curl flags: -G sends the topic filter as a query string, -s suppresses
# progress output, -N disables output buffering so events stream live.
curl -G -s -N \
    --header "X-Nomad-Token: $NOMAD_TOKEN" \
    --cacert ~/secrets/nomad/ca/nomad-ca.pem \
    --data-urlencode "topic=Allocation" \
    https://nomad.service.consul:4646/v1/event/stream \
    | jq '.Events[]? | {
  "topic": .Topic,
  "type": .Type,
  "client_description": .Payload.Allocation.ClientDescription,
  "client_status": .Payload.Allocation.ClientStatus,
  "desired_status": .Payload.Allocation.DesiredStatus,
  "job_id": .Payload.Allocation.JobID,
  "name": .Payload.Allocation.Name,
  "namespace": .Payload.Allocation.Namespace,
  "node_name": .Payload.Allocation.NodeName,
  "task_group": .Payload.Allocation.TaskGroup
}'
20 |
--------------------------------------------------------------------------------
/nomad/consul/fabio.policy.hcl:
--------------------------------------------------------------------------------
# Consul ACL policy for the Fabio load balancer token: read access to all
# nodes and agents, write access to services.

# Read the full node catalog.
node_prefix {
  "" {
    policy = "read"
  }
}

# Register/update the "fabio" service itself.
service {
  fabio {
    policy = "write"
  }
}

# Write access to every service.
# NOTE(review): this prefix rule subsumes the explicit "fabio" rule above --
# confirm whether a narrower service prefix was intended.
service_prefix {
  "" {
    policy = "write"
  }
}

# Read access to all agent API endpoints.
agent_prefix {
  "" {
    policy = "read"
  }
}
21 |
--------------------------------------------------------------------------------
/nomad/consul/waypoint-ci-agent.policy.hcl:
--------------------------------------------------------------------------------
# Nomad ACL policy for the Waypoint CI agent: read-only access to the
# "default" namespace, plus the capability to submit (register) jobs there.
namespace "default" {
  policy = "read"

  capabilities = ["submit-job"]
}
6 |
--------------------------------------------------------------------------------
/nomad/env-prod.fish:
--------------------------------------------------------------------------------
# Fish environment for reaching the production Nomad API over mutual TLS.
# Source this file before using the nomad CLI.
set -x NOMAD_ADDR https://nomad.service.consul:4646
# CA plus client keypair used to authenticate to the Nomad servers.
set -x NOMAD_CACERT ~/secrets/nomad/ca/nomad-ca.pem
set -x NOMAD_CLIENT_CERT ~/secrets/nomad/cli.pem
set -x NOMAD_CLIENT_KEY ~/secrets/nomad/cli-key.pem
# ACL token read from disk; presumably a management token, per the file name.
set -x NOMAD_TOKEN (cat ~/secrets/nomad/manager-token)
6 |
--------------------------------------------------------------------------------
/nomad/etc/ca-config.json:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "nomad-cfssl-ca.skynet.local",
3 | "hosts": [
4 | "nomad-cfssl-ca.skynet.local"
5 | ],
6 | "key": {
7 | "algo": "ecdsa",
8 | "size": 256
9 | },
10 | "names": [
11 | {
12 | "C": "US",
13 | "ST": "WA",
14 | "L": "Redmond"
15 | }
16 | ]
17 | }
18 |
--------------------------------------------------------------------------------
/nomad/etc/cfssl-config.json:
--------------------------------------------------------------------------------
1 | {
2 | "signing": {
3 | "default": {
4 | "expiry": "43800h",
5 | "usages": ["signing", "key encipherment", "server auth", "client auth"]
6 | },
7 | "profiles": {
8 | "client": {
9 | "expiry": "8760h",
10 | "usages": [
11 | "signing",
12 | "key encipherment",
13 | "client auth"
14 | ]
15 | }
16 | }
17 | }
18 | }
19 |
--------------------------------------------------------------------------------
/nomad/etc/init-ca.fish:
--------------------------------------------------------------------------------
# Initialize the self-signed Nomad CA from the CSR settings in
# ca-config.json; writes nomad-ca.pem, nomad-ca-key.pem, and nomad-ca.csr.
cfssl gencert -initca ca-config.json | cfssljson -bare nomad-ca
2 |
--------------------------------------------------------------------------------
/nomad/etc/make-certs.fish:
--------------------------------------------------------------------------------
# Generate Nomad TLS certificates (combined server/client, CLI, and browser)
# signed by the CA under ca/.  Run after init-ca.fish, from this directory.

set local_wildcard "*.home.arpa"
set config cfssl-config.json
set ca_crt ca/nomad-ca.pem
set ca_key ca/nomad-ca-key.pem
set output certs

# Issue one combined certificate for the roles given in $argv (e.g.
# "server client").  Each role becomes a <role>.global.nomad SAN alongside
# the local wildcard, the Consul service name, and localhost entries.
function gencert
    set short_name (string join "-" $argv)
    set host_names \
        (string join "," (string replace -r '$' ".global.nomad" $argv))

    echo '{ "CN": "nomad.service.consul" }' | cfssl gencert \
        -ca=$ca_crt -ca-key=$ca_key -config=$config \
        -hostname="$host_names,$local_wildcard,nomad.service.consul,localhost,127.0.0.1" - \
        | cfssljson -bare $output/$short_name
end

mkdir -p $output

gencert server client

# CLI client certificate for operators.
# NOTE(review): -profile is looked up in a signing config, but no
# -config=$config is passed here (or below) -- confirm the intended profile
# is actually applied before relying on its expiry/usages.
echo '{}' | cfssl gencert -ca=$ca_crt -ca-key=$ca_key -profile=client - \
    | cfssljson -bare $output/cli

# Browser certificate, bundled as a passwordless PKCS#12 file for import.
echo '{ "CN": "nomad.browser" }' | cfssl gencert \
    -ca=$ca_crt -ca-key=$ca_key -profile=browser - \
    | cfssljson -bare $output/browser
# Fix: the original passed -in twice; openssl honors only the last -in, so
# the CA cert was silently dropped from the bundle.  -certfile is the
# documented way to include additional (CA) certs in the PKCS#12 output.
openssl pkcs12 -export -out $output/browser.pfx -passout pass: \
    -certfile $ca_crt -in $output/browser.pem -inkey $output/browser-key.pem
30 |
--------------------------------------------------------------------------------
/nomad/etc/nomad-ca.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIB5zCCAY6gAwIBAgIUJ/EnXzDrwIrtUhnDp1SxalAh7DAwCgYIKoZIzj0EAwIw
3 | UjELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAldBMRAwDgYDVQQHEwdSZWRtb25kMSQw
4 | IgYDVQQDExtub21hZC1jZnNzbC1jYS5za3luZXQubG9jYWwwHhcNMjIwNTIxMTcx
5 | OTAwWhcNMjcwNTIwMTcxOTAwWjBSMQswCQYDVQQGEwJVUzELMAkGA1UECBMCV0Ex
6 | EDAOBgNVBAcTB1JlZG1vbmQxJDAiBgNVBAMTG25vbWFkLWNmc3NsLWNhLnNreW5l
7 | dC5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABD0+QB3FVvKx7tSkviJF
8 | pByMB+s5e3QkYGemoQPQY7XLwVyoLaimP4Xm859TaaUePDxuTXKvY7CSBrB5u7ST
9 | zgqjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW
10 | BBRnH99g5gDZGtXTj7NWmjVFbbYVaDAKBggqhkjOPQQDAgNHADBEAiBikM9KBCGG
11 | Ytrokzmp/ki4qGaGsTyfPEnLB2XwXCGeMgIgPFzTWo5tLZqocSOaln4gt3ixOQQ2
12 | G7iJlyx6HJkM+h4=
13 | -----END CERTIFICATE-----
14 |
--------------------------------------------------------------------------------
/nomad/examples/linuxvm.nomad:
--------------------------------------------------------------------------------
# Example Nomad job: boot a NixOS virtual machine under the QEMU task driver.
job "linuxvm" {
  datacenters = ["skynet"]
  type        = "service"

  group "linuxvm" {
    count = 1

    network {
      # Dynamically allocated host port, forwarded into the guest below.
      port "ssh" {}
    }

    task "linuxvm" {
      driver = "qemu"

      config {
        image_path = "local/nixos.qcow2"

        # KVM acceleration, ACPI-based shutdown, and the QEMU guest agent.
        accelerator       = "kvm"
        graceful_shutdown = true
        guest_agent       = true

        # User-mode networking: forward the allocated host port to guest :22.
        args = [
          "-device",
          "e1000,netdev=user.0",
          "-netdev",
          "user,id=user.0,hostfwd=tcp::${NOMAD_PORT_ssh}-:22",
        ]
      }

      # Download the VM image into the task directory before start.
      artifact {
        source = "http://skynas.home.arpa/vms/nixos.qcow2"
      }

      # Give the guest time to shut down cleanly before it is killed.
      kill_timeout = "30s"

      resources {
        cpu    = 1000 # MHz
        memory = 2048 # MB
      }

      logs {
        max_files     = 10
        max_file_size = 1
      }
    }
  }
}
48 |
--------------------------------------------------------------------------------
/nomad/examples/whoami-connect.nomad:
--------------------------------------------------------------------------------
# Example Nomad job: traefik/whoami exposed via Consul Connect (sidecar
# proxy) rather than a host-mapped port.
job "whoami-connect" {
  datacenters = ["skynet"]
  type        = "service"

  # Run only on x86_64 Linux clients.
  constraint {
    attribute = "${attr.kernel.name}"
    value     = "linux"
  }

  constraint {
    attribute = "${attr.kernel.arch}"
    value     = "x86_64"
  }

  group "whoami" {
    count = 1

    network {
      # Bridge mode: tasks share a network namespace with the sidecar.
      mode = "bridge"
    }

    service {
      name = "whoami-connect"
      port = 80

      # Allows sidecar to connect.
      address_mode = "alloc"

      tags = [
        "http",
      ]

      # Register a Connect sidecar proxy with default settings.
      connect {
        sidecar_service {}
      }

      check {
        name     = "Whoami HTTP Check"
        type     = "http"
        path     = "/"
        interval = "10s"
        timeout  = "2s"

        # Allow local consul to connect.
        address_mode = "alloc"
      }
    }

    task "whoami" {
      driver = "docker"

      config {
        image = "traefik/whoami"
        ports = ["http"]
      }

      env {
        WHOAMI_PORT_NUMBER = "80"
        WHOAMI_NAME        = "connect"
      }

      resources {
        cpu    = 100 # MHz
        memory = 16  # MB
      }

      logs {
        max_files     = 10
        max_file_size = 1
      }
    }
  }
}
74 |
--------------------------------------------------------------------------------
/nomad/examples/whoami.nomad:
--------------------------------------------------------------------------------
# Example Nomad job: two traefik/whoami instances behind Traefik, one
# public and one gated by the authelia middleware.
job "whoami" {
  datacenters = ["skynet"]
  type        = "service"

  group "whoami" {
    count = 1

    network {
      # Dynamic host ports mapped to fixed container ports 80 and 81.
      port "public" { to = 80 }
      port "private" { to = 81 }
    }

    # Public route: no auth middleware.
    service {
      name = "whoami-public"
      port = "public"

      tags = [
        "http",
        "traefik.enable=true",
        "traefik.http.routers.whoami-public.entrypoints=extweb",
        "traefik.http.routers.whoami-public.rule=Host(`x.bytemonkey.org`) && PathPrefix(`/public/`)",
        "traefik.http.routers.whoami-public.tls.certresolver=letsencrypt",
        "urlprefix-/public/",
      ]

      check {
        name     = "Public HTTP Check"
        type     = "http"
        path     = "/"
        interval = "10s"
        timeout  = "2s"
      }
    }

    # Private route: same host, /private/ prefix, authelia middleware added.
    service {
      name = "whoami-private"
      port = "private"

      tags = [
        "http",
        "traefik.enable=true",
        "traefik.http.routers.whoami-private.entrypoints=extweb",
        "traefik.http.routers.whoami-private.rule=Host(`x.bytemonkey.org`) && PathPrefix(`/private/`)",
        "traefik.http.routers.whoami-private.tls.certresolver=letsencrypt",
        "traefik.http.routers.whoami-private.middlewares=authelia@file",
        "urlprefix-/private/",
      ]

      check {
        name     = "Private HTTP Check"
        type     = "http"
        path     = "/"
        interval = "10s"
        timeout  = "2s"
      }
    }

    task "public" {
      driver = "docker"

      config {
        image = "traefik/whoami"
        ports = ["public"]
      }

      env {
        WHOAMI_PORT_NUMBER = "${NOMAD_PORT_public}"
        WHOAMI_NAME        = "public"
      }

      resources {
        cpu    = 100 # MHz
        memory = 16  # MB
      }

      logs {
        max_files     = 10
        max_file_size = 1
      }
    }

    task "private" {
      driver = "docker"

      config {
        image = "traefik/whoami"
        ports = ["private"]
      }

      env {
        WHOAMI_PORT_NUMBER = "${NOMAD_PORT_private}"
        WHOAMI_NAME        = "private"
      }

      resources {
        cpu    = 100 # MHz
        memory = 16  # MB
      }

      logs {
        max_files     = 10
        max_file_size = 1
      }
    }
  }
}
107 |
--------------------------------------------------------------------------------
/nomad/fabio.nomad:
--------------------------------------------------------------------------------
1 | job "fabio" {
2 | datacenters = ["skynet"]
3 | type = "system"
4 |
5 | constraint {
6 | attribute = "${attr.kernel.name}"
7 | value = "linux"
8 | }
9 |
10 | constraint {
11 | attribute = "${attr.kernel.arch}"
12 | value = "x86_64"
13 | }
14 |
15 | group "fabio" {
16 | network {
17 | port "lb" {
18 | static = 80
19 | to = 9999
20 | }
21 | port "ui" {
22 | static = 9998
23 | }
24 | }
25 |
26 | task "fabio" {
27 | driver = "docker"
28 |
29 | config {
30 | image = "fabiolb/fabio"
31 | ports = ["lb","ui"]
32 | args = ["-cfg", "/secrets/fabio.properties"]
33 | }
34 |
35 | user = "nobody"
36 |
37 | resources {
38 | cpu = 200
39 | memory = 128
40 | }
41 |
42 | template {
43 | data = <