sha256:[a-f0-9]+))?"',
29 | ],
30 | datasourceTemplate: 'docker',
31 | },
32 | ],
33 | }
34 |
--------------------------------------------------------------------------------
/nixos/modules/applications/search/searxng/remove:
--------------------------------------------------------------------------------
1 | pinterest.com
2 | pinterest.co.uk
3 | pinterest.de
4 | pinterest.ca
5 | pinterest.fr
6 | pinterest.com.au
7 | pinterest.es
8 | foxnews.com
9 | tiktok.com
10 | breitbart.com
11 | facebook.com
12 | quora.com
13 | dailymail.co.uk
14 | instagram.com
15 | w3schools.com
16 | githubplus.com
17 | msn.com
18 | appsloveworld.com
19 | geeksforgeeks.org
20 | twitter.com
21 | alternativeto.net
22 | libhunt.com
23 | 9to5answer.com
24 | healthline.com
25 | nypost.com
26 | wikihow.com
27 | linkedin.com
28 | solveforum.com
29 | giters.com
30 | codegrepper.com
31 | cnn.com
32 | coder.social
33 | medium.com
34 | amazon.com
35 | forbes.com
36 | issuehint.com
37 | geekrepos.com
38 | kknews.cc
39 | webmd.com
40 | programcreek.com
41 | bleepcoder.com
42 | otosection.com
43 | drivereasy.com
44 | educba.com
45 | newbedev.com
46 | you.com
47 | nytimes.com
48 | blog.csdn.net
49 | washingtonpost.com
50 | lightrun.com
--------------------------------------------------------------------------------
/nixos/modules/nixos/system/nix.nix:
--------------------------------------------------------------------------------
1 | {
2 | lib,
3 | config,
4 | ...
5 | }:
6 | with lib;
7 | let
8 | cfg = config.mySystem.nix;
9 | in
10 | {
11 | options.mySystem.nix = {
12 | autoOptimiseStore = mkOption {
13 | type = lib.types.bool;
14 | description = "If we want to auto optimise store";
15 | default = true;
16 |
17 | };
18 | gc = {
19 | enable = mkEnableOption "automatic garbage collection" // {
20 | default = true;
21 | };
22 | persistent = mkOption {
23 | type = lib.types.bool;
24 | description = "Persistent timer for gc, runs at startup if timer missed";
25 | default = true;
26 | };
27 | };
28 |
29 | };
30 |
31 | config.nix = {
32 |
33 | optimise.automatic = cfg.autoOptimiseStore;
34 |
35 | # automatically garbage collect nix store
36 | gc = mkIf cfg.gc.enable {
37 | # garbage collection
38 | automatic = cfg.gc.enable;
39 | dates = "daily";
40 | options = "--delete-older-than 7d";
41 | inherit (cfg.gc) persistent;
42 | };
43 |
44 | };
45 |
46 | }
47 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 Truxnell
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/.github/workflows/deploy-vault.yaml:
--------------------------------------------------------------------------------
1 | name: Deploy Vaultwarden to Fly.io
2 | on:
3 |   workflow_dispatch:
4 |   pull_request:
5 |     paths:
6 |       - .github/workflows/deploy-vault.yaml
7 |       - "deployments/flyio/vaultwarden/fly.toml"
8 |
9 | jobs:
10 |   build-deploy:
11 |     # NOTE(review): on pull_request events github.ref_name is "<pr-number>/merge",
12 |     # never "main", so this guard means the pull_request trigger above never
13 |     # actually deploys; only workflow_dispatch runs from main pass. Confirm the
14 |     # pull_request trigger is intentional (e.g. swap to `push` on main if not).
15 |     if: ${{ github.ref_name == 'main' }}
16 |     runs-on: ubuntu-latest
17 |     steps:
18 |       - name: Checkout
19 |         uses: actions/checkout@v6
20 |         with:
21 |           persist-credentials: false
22 |
23 |       - name: setup flyctl
24 |         uses: superfly/flyctl-actions/setup-flyctl@master
25 |
26 |       - name: Publish
27 |         run: flyctl deploy --config deployments/flyio/vaultwarden/fly.toml
28 |         env:
29 |           FLY_ACCESS_TOKEN: ${{ secrets.FLY_ACCESS_TOKEN }}
30 |           FLY_APP: ${{ secrets.FLY_APP_VAULTWARDEN }}
31 |
32 |       - name: Push Build Status Notifications
33 |         # always() so a failed/cancelled deploy still raises a pushover alert
34 |         if: ${{ always() }}
35 |         uses: desiderati/github-action-pushover@v1
36 |         with:
37 |           job-status: ${{ job.status }}
38 |           pushover-api-token: ${{ secrets.PUSHOVER_API_TOKEN }}
39 |           pushover-user-key: ${{ secrets.PUSHOVER_USER_KEY }}
40 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | fail_fast: false
3 |
4 | repos:
5 | - repo: https://github.com/adrienverge/yamllint
6 | rev: v1.37.1
7 | hooks:
8 | - args:
9 | - --config-file
10 | - .github/lint/.yamllint.yaml
11 | id: yamllint
12 | - repo: https://github.com/pre-commit/pre-commit-hooks
13 | rev: v6.0.0
14 | hooks:
15 | - id: trailing-whitespace
16 | - id: end-of-file-fixer
17 | - id: fix-byte-order-marker
18 | - id: mixed-line-ending
19 | - id: check-added-large-files
20 | args: [--maxkb=2048]
21 | - id: check-merge-conflict
22 | - id: check-executables-have-shebangs
23 | - repo: https://github.com/Lucas-C/pre-commit-hooks
24 | rev: v1.5.5
25 | hooks:
26 | - id: remove-crlf
27 | - id: remove-tabs
28 | exclude: (Makefile)
29 | - repo: https://github.com/zricethezav/gitleaks
30 | rev: v8.30.0
31 | hooks:
32 | - id: gitleaks
33 | - repo: https://github.com/yuvipanda/pre-commit-hook-ensure-sops
34 | rev: v1.1
35 | hooks:
36 | - id: sops-encryption
37 | # Uncomment to exclude all markdown files from encryption
38 |           # exclude: .*\.md
39 |
--------------------------------------------------------------------------------
/nixos/hosts/xerxes/default.nix:
--------------------------------------------------------------------------------
1 | {
2 | lib,
3 | ...
4 | }:
5 | {
6 | config = {
7 | networking.hostName = "xerxes";
8 | networking.useDHCP = lib.mkDefault true;
9 | system.stateVersion = lib.mkDefault "23.11";
10 | services.smartd.enable=false; # no smartd disks on vps
11 | mySystem.security.acme.enable = true;
12 |
13 | mySystem.services = {
14 |
15 | openssh.enable = true;
16 | podman.enable = true;
17 | # Networking
18 | nginx.enable = true;
19 | technitium-dns-server.enable = true;
20 | };
21 |
22 | boot.initrd.availableKernelModules = [ "ata_piix" "uhci_hcd" "virtio_pci" "sr_mod" "virtio_blk" ];
23 | boot.initrd.kernelModules = [ ];
24 | boot.kernelModules = [ ];
25 | boot.extraModulePackages = [ ];
26 |
27 | boot.loader.grub = {
28 | enable = true;
29 | devices = [ "/dev/vda" ];
30 | };
31 |
32 | fileSystems."/" =
33 | { device = "/dev/disk/by-uuid/3829960b-43f1-4d64-9bd9-2286e8c0345f";
34 | fsType = "ext4";
35 | };
36 |
37 | # swapDevices =
38 | # [ { device = "/dev/disk/by-uuid/7e2ffce0-4f21-45a8-93b2-9ea83377f9ec"; }
39 | # ];
40 |
41 |
42 | nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
43 | };
44 |
45 | }
46 |
47 |
--------------------------------------------------------------------------------
/nixos/modules/applications/infrastructure/podman/default.nix:
--------------------------------------------------------------------------------
1 | {
2 | lib,
3 | config,
4 | pkgs,
5 | ...
6 | }:
7 |
8 | with lib;
9 | let
10 | cfg = config.mySystem.services.podman;
11 | in
12 | {
13 | options.mySystem.services.podman.enable = mkEnableOption "Podman";
14 |
15 | config = mkIf cfg.enable {
16 | virtualisation.podman = {
17 | enable = true;
18 |
19 | dockerCompat = true;
20 | extraPackages = [ pkgs.zfs ];
21 |
22 | # regular cleanup
23 | autoPrune.enable = true;
24 | autoPrune.dates = "weekly";
25 |
26 | # and add dns
27 | defaultNetwork.settings = {
28 | dns_enabled = true;
29 | };
30 | };
31 | virtualisation.oci-containers = {
32 | backend = "podman";
33 | };
34 |
35 | environment.systemPackages = with pkgs; [
36 | podman-tui # status of containers in the terminal
37 | ];
38 |
39 | networking.firewall.interfaces.podman0.allowedUDPPorts = [ 53 ];
40 |
41 | # extra user for containers
42 | users.users.kah = {
43 | uid = 568;
44 | group = "kah";
45 | };
46 | users.groups.kah = {
47 | gid = 568;
48 | };
49 | users.users.truxnell.extraGroups = [ "kah" ];
50 | };
51 |
52 | }
53 |
--------------------------------------------------------------------------------
/nixos/pkgs/cockpit-podman.nix:
--------------------------------------------------------------------------------
1 | {
2 |   lib,
3 |   stdenv,
4 |   fetchzip,
5 |   gettext,
6 | }:
7 |
8 | stdenv.mkDerivation rec {
9 |   pname = "cockpit-podman";
10 |   version = "99";
11 |
12 |   src = fetchzip {
13 |     url = "https://github.com/cockpit-project/${pname}/releases/download/${version}/${pname}-${version}.tar.xz";
14 |     sha256 = "140sjw0pmzxs2hm33hrrragyhqq5gfp7n5ypma4bm6mnnnx0dydd";
15 |   };
16 |
17 |   nativeBuildInputs = [
18 |     gettext
19 |   ];
20 |
21 |   makeFlags = [
22 |     "DESTDIR=$(out)"
23 |     "PREFIX="
24 |   ];
25 |
26 |   postPatch = ''
27 |     # bare --replace is deprecated in nixpkgs; --replace-warn keeps the same
28 |     # warn-if-pattern-missing behaviour and matches the postFixup below
29 |     substituteInPlace Makefile \
30 |       --replace-warn /usr/share $out/share
31 |     touch pkg/lib/cockpit-po-plugin.js
32 |     touch dist/manifest.json
33 |   '';
34 |
35 |   dontBuild = true;
36 |
37 |   postFixup = ''
38 |     substituteInPlace $out/share/cockpit/podman/manifest.json \
39 |       --replace-warn "/lib/systemd/system/podman.socket" "/run/podman/podman.sock"
40 |   '';
41 |
42 |   meta = with lib; {
43 |     description = "Cockpit UI for podman containers";
44 |     license = licenses.lgpl21;
45 |     homepage = "https://github.com/cockpit-project/cockpit-podman";
46 |     platforms = platforms.linux;
47 |     maintainers = with maintainers; [ ];
48 |   };
49 | }
50 |
--------------------------------------------------------------------------------
/nixos/modules/applications/infrastructure/mariadb/default.nix:
--------------------------------------------------------------------------------
1 | {
2 |   lib,
3 |   config,
4 |   pkgs,
5 |   ...
6 | }:
7 | with lib;
8 | let
9 |   cfg = config.mySystem.${category}.${app};
10 |   app = "mariadb";
11 |   category = "services";
12 |   # NOTE(review): `description` is defined but not referenced below
13 |   description = "mysql-compatible database";
14 |   # image = "";#string
15 |   inherit (config.services.mysql) group; # string
16 |   # port = ; #int
17 |   # appFolder = "/var/lib/${app}";
18 |   # persistentFolder = "${config.mySystem.persistentFolder}/var/lib/${appFolder}";
19 | in
20 | {
21 |   options.mySystem.${category}.${app} = {
22 |     enable = mkEnableOption "${app}";
23 |     # NOTE(review): declared but not yet consumed in the config below —
24 |     # TODO wire up a mysqld exporter or remove the option.
25 |     prometheus = mkOption {
26 |       type = lib.types.bool;
27 |       description = "Enable prometheus scraping";
28 |       default = true;
29 |     };
30 |   };
31 |
32 |   config = mkIf cfg.enable {
33 |
34 |     ## Secrets
35 |     # sops.secrets."${category}/${app}/env" = {
36 |     #   sopsFile = ./secrets.sops.yaml;
37 |     #   owner = user;
38 |     #   group = group;
39 |     #   restartUnits = [ "${app}.service" ];
40 |     # };
41 |
42 |     # give the admin user direct access to the mysql socket/group
43 |     users.users.truxnell.extraGroups = [ group ];
44 |
45 |     ## service
46 |     services.mysql = {
47 |       enable = true;
48 |       package = pkgs.mariadb;
49 |     };
50 |
51 |   };
52 | }
53 |
--------------------------------------------------------------------------------
/deployments/flyio/vaultwarden/fly.toml:
--------------------------------------------------------------------------------
1 | primary_region = "syd"
2 | kill_signal = "SIGINT"
3 | kill_timeout = "5s"
4 | app = "vaultwarden"
5 |
6 | [experimental]
7 | auto_rollback = true
8 |
9 | [build]
10 | image = "ghcr.io/dani-garcia/vaultwarden:1.34.3@sha256:84fd8a47f58d79a1ad824c27be0a9492750c0fa5216b35c749863093bfa3c3d7"
11 |
12 | [env]
13 | DATABASE_URL = "data/db.sqlite3"
14 | PASSWORD_ITERATIONS = "2000000"
15 | PRIMARY_REGION = "syd"
16 | SIGNUPS_ALLOWED = "false"
17 | INVITATIONS_ALLOWED = "true"
18 | SMTP_FROM_NAME = "Vault"
19 | SMTP_SECURITY = "off"
20 | SMTP_SSL = "true"
21 | TZ = "Australia/Melbourne"
22 | WEB_VAULT_ENABLED = "true"
23 | WEB_VAULT_FOLDER = "web-vault"
24 | DATA_FOLDER = "data"
25 |
26 | [[mounts]]
27 | source = "vw_data_machines"
28 | destination = "/data"
29 | processes = ["app"]
30 |
31 | [[services]]
32 | protocol = "tcp"
33 | internal_port = 80
34 | processes = ["app"]
35 |
36 | [[services.ports]]
37 | port = 80
38 | handlers = ["http"]
39 | force_https = true
40 |
41 | [[services.ports]]
42 | port = 443
43 | handlers = ["tls", "http"]
44 | [services.concurrency]
45 | type = "connections"
46 | hard_limit = 25
47 | soft_limit = 20
48 |
49 | [[services.tcp_checks]]
50 | interval = "15s"
51 | timeout = "2s"
52 | grace_period = "1s"
53 | restart_limit = 0
54 |
--------------------------------------------------------------------------------
/nixos/modules/nixos/security/acme/default.nix:
--------------------------------------------------------------------------------
1 | {
2 |   lib,
3 |   config,
4 |   ...
5 | }:
6 | with lib;
7 | let
8 |   cfg = config.mySystem.security.acme;
9 |   app = "acme";
10 |   # persistentFolder = "${config.mySystem.persistentFolder}/var/lib/${appFolder}";
11 |
12 | in
13 | {
14 |   options.mySystem.security.acme.enable = mkEnableOption "acme";
15 |
16 |   config = mkIf cfg.enable {
17 |     # Cloudflare DNS-01 credentials, decrypted by sops-nix at activation.
18 |     # Both attrpaths below target the same secret; Nix merges them into one
19 |     # definition with sopsFile + restartUnits set.
20 |     sops.secrets = {
21 |       "security/acme/env".sopsFile = ./secrets.sops.yaml;
22 |       "security/acme/env".restartUnits = [ "${app}.service" ];
23 |     };
24 |
25 |     # keep issued certs across impermanence rollbacks
26 |     environment.persistence."${config.mySystem.system.impermanence.persistPath}" =
27 |       lib.mkIf config.mySystem.system.impermanence.enable
28 |         {
29 |           directories = [ "/var/lib/acme" ];
30 |         };
31 |
32 |     security.acme = {
33 |       acceptTerms = true;
34 |       defaults.email = "admin@${config.networking.domain}";
35 |
36 |       certs.${config.networking.domain} = {
37 |         # NOTE(review): the attr name above is already the cert's main domain,
38 |         # so repeating it in extraDomainNames looks redundant — confirm lego
39 |         # dedupes the SAN list.
40 |         extraDomainNames = [
41 |           "${config.networking.domain}"
42 |           "*.${config.networking.domain}"
43 |         ];
44 |         dnsProvider = "cloudflare";
45 |         dnsResolver = "1.1.1.1:53";
46 |         credentialsFile = config.sops.secrets."security/acme/env".path;
47 |       };
48 |     };
49 |
50 |   };
51 | }
52 |
--------------------------------------------------------------------------------
/nixos/modules/applications/infrastructure/nfs/default.nix:
--------------------------------------------------------------------------------
1 | {
2 | lib,
3 | config,
4 | pkgs,
5 | ...
6 | }:
7 | with lib;
8 | let
9 | cfg = config.mySystem.nfs.nas;
10 | in
11 | {
12 | options.mySystem.nfs.nas = {
13 | enable = mkEnableOption "Mount NAS";
14 | lazy = mkOption {
15 | type = lib.types.bool;
16 | description = "Enable lazymount";
17 | default = false;
18 | };
19 |
20 | };
21 |
22 | config = mkIf cfg.enable {
23 |
24 | services.rpcbind.enable = true; # needed for NFS
25 |
26 | environment.systemPackages = with pkgs; [ nfs-utils ];
27 |
28 | systemd.mounts = lib.mkIf cfg.lazy [
29 | {
30 | type = "nfs4";
31 | mountConfig = {
32 | Options = "noatime";
33 | };
34 | what = "daedalus.${config.mySystem.internalDomain}:/";
35 | where = "/mnt/nas";
36 | }
37 | ];
38 |
39 | systemd.automounts = lib.mkIf cfg.lazy [
40 | {
41 | wantedBy = [ "multi-user.target" ];
42 | automountConfig = {
43 | TimeoutIdleSec = "600";
44 | };
45 | where = "/mnt/nas";
46 | }
47 | ];
48 |
49 | fileSystems."${config.mySystem.nasFolder}" = lib.mkIf (!cfg.lazy) {
50 | device = "daedalus.${config.mySystem.internalDomain}:/";
51 | fsType = "nfs";
52 | };
53 |
54 | };
55 | }
56 |
--------------------------------------------------------------------------------
/nixos/modules/nixos/default.nix:
--------------------------------------------------------------------------------
1 | { lib, config, ... }:
2 | with lib;
3 | {
4 |   imports = [
5 |     ./system
6 |     ./programs
7 |     ./services/monitoring.nix
8 |     ./services/promtail.nix
9 |     ./services/reboot-required-check.nix
10 |     ./editor
11 |     ./lib.nix
12 |     ./security
13 |   ];
14 |
15 |   options.mySystem.persistentFolder = mkOption {
16 |     type = types.str;
17 |     description = "persistent folder for nixos mutable files";
18 |     default = "/persist";
19 |   };
20 |
21 |   options.mySystem.nasFolder = mkOption {
22 |     type = types.str;
23 |     description = "folder where nas mounts reside";
24 |     default = "/mnt/nas";
25 |   };
26 |   options.mySystem.domain = mkOption {
27 |     type = types.str;
28 |     description = "domain for hosted services";
29 |     default = "";
30 |   };
31 |   options.mySystem.internalDomain = mkOption {
32 |     type = types.str;
33 |     description = "domain for local devices";
34 |     default = "";
35 |   };
36 |   options.mySystem.purpose = mkOption {
37 |     type = types.str;
38 |     description = "System purpose";
39 |     default = "Production";
40 |   };
41 |
42 |   config = {
43 |     systemd.tmpfiles.rules = [
44 |       # NOTE(review): mode 777 leaves the persist root world-writable; consider
45 |       # 0755 with per-service subdirectories — confirm nothing relies on this.
46 |       "d ${config.mySystem.persistentFolder} 777 - - -" # The - disables automatic cleanup, so the file wont be removed after a period
47 |     ];
48 |
49 |   };
50 | }
51 |
--------------------------------------------------------------------------------
/nixos/modules/nixos/system/security.nix:
--------------------------------------------------------------------------------
1 | {
2 | lib,
3 | config,
4 | ...
5 | }:
6 | with lib;
7 | let
8 | cfg = config.mySystem.security;
9 | in
10 | {
11 | options.mySystem.security = {
12 |
13 | wheelNeedsSudoPassword = lib.mkOption {
14 | type = lib.types.bool;
15 | description = "If wheel group users need password for sudo";
16 | default = true;
17 | };
18 | increaseWheelLoginLimits = lib.mkOption {
19 | type = lib.types.bool;
20 | description = "If wheel group users receive increased login limits";
21 | default = true;
22 | };
23 | };
24 |
25 | config = {
26 | security = {
27 | sudo.wheelNeedsPassword = cfg.wheelNeedsSudoPassword;
28 | # Don't bother with the lecture or the need to keep state about who's been lectured
29 | sudo.extraConfig = "Defaults lecture=\"never\"";
30 |
31 | pam.sshAgentAuth.enable = true;
32 |
33 | # Increase open file limit for sudoers
34 | pam.loginLimits = mkIf cfg.increaseWheelLoginLimits [
35 | {
36 | domain = "@wheel";
37 | item = "nofile";
38 | type = "soft";
39 | value = "524288";
40 | }
41 | {
42 | domain = "@wheel";
43 | item = "nofile";
44 | type = "hard";
45 | value = "1048576";
46 | }
47 | ];
48 | };
49 | };
50 |
51 | }
52 |
--------------------------------------------------------------------------------
/nixos/pkgs/snapraid-btrfs.nix:
--------------------------------------------------------------------------------
1 | {
2 | symlinkJoin,
3 | writeScriptBin,
4 | makeWrapper,
5 | coreutils,
6 | gnugrep,
7 | gawk,
8 | gnused,
9 | snapraid,
10 | snapper,
11 | }:
12 | let
13 | name = "snapraid-btrfs";
14 | deps = [
15 | coreutils
16 | gnugrep
17 | gawk
18 | gnused
19 | snapraid
20 | snapper
21 | ];
22 | # snapper 11 has broken the btrfs script for now
23 | # patched at:
24 | # TODO https://github.com/automorphism88/snapraid-btrfs/issues/35
25 | # script =
26 | # (
27 | # writeScriptBin name
28 | # (builtins.readFile ((fetchFromGitHub {
29 | # owner = "automorphism88";
30 | # repo = "snapraid-btrfs";
31 | # rev = "6492a45ad55c389c0301075dcc8bc8784ef3e274";
32 | # sha256 = "IQgL55SMwViOnl3R8rQ9oGsanpFOy4esENKTwl8qsgo=";
33 | # })
34 | # + "/snapraid-btrfs"))
35 | # ).overrideAttrs (old: {
36 | # buildCommand = "${old.buildCommand}\n patchShebangs $out";
37 | # });
38 | script = (writeScriptBin name (builtins.readFile ./snapraid-btrfs.sh)).overrideAttrs (old: {
39 | buildCommand = "${old.buildCommand}\n patchShebangs $out";
40 | });
41 |
42 | in
43 | symlinkJoin {
44 | inherit name;
45 | paths = [ script ] ++ deps;
46 | buildInputs = [ makeWrapper ];
47 | postBuild = "wrapProgram $out/bin/${name} --set PATH $out/bin";
48 | }
49 |
--------------------------------------------------------------------------------
/docs/overview/structure.md:
--------------------------------------------------------------------------------
1 | # Repository Structure
2 |
3 | !!! note inline end
4 |
5 | Oh god writing this now is a horrid idea, I always refactor like 50 times...
6 |
7 | Here is a bit of a walkthrough of the repository structure so ~~you~~ I can have a vague idea on what is going on. Organizing a monorepo is hard at the best of times.
8 |
9 |
10 | ```
11 | ├── .github
12 | │ ├── renovate Renovate modules
13 | │ ├── workflows Github Action workflows (i.e. CI/Site building)
14 | │ └── renovate.json5 Renovate core settings
15 | ├── docs This mkdocs-material site
16 | ├── nixos                Nixos Modules
17 | │   ├── home             home-manager nix files
18 | │   │   ├── modules      home-manager modules
19 | │   │   └── truxnell     home-manager user
20 | │   ├── hosts            hosts for nix - starting point of configs.
21 | │   ├── modules          nix modules
22 | │   ├── overlays         nixpkgs overlays
23 | │   ├── pkgs             custom nix packages
24 | │   └── profiles         host profiles
25 | ├── README.md Github Repo landing page
26 | ├── flake.nix Core flake
27 | ├── flake.lock Lockfile
28 | ├── LICENSE Project License
29 | └── mkdocs.yml mkdocs settings
30 | ```
31 |
32 | Whew, that wasn't so hard right... right?
33 |
--------------------------------------------------------------------------------
/docs/overview/features.md:
--------------------------------------------------------------------------------
1 | # Features
2 |
3 | Some things I'm proud of. Or just happy they exist so I can forget about something until I need to worry.
4 |
5 |
6 | - :octicons-copy-16: [__Nightly Backups__](/maintenance/backups/) A ZFS snapshot is done at night, with restic then backing up both locally and to the cloud. NixOS wrappers make restoring a single command line entry. A ZFS snapshot before backup is important to ensure restic isn't backing up files that are in use, which would cause corruption.
7 | - :material-update: [__Software Updates__](/maintenance/software_updates/) Renovate Bot regularly runs on this Github repo, updating the flake lockfile, containers and other dependencies automatically. Automerge is enabled for updates I expect will be routine, but waits for manual PR approval for updates I suspect may require reading changelog for breaking changes
8 | - :ghost: __Impermanence__: Inspired by the [Erase your Darlings](https://grahamc.com/blog/erase-your-darlings/) post, Servers run zfs and rollback to a blank snapshot at night. This ensures repeatable NixOS deployments and no cruft, and also hardens servers a little.
9 | - :material-alarm-light: __SystemD Notifications__: Systemd hook that adds a pushover notification to __any__ systemd unit failure for any unit NixOS is aware of. No worrying about forgetting to add a notification to every new service or worrying about missing one.
10 |
11 |
--------------------------------------------------------------------------------
/nixos/pkgs/cockpit-files.nix:
--------------------------------------------------------------------------------
1 | {
2 | lib,
3 | stdenv,
4 | fetchzip,
5 | gettext,
6 | }:
7 |
8 | stdenv.mkDerivation rec {
9 | pname = "cockpit-files";
10 | version = "12";
11 |
12 | src = fetchzip {
13 | sha256 = "0mzx468dx8ss408pf1c7f5171ki7snsa0dfs9m7rm6dchk59rm9a";
14 | url = "https://github.com/cockpit-project/cockpit-files/releases/download/${version}/cockpit-files-${version}.tar.xz";
15 | };
16 |
17 | nativeBuildInputs = [
18 | gettext
19 | ];
20 |
21 | makeFlags = [
22 | "DESTDIR=$(out)"
23 | "PREFIX="
24 | ];
25 |
26 | # postPatch = ''
27 | # substituteInPlace Makefile \
28 | # --replace /usr/share $out/share
29 | # touch pkg/lib/cockpit.js
30 | # touch pkg/lib/cockpit-po-plugin.js
31 | # touch dist/manifest.json
32 | # '';
33 |
34 | # postFixup = ''
35 | # gunzip $out/share/cockpit/machines/index.js.gz
36 | # sed -i "s#/usr/bin/python3#/usr/bin/env python3#ig" $out/share/cockpit/machines/index.js
37 | # sed -i "s#/usr/bin/pwscore#/usr/bin/env pwscore#ig" $out/share/cockpit/machines/index.js
38 | # gzip -9 $out/share/cockpit/machines/index.js
39 | # '';
40 |
41 | dontBuild = true;
42 |
43 | meta = {
44 | description = "Cockpit UI for local files";
45 | license = lib.licenses.lgpl21;
46 | homepage = "https://github.com/cockpit-project/cockpit-files";
47 | platforms = lib.platforms.linux;
48 | maintainers = with lib.maintainers; [ ];
49 | };
50 | }
51 |
--------------------------------------------------------------------------------
/nixos/pkgs/cockpit-sensors.nix:
--------------------------------------------------------------------------------
1 | {
2 |   lib,
3 |   stdenv,
4 |   fetchzip,
5 |   gettext,
6 | }:
7 |
8 | stdenv.mkDerivation rec {
9 |   pname = "cockpit-sensors";
10 |   version = "1.1";
11 |
12 |   src = fetchzip {
13 |     url = "https://github.com/ocristopfer/cockpit-sensors/releases/download/${version}/cockpit-sensors.tar.xz";
14 |     sha256 = "sha256-HWcUUqXtxN00paaqRpcTwIlYy6D0Qz27C2PDekrGU+U=";
15 |   };
16 |
17 |   nativeBuildInputs = [
18 |     gettext
19 |   ];
20 |
21 |   makeFlags = [
22 |     "DESTDIR=$(out)"
23 |     "PREFIX="
24 |   ];
25 |
26 |   # postPatch = ''
27 |   #   substituteInPlace Makefile \
28 |   #     --replace /usr/share $out/share
29 |   #   touch pkg/lib/cockpit.js
30 |   #   touch pkg/lib/cockpit-po-plugin.js
31 |   #   touch dist/manifest.json
32 |   # '';
33 |
34 |   # postFixup = ''
35 |   #   gunzip $out/share/cockpit/machines/index.js.gz
36 |   #   sed -i "s#/usr/bin/python3#/usr/bin/env python3#ig" $out/share/cockpit/machines/index.js
37 |   #   sed -i "s#/usr/bin/pwscore#/usr/bin/env pwscore#ig" $out/share/cockpit/machines/index.js
38 |   #   gzip -9 $out/share/cockpit/machines/index.js
39 |   # '';
40 |
41 |   dontBuild = true;
42 |
43 |   meta = {
44 |     # was "virtual machines" — copy-paste error from another cockpit package
45 |     description = "Cockpit UI for hardware sensors";
46 |     license = lib.licenses.lgpl21;
47 |     # upstream lives under ocristopfer, not cockpit-project (see src url above)
48 |     homepage = "https://github.com/ocristopfer/cockpit-sensors";
49 |     platforms = lib.platforms.linux;
50 |     maintainers = with lib.maintainers; [ ];
51 |   };
52 | }
53 |
--------------------------------------------------------------------------------
/.github/workflows/update-flake.yaml:
--------------------------------------------------------------------------------
1 | name: update-flake-lock
2 | on:
3 |   workflow_dispatch: # manual trigger
4 |   schedule:
5 |     - cron: '13 4 * * 1' # 04:13 UTC every Monday (≈ 14:13 AEST)
6 |
7 | jobs:
8 |   update:
9 |     runs-on: ubuntu-latest
10 |     permissions:
11 |       contents: write # push the new lock file
12 |       pull-requests: write # open / merge the PR
13 |     steps:
14 |       # NOTE(review): the token generated here (steps.github-app-token) is never
15 |       # referenced by any later step — checkout and update-flake-lock fall back
16 |       # to the default GITHUB_TOKEN. Either pass it via their `token:` inputs
17 |       # (needed if auto-merge should trigger downstream workflows) or drop this step.
18 |       - name: Generate Github App Token
19 |         uses: navikt/github-app-token-generator@v1
20 |         id: github-app-token
21 |         with:
22 |           app-id: ${{ secrets.TRUXNELL_APP_ID }}
23 |           private-key: ${{ secrets.TRUXNELL_APP_PRIVATE_KEY }}
24 |       - uses: actions/checkout@v6
25 |       - name: Install Nix
26 |         uses: DeterminateSystems/nix-installer-action@main
27 |       - name: Update flake.lock
28 |         uses: DeterminateSystems/update-flake-lock@main
29 |         id: update-flake-lock
30 |         with:
31 |           commit-msg: "chore: Update flake.lock"
32 |           pr-title: "Update flake.lock"
33 |           pr-body: |
34 |             Automated changes by the [update-flake-lock](https://github.com/DeterminateSystems/update-flake-lock) GitHub Action.
35 |
36 |             ```
37 |             {{ env.GIT_COMMIT_MESSAGE }}
38 |             ```
39 |           pr-labels: |
40 |             automated
41 |       # enable auto-merge on the freshly-opened PR so it lands once checks pass
42 |       - run: "gh pr merge --auto --rebase --delete-branch ${{ steps.update-flake-lock.outputs.pull-request-number }}"
43 |         env:
44 |           GH_TOKEN: ${{ github.token }}
--------------------------------------------------------------------------------
/CLAUDE.md:
--------------------------------------------------------------------------------
1 | # Claude Configuration for NixOS Homelab Repository
2 |
3 | This file provides essential context for AI assistants working with this NixOS homelab configuration repository.
4 |
5 | ## Critical Methodology & Goals
6 | - **Primary Goal**: Simple, reliable, and robust homelab infrastructure
7 | - **Dependency Philosophy**: Avoid dependencies unless benefits outweigh maintenance burden
8 | - **Technology Preference**: Plain Nix/NixOS modules > OCI containers > complex tooling
9 | - **Stability**: Use stable channel for production hosts, unstable only when features justify it
10 |
11 | ## Quick Reference
12 | - **Application Development**: See [docs/APPLICATION_PATTERNS.md](docs/APPLICATION_PATTERNS.md)
13 | - **Repository Structure**: See [docs/ARCHITECTURE.md](docs/ARCHITECTURE.md)
14 | - **Development Workflow**: See [docs/DEVELOPMENT.md](docs/DEVELOPMENT.md)
15 |
16 | ## Key Files to Understand
17 | - `flake.nix`: Input management, system definitions
18 | - `nixos/global.nix`: Baseline configuration for all hosts
19 | - `nixos/lib/default.nix`: Custom helper functions
20 | - `Justfile`: Operational task definitions
21 |
22 | ## Environment Context
23 | - **Development**: Nix dev-container environment (no direct host access)
24 | - **Deployment**: Remote hosts via SSH (daedalus, shodan)
25 | - **Storage**: ZFS for important data, impermanence for system state
26 | - **Networking**: Internal domain `.l.voltaicforge.com` for services
27 |
28 | When making changes, always consider the impact on system reliability and maintainability.
29 |
--------------------------------------------------------------------------------
/nixos/modules/nixos/system/openssh.nix:
--------------------------------------------------------------------------------
1 | {
2 | lib,
3 | config,
4 | ...
5 | }:
6 | with lib;
7 | let
8 | cfg = config.mySystem.services.openssh;
9 | in
10 | {
11 | options.mySystem.services.openssh = {
12 | enable = mkEnableOption "openssh" // {
13 | default = true;
14 | };
15 | passwordAuthentication = mkOption {
16 | type = lib.types.bool;
17 | description = "If password can be accepted for ssh (commonly disable for security hardening)";
18 | default = false;
19 |
20 | };
21 | permitRootLogin = mkOption {
22 | type = types.enum [
23 | "yes"
24 | "without-password"
25 | "prohibit-password"
26 | "forced-commands-only"
27 | "no"
28 | ];
29 | description = "If root can login via ssh (commonly disable for security hardening)";
30 | default = "no";
31 |
32 | };
33 | };
34 |
35 | config = mkIf cfg.enable {
36 | services.openssh = {
37 | enable = true;
38 | openFirewall = true;
39 | # TODO: Enable this when option becomes available
40 | # Don't allow home-directory authorized_keys
41 | # authorizedKeysFiles = mkForce [ "/etc/ssh/authorized_keys.d/%u" ];
42 | settings = {
43 | # Harden
44 | PasswordAuthentication = cfg.passwordAuthentication;
45 | PermitRootLogin = cfg.permitRootLogin;
46 | # Automatically remove stale sockets
47 | StreamLocalBindUnlink = "yes";
48 | # Allow forwarding ports to everywhere
49 | GatewayPorts = "clientspecified";
50 | };
51 |
52 | };
53 |
54 | };
55 | }
56 |
--------------------------------------------------------------------------------
/nixos/modules/nixos/system/zfs.nix:
--------------------------------------------------------------------------------
1 | # Opt-in ZFS support: boot-time pool imports, scheduled maintenance
2 | # (scrub/trim) and pool metrics scraped locally by vmagent.
3 | {
4 |   lib,
5 |   config,
6 |   pkgs,
7 |   ...
8 | }:
9 | let
10 |   cfg = config.mySystem.system.zfs;
11 | in
12 | with lib;
13 | {
14 |   options.mySystem.system.zfs = {
15 |     enable = lib.mkEnableOption "zfs";
16 |     # Extra pools (beyond the root pool) to import automatically at boot.
17 |     mountPoolsAtBoot = lib.mkOption {
18 |       type = lib.types.listOf lib.types.str;
19 |       default = [ ];
20 |     };
21 | 
22 |   };
23 | 
24 |   config = lib.mkIf cfg.enable {
25 | 
26 |     # setup boot
27 |     boot = {
28 |       supportedFilesystems = [
29 |         "zfs"
30 |       ];
31 |       zfs = {
32 |         forceImportRoot = false; # if stuck on boot, modify grub options , force importing isn't secure
33 |         extraPools = cfg.mountPoolsAtBoot;
34 |       };
35 | 
36 |     };
37 | 
38 |     # Periodic maintenance: scrub for bit-rot detection, TRIM for SSDs.
39 |     services.zfs = {
40 |       autoScrub.enable = true;
41 |       # Defaults to weekly and is a bit too regular for my NAS
42 |       autoScrub.interval = "monthly";
43 |       trim.enable = true;
44 |     };
45 | 
46 |     # Expose pool health/usage metrics on the standard exporter port.
47 |     services.prometheus.exporters.zfs.enable = true;
48 | 
49 |     # Scrape the local ZFS exporter with vmagent every 10s.
50 |     services.vmagent = {
51 |       prometheusConfig = {
52 |         scrape_configs = [
53 |           {
54 |             job_name = "zfs";
55 |             scrape_interval = "10s";
56 |             static_configs = [
57 |               { targets = [ "127.0.0.1:${builtins.toString config.services.prometheus.exporters.zfs.port}" ]; }
58 |             ];
59 |           }
60 |         ];
61 |       };
62 |     };
63 | 
64 |     # ZED (ZFS Event Daemon) pushes pool events to ntfy for alerting.
65 |     services.zfs.zed.settings = {
66 |       ZED_NTFY_TOPIC = "homelab";
67 |       ZED_NTFY_URL = "https://ntfy.trux.dev";
68 |     };
69 |   };
70 | }
71 | 
--------------------------------------------------------------------------------
/nixos/modules/nixos/system/deploy-user.nix:
--------------------------------------------------------------------------------
1 | # Dedicated 'deploy' user for deploy-rs pushes: key-only SSH login plus
2 | # passwordless sudo scoped to the activation commands deploy-rs runs.
3 | {
4 |   lib,
5 |   config,
6 |   pkgs,
7 |   ...
8 | }:
9 | with lib;
10 | let
11 |   cfg = config.mySystem.deploy;
12 | in
13 | {
14 |   options.mySystem.deploy = {
15 |     enable = lib.mkOption {
16 |       type = lib.types.bool;
17 |       description = "Enable deploy user for deploy-rs";
18 |       default = false;
19 |     };
20 |   };
21 | 
22 |   config = mkIf cfg.enable {
23 |     users.users.deploy = {
24 |       isNormalUser = true;
25 |       shell = pkgs.fish;
26 |       extraGroups = [ "wheel" ];
27 |       # Login is key-only; no password is configured for this user.
28 |       openssh.authorizedKeys.keys = [
29 |         "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMZS9J1ydflZ4iJdJgO8+vnN8nNSlEwyn9tbWU9OcysW truxnell@home"
30 |       ];
31 |     };
32 | 
33 |     security.sudo = {
34 |       # Structured rule: allow switching the system configuration without a password.
35 |       extraRules = [
36 |         {
37 |           users = [ "deploy" ];
38 |           commands = [
39 |             {
40 |               command = "/run/current-system/bin/switch-to-configuration";
41 |               options = [ "NOPASSWD" ];
42 |             }
43 |           ];
44 |         }
45 |       ];
46 |       # Raw sudoers lines: deploy-rs invokes activate-rs both from the
47 |       # system profile and directly out of /nix/store, plus a cleanup rm.
48 |       extraConfig = ''
49 |         # Deploy user passwordless sudo for deploy-rs commands
50 |         deploy ALL=(ALL) NOPASSWD: /nix/var/nix/profiles/system/bin/activate-rs activate *
51 |         deploy ALL=(ALL) NOPASSWD: /nix/var/nix/profiles/system/bin/activate-rs wait *
52 |         deploy ALL=(ALL) NOPASSWD: /nix/store/*/bin/activate-rs activate *
53 |         deploy ALL=(ALL) NOPASSWD: /nix/store/*/bin/activate-rs wait *
54 |         deploy ALL=(ALL) NOPASSWD: /run/current-system/sw/bin/rm /tmp/deploy-rs*
55 |       '';
56 |     };
57 |   };
58 | }
59 | 
--------------------------------------------------------------------------------
/.sops.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # config files for sops & used for encrypting keys that sops-nix decrypts.
3 | # each machine key is derived from its generated `ssh_hosts_ed` file
4 | # via ssh-to-age
5 | # sops encrypts the secrets ready to decrypt with the private key of any of the below machines
6 | # OR my 'main' key thats kept outside this repo securely.
7 |
8 | # key-per-machine is a little more secure and a little more work than
9 | # copying one key to each machine
10 |
11 | keys:
12 | - &dns01 age1lj5vmr02qkudvv2xedfj5tq8x93gllgpr6tzylwdlt7lud4tfv5qfqsd5u
13 | - &dns02 age17edew3aahg3t5nte5g0a505sn96vnj8g8gqse8q06ccrrn2n3uysyshu2c
14 | - &citadel age1rpkr0le4ff550wgyazssfe8r335gjwpyflqezz7trtrhw6ygge3qgydv3y
15 | - &rickenbacker age1cp6vegrmqfkuj8nmt2u3z0sur7n0f7e9x9zmdv4zygp8j2pnucpsdkgagc
16 | - &shodan age1ekt5xz7u2xgdzgsrffhd9x22n80cn4thxd8zxjy2ey5vq3ca7gnqz25g5r
17 | - &daedalus_old age1mmdjc4qk20accal4p4yhm5d2nsnlfnmmn484pg4mqtsu70fqvy4s6namra
18 | - &durandal age1j2r8mypw44uvqhfs53424h6fu2rkr5m7asl7rl3zn3xzva9m3dcqpa97gw
19 | - &daedalus age1ezmtw7qaw93yggtcncqpyej9qjlref8au5uceqat49htmlfgps2swsttyr
20 | - &playsatan age1m24gvg0wq5ps872ezcjwdx7e9rrs65rq2vt05qwt9purptxyyessasckk9
21 | - &xerxes age13dmfusa7kvgmc9xqzu55ws07y6etzcxa5cx58vkthdaeyuv32ecq3c624g
22 |
23 | creation_rules:
24 | - path_regex: .*\.sops\.yaml$
25 | key_groups:
26 | - age:
27 | - *dns01
28 | - *dns02
29 | - *citadel
30 | - *rickenbacker
31 | - *shodan
32 | - *daedalus_old
33 | - *durandal
34 | - *daedalus
35 | - *playsatan
36 | - *xerxes
37 |
--------------------------------------------------------------------------------
/docs/monitoring/warnings.md:
--------------------------------------------------------------------------------
1 | I've added warnings and assertions to code using nix to help me avoid misconfigurations. For example, if a module needs a database enabled, it can abort a deployment if it is not enabled. Similarly, I have added warnings if I have disabled backups for production machines.
2 |
3 | !!! question "But why, when its not being shared with others?"
4 |
5 | Because I guarantee I'll somehow stuff it up down the track and accidentally disable things I didn't mean to. Roll your eyes, I'll thank myself later.
6 |
7 | > Learnt from: [Nix Manual](https://nlewo.github.io/nixos-manual-sphinx/development/assertions.xml.html)
8 |
9 | ## Warnings
10 |
11 | Warnings will print a warning message during a nix build or deployment, but **NOT** stop the action. Great for things like reminders on disabled features
12 |
13 | To add a warning inside a module:
14 |
15 | ```nix
16 | # Warn if backups are disable and machine isn't a dev box
17 | config.warnings = [
18 | (mkIf (!cfg.local.enable && config.mySystem.purpose != "Development")
19 | "WARNING: Local backups are disabled!")
20 | (mkIf (!cfg.remote.enable && config.mySystem.purpose != "Development")
21 | "WARNING: Remote backups are disabled!")
22 | ];
23 |
24 | ```
25 |
26 |
27 | 
28 | Oh THATS what I forgot to re-enable...
29 |
30 |
31 | ## Abort/assert
32 |
33 | Warnings' bigger, meaner brother. Stops a nix build/deploy dead in its tracks. Only useful for when deployment is incompatible with running - i.e. a dependency not met in options.
34 |
--------------------------------------------------------------------------------
/docs/vm/installing-zfs-impermance.md:
--------------------------------------------------------------------------------
1 | > https://grahamc.com/blog/erase-your-darlings/
2 |
3 | # Get hostid
4 |
5 | run `head -c 8 /etc/machine-id`
6 | and copy into networking.hostId to ensure ZFS doesn't get borked on reboot
7 |
8 | # Partitioning
9 |
10 | parted /dev/sda -- mklabel gpt
11 | parted /dev/sda -- mkpart root ext4 512MB -8GB
12 | parted /dev/sda -- mkpart ESP fat32 1MB 512MB
13 | parted /dev/sda -- set 2 esp on
14 |
15 | # Formatting
16 |
17 | mkswap -L swap /dev/sdap2
18 | swapon /dev/sdap2
19 | mkfs.fat -F 32 -n boot /dev/sdap3
20 |
21 | # ZFS on root partition
22 |
23 | zpool create -O mountpoint=none rpool /dev/sdap1
24 |
25 | zfs create -p -o mountpoint=legacy rpool/local/root
26 |
27 | ## immediate blank snapshot
28 |
29 | zfs snapshot rpool/local/root@blank
30 | mount -t zfs rpool/local/root /mnt
31 |
32 | # Boot partition
33 |
34 | mkdir /mnt/boot
35 | mount /dev/sdap3 /mnt/boot
36 |
37 | #mk nix
38 | zfs create -p -o mountpoint=legacy rpool/local/nix
39 | mkdir /mnt/nix
40 | mount -t zfs rpool/local/nix /mnt/nix
41 |
42 | # And a dataset for /home: if needed
43 |
44 | zfs create -p -o mountpoint=legacy rpool/safe/home
45 | mkdir /mnt/home
46 | mount -t zfs rpool/safe/home /mnt/home
47 |
48 | zfs create -p -o mountpoint=legacy rpool/safe/persist
49 | mkdir /mnt/persist
50 | mount -t zfs rpool/safe/persist /mnt/persist
51 |
52 | Set `networking.hostId` in the NixOS config to the output of `head -c 8 /etc/machine-id`
53 |
54 | nixos-install --impure --flake github:truxnell/nix-config#
55 |
56 | consider a nixos-enter to import a zpool if required (for NAS) instead of rebooting post-install
57 |
58 | NOTE: do secrets for sops and shit!!
59 |
--------------------------------------------------------------------------------
/.github/workflows/docs-release.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | name: "Docs: Release to GitHub pages"
3 |
4 | on:
5 | workflow_dispatch:
6 | push:
7 | branches:
8 | - main
9 | paths:
10 | - ".github/workflows/docs-release.yaml"
11 | - ".mkdocs.yml"
12 | - "docs/**"
13 |
14 | permissions:
15 | contents: write
16 |
17 | jobs:
18 | release-docs:
19 | name: Release documentation
20 | runs-on: ubuntu-24.04
21 | concurrency:
22 | group: ${{ github.workflow }}-${{ github.ref }}
23 | steps:
24 | - name: "Generate Short Lived OAuth App Token (ghs_*)"
25 | uses: actions/create-github-app-token@v2.2.1
26 | id: app-token
27 | with:
28 | app-id: "${{ secrets.TRUXNELL_APP_ID }}"
29 | private-key: "${{ secrets.TRUXNELL_APP_PRIVATE_KEY }}"
30 |
31 | - name: Checkout main branch
32 | uses: actions/checkout@v6
33 | with:
34 | token: ${{ steps.app-token.outputs.token }}
35 | fetch-depth: 0
36 |
37 | - uses: actions/setup-python@v6
38 | with:
39 | python-version: 3.x
40 |
41 | - name: Install requirements
42 | run: pip install -r docs/requirements.txt
43 |
44 | - name: Build and publish docs
45 | run: mkdocs build -f mkdocs.yml
46 |
47 | - name: Deploy
48 | uses: peaceiris/actions-gh-pages@v4.0.0
49 | if: ${{ github.ref == 'refs/heads/main' }}
50 | with:
51 | github_token: ${{ steps.app-token.outputs.token }}
52 | publish_dir: ./site
53 | destination_dir: docs
54 | user_name: "Trux-Bot[bot]"
55 | user_email: "Trux-Bot[bot] <19149206+trux-bot[bot]@users.noreply.github.com>"
56 |
--------------------------------------------------------------------------------
/nixos/pkgs/grafana-dashboards/default.nix:
--------------------------------------------------------------------------------
1 | # Package set of pre-built Grafana dashboards, fetched by ID/revision from
2 | # grafana.com and installed as plain JSON files for provisioning.
3 | { pkgs }:
4 | 
5 | with pkgs;
6 | 
7 | let
8 |   inherit (pkgs) stdenv fetchurl;
9 | in
10 | 
11 | lib.makeScope pkgs.newScope (
12 |   _self:
13 |   let
14 |     # Fetch a dashboard revision from grafana.com and copy it to
15 |     # $out/<pname>-<id>.json. Callers supply pname, id, version and hash.
16 |     buildGrafanaDashboard =
17 |       args:
18 |       stdenv.mkDerivation (
19 |         args
20 |         // {
21 |           pname = "grafana-dashboard-${args.pname}-${toString args.id}";
22 |           inherit (args) version;
23 |           src = fetchurl {
24 |             url = "https://grafana.com/api/dashboards/${toString args.id}/revisions/${args.version}/download";
25 |             inherit (args) hash;
26 |           };
27 |           # src is a single JSON file, nothing to unpack.
28 |           dontUnpack = true;
29 |           installPhase = ''
30 |             runHook preInstall
31 |             mkdir -p $out
32 |             cp $src $out/${args.pname}-${toString args.id}.json
33 |             runHook postInstall
34 |           '';
35 |         }
36 |       );
37 |   in
38 |   {
39 |     inherit buildGrafanaDashboard;
40 | 
41 |     node-exporter = buildGrafanaDashboard {
42 |       id = 1860;
43 |       pname = "node-exporter-full";
44 |       version = "31";
45 |       hash = "sha256-QsRHsnayYRRGc+2MfhaKGYpNdH02PesnR5b50MDzHIg=";
46 |     };
47 |     # Upstream revision is fetched for pinning, but the JSON actually used
48 |     # is the local copy (see overrideAttrs below).
49 |     node-systemd =
50 |       (buildGrafanaDashboard {
51 |         id = 1617;
52 |         pname = "node-systemd";
53 |         version = "1";
54 |         hash = "sha256-MEWU5rIqlbaGu3elqdSoMZfbk67WDnH0VWuC8FqZ8v8=";
55 |       }).overrideAttrs
56 |         (_: {
57 |           src = ./node-systemd.json; # sadly only imported dashboards work
58 |         });
59 | 
60 |     nginx = buildGrafanaDashboard {
61 |       id = 12708;
62 |       pname = "nginx";
63 |       version = "1";
64 |       hash = "sha256-T1HqWbwt+i/We+Y2B7hcl3CijGxZF5QI38aPcXjk9y0=";
65 |     };
66 | 
67 |   }
68 | )
69 | 
--------------------------------------------------------------------------------
/docs/motd.md:
--------------------------------------------------------------------------------
1 | # Message of the day
2 |
3 | Why not include a nice message of the day for each server I log into?
4 |
5 | The below gives some insight into what the servers running, status of zpools, usage, etc.
6 | While not shown below - thankfully - if a zpool error is found the status gives a full `zpool status -x` debrief which is particularly eye-catching upon login.
7 |
8 | I've also squeezed in a 'reboot required' flag for when the server had detected its running kernel/init/systemd is a different version to what it booted with - useful to know when long running servers require a reboot to pick up new kernel/etc versions.
9 |
10 |
11 | 
12 | Message of the day
13 |
14 |
15 | Code TLDR
16 |
17 | :simple-github:[/nixos/modules/nixos/system/motd](https://github.com/truxnell/nix-config/blob/462144babe7e7b2a49a985afe87c4b2f1fa8c3f9/nixos/modules/nixos/system/motd/default.nix)
18 |
19 | Write a shell script using nix with a bash motd of your choosing.
20 |
21 | ```nix
22 | let
23 | motd = pkgs.writeShellScriptBin "motd"
24 | ''
25 | #! /usr/bin/env bash
26 | source /etc/os-release
27 | service_status=$(systemctl list-units | grep podman-)
28 |
29 | <- SNIP ->
30 | printf "$BOLDService status$ENDCOLOR\n"
31 | '';
32 | in
33 | ```
34 |
35 | This gets us a shell script we can then directly call into systemPackages - and after that its just a short hop to make this part of the shell init.
36 |
37 | !!! note
38 |
39 | Replace with your preferred shell!
40 |
41 | ```nix
42 | environment.systemPackages = [
43 | motd
44 | ];
45 | programs.fish.interactiveShellInit = ''
46 | motd
47 | '';
48 | ```
49 |
--------------------------------------------------------------------------------
/nixos/modules/nixos/services/reboot-required-check.nix:
--------------------------------------------------------------------------------
1 | {
2 |   lib,
3 |   config,
4 |   ...
5 | }:
6 | with lib;
7 | let
8 |   cfg = config.mySystem.services.rebootRequiredCheck;
9 | in
10 | {
11 |   # Hourly check that creates /var/run/reboot-required when the booted
12 |   # kernel/initrd/modules differ from the current system generation.
13 |   # NOTE(review): presumably the MOTD's 'reboot required' flag reads this
14 |   # marker file -- confirm against the motd module.
15 |   options.mySystem.services.rebootRequiredCheck.enable = mkEnableOption "Reboot required check";
16 | 
17 |   config = mkIf cfg.enable {
18 | 
19 |     # Enable timer
20 |     systemd.timers."reboot-required-check" = {
21 |       wantedBy = [ "timers.target" ];
22 |       timerConfig = {
23 |         # start at boot
24 |         OnBootSec = "0m";
25 |         # check every hour
26 |         OnUnitActiveSec = "1h";
27 |         Unit = "reboot-required-check.service";
28 |       };
29 |     };
30 | 
31 |     # Below script will check if initrd, kernel, kernel-modules that were booted match the current system
32 |     # i.e. if a nixos-rebuild switch has upgraded anything
33 |     systemd.services."reboot-required-check" = {
34 |       script = ''
35 |         #!/usr/bin/env bash
36 | 
37 |         # compare current system with booted sysetm to determine if a reboot is required
38 |         if [[ "$(readlink /run/booted-system/{initrd,kernel,kernel-modules})" == "$(readlink /run/current-system/{initrd,kernel,kernel-modules})" ]]; then
39 |           # check if the '/var/run/reboot-required' file exists and if it does, remove it
40 |           if [[ -f /var/run/reboot-required ]]; then
41 |             rm /var/run/reboot-required || { echo "Failed to remove /var/run/reboot-required"; exit 1; }
42 |           fi
43 |         else
44 |           echo "reboot required"
45 |           touch /var/run/reboot-required || { echo "Failed to create /var/run/reboot-required"; exit 1; }
46 |         fi
47 |       '';
48 |       # oneshot as root: the marker file lives in root-owned /var/run.
49 |       serviceConfig = {
50 |         Type = "oneshot";
51 |         User = "root";
52 |       };
53 |     };
54 | 
55 |   };
56 | 
57 | }
58 | 
--------------------------------------------------------------------------------
/nixos/modules/applications/search/searxng/update-hosts.py:
--------------------------------------------------------------------------------
1 | import requests
2 | import re
3 | from collections import defaultdict
4 |
5 | def parse_domains(url):
6 | response = requests.get(url)
7 | content = response.content.decode('utf-8')
8 | domains = defaultdict(list)
9 | for line in content.splitlines():
10 | if 'site=' not in line:
11 | continue
12 | site_name = re.search(r'site=([^,]*)', line).group(1)
13 | if 'boost' in line:
14 | domains['higher'].append(site_name)
15 | elif 'downrank' in line:
16 | domains['lower'].append(site_name)
17 | else:
18 | domains['remove'].append(site_name)
19 | for key in domains:
20 | domains[key] = list(set(domains[key]))
21 | return dict(domains)
22 |
23 | urls = [
24 | 'https://raw.githubusercontent.com/kynoptic/wikipedia-reliable-sources/refs/heads/main/wikipedia-reliable-sources.goggle',
25 | 'https://raw.githubusercontent.com/gayolGate/gayolGate/8f26b202202e76896bce59d865c5e7d4c35d5855/goggle.txt'
26 |
27 | ]
28 |
29 | all_domains = defaultdict(list)
30 | for key in ['neutral', 'lower', 'higher', 'remove']:
31 | with open(f'{key}', 'r') as file:
32 | all_domains[key] = [line.strip() for line in file.readlines()]
33 |
34 |
35 | for url in urls:
36 | domains = parse_domains(url)
37 | for key in domains:
38 | all_domains[key].extend(domains[key])
39 |
40 |
41 | all_domains['remove'] = [domain for domain in all_domains['remove'] if domain not in all_domains['higher'] and domain not in all_domains['lower'] and domain not in all_domains['neutral']]
42 |
43 | keys = ['higher', 'lower', 'remove']
44 | for key in keys:
45 | with open(f'{key}_domains.txt', 'w') as file:
46 | for domain in all_domains[key]:
47 | file.write(f"{domain}\n")
48 |
--------------------------------------------------------------------------------
/nixos/modules/applications/networking/mosquitto/default.nix:
--------------------------------------------------------------------------------
1 | # Mosquitto MQTT broker with a single password-protected user ('mq'),
2 | # persisted state under impermanence, and LAN access on port 1883.
3 | {
4 |   lib,
5 |   config,
6 |   ...
7 | }:
8 | with lib;
9 | let
10 |   cfg = config.mySystem.services.mosquitto;
11 |   # persistentFolder = "${config.mySystem.persistentFolder}/nixos/services/mosquitto/";
12 |   app = "mosquitto";
13 |   user = app;
14 |   group = app;
15 |   appFolder = config.services.mosquitto.dataDir;
16 | in
17 | {
18 |   options.mySystem.services.mosquitto.enable = mkEnableOption "mosquitto MQTT";
19 | 
20 |   config = mkIf cfg.enable {
21 | 
22 |     # Hashed password for the 'mq' user, decrypted by sops-nix at activation.
23 |     sops.secrets."services/mosquitto/mq/hashedPassword" = {
24 |       sopsFile = ./secrets.sops.yaml;
25 |       owner = app;
26 |       group = app;
27 |       restartUnits = [ "${app}.service" ];
28 |     };
29 | 
30 |     services.mosquitto = {
31 |       enable = true;
32 |       # persistence for convenience on restarts
33 |       # but not backed up, there is no data
34 |       # that requires keeping in MQTT
35 |       settings = {
36 |         persistence_location = appFolder;
37 |         max_keepalive = 300;
38 |       };
39 | 
40 |       listeners = [
41 |         {
42 |           users.mq = {
43 |             # '#' is the MQTT wildcard: full read/write on every topic.
44 |             acl = [
45 |               "readwrite #"
46 |             ];
47 |             hashedPasswordFile = config.sops.secrets."services/mosquitto/mq/hashedPassword".path;
48 |           };
49 |         }
50 |       ];
51 |     };
52 | 
53 |     environment.persistence."${config.mySystem.system.impermanence.persistPath}" =
54 |       lib.mkIf config.mySystem.system.impermanence.enable
55 |         {
56 |           directories = [
57 |             {
58 |               directory = appFolder;
59 |               inherit user;
60 |               inherit group;
61 |               mode = "750";
62 |             }
63 |           ];
64 |         };
65 | 
66 |     users.users.truxnell.extraGroups = [ "mosquitto" ];
67 |     # Standard unencrypted MQTT port.
68 |     networking.firewall.allowedTCPPorts = [ 1883 ];
69 | 
70 |   };
71 | }
72 | 
--------------------------------------------------------------------------------
/nixos/modules/nixos/system/impermanence.nix:
--------------------------------------------------------------------------------
1 | # Impermanence ('erase your darlings'): roll the root dataset back to a
2 | # blank ZFS snapshot on every boot, keeping only the listed paths under
3 | # the persist dataset.
4 | {
5 |   lib,
6 |   config,
7 |   ...
8 | }:
9 | let
10 |   cfg = config.mySystem.system.impermanence;
11 | in
12 | with lib;
13 | {
14 |   options.mySystem.system.impermanence = {
15 |     enable = mkEnableOption "system impermanence";
16 |     # Name of the blank snapshot taken right after dataset creation.
17 |     rootBlankSnapshotName = lib.mkOption {
18 |       type = lib.types.str;
19 |       default = "blank";
20 |     };
21 |     # ZFS dataset mounted at / that is rolled back each boot.
22 |     rootPoolName = lib.mkOption {
23 |       type = lib.types.str;
24 |       default = "rpool/local/root";
25 |     };
26 |     persistPath = lib.mkOption {
27 |       type = lib.types.str;
28 |       default = "/persist";
29 |     };
30 | 
31 |   };
32 | 
33 |   config = lib.mkIf cfg.enable {
34 |     # move ssh keys
35 | 
36 |     # bind an initrd command to rollback to blank root after boot
37 |     boot.initrd.postDeviceCommands = lib.mkAfter ''
38 |       zfs rollback -r ${cfg.rootPoolName}@${cfg.rootBlankSnapshotName}
39 |     '';
40 | 
41 |     systemd.tmpfiles.rules = mkIf config.services.openssh.enable [
42 |       # "d /etc/ 0755 root root -" #The - disables automatic cleanup, so the file wont be removed after a period
43 |       # "d /etc/ssh/ 0755 root root -" #The - disables automatic cleanup, so the file wont be removed after a period
44 |     ];
45 | 
46 |     environment.persistence."${cfg.persistPath}" = {
47 |       hideMounts = true;
48 |       directories = [
49 |         "/var/log" # persist logs between reboots for debugging
50 |         "/var/lib/containers" # cache files (restic, nginx, containers)
51 |         "/var/lib/nixos" # nixos state
52 | 
53 |       ];
54 |       files = [
55 |         "/etc/machine-id"
56 |         # "/etc/adjtime" # hardware clock adjustment
57 |         # ssh keys (also what sops-nix derives the host age key from)
58 |         "/etc/ssh/ssh_host_ed25519_key"
59 |         "/etc/ssh/ssh_host_ed25519_key.pub"
60 |         "/etc/ssh/ssh_host_rsa_key"
61 |         "/etc/ssh/ssh_host_rsa_key.pub"
62 |       ];
63 |     };
64 | 
65 |   };
66 | }
67 | 
--------------------------------------------------------------------------------
/.github/renovate/autoMerge.json5:
--------------------------------------------------------------------------------
1 | {
2 |
3 | "packageRules": [
4 | {
5 | // automerge minor, patch, digest
6 | "matchDatasources": ['docker', 'github-tags'],
7 | "automerge": "true",
8 | "automergeType": "branch",
9 | "schedule": [ "before 11am on Sunday" ],
10 | "matchUpdateTypes": [ 'minor', 'patch', 'digest'],
11 | "matchPackageNames": [
12 | 'ghcr.io/twin/gatus',
13 | 'vaultwarden/server',
14 | 'sissbruecker/linkding',
15 | 'ghcr.io/autobrr/autobrr',
16 | 'gotenberg/gotenberg',
17 | 'ghcr.io/recyclarr/recyclarr',
18 | 'ghcr.io/home-operations/sabnzbd',
19 | 'ghcr.io/home-operations/qbittorrent',
20 | 'docker.io/filebrowser/filebrowser',
21 | 'ghcr.io/jorenn92/Maintainerr',
22 | 'github.com/zricethezav/gitleaks',
23 | 'github.com/actions/create-github-app-token',
24 | 'ghcr.io/autobrr/qui',
25 | 'docker.io/sissbruecker/linkding',
26 | 'ghcr.io/home-operations/home-assistant'
27 |
28 | ],
29 |
30 | },
31 | // automerge patch and digest
32 | {
33 | "matchDatasources": ['docker'],
34 | "automerge": "true",
35 | "automergeType": "branch",
36 | "schedule": [ "before 11am on Sunday" ],
37 | "matchUpdateTypes": [ 'patch', 'digest'],
38 | "matchPackageNames": [
39 | "ghcr.io/gethomepage/homepage",
40 | "garethgeorge/backrest",
41 | "ghcr.io/buroa/qbtools",
42 | "ghcr.io/dgtlmoon/changedetection.io",
43 | "ghcr.io/amruthpillai/reactive-resume",
44 | "ghcr.io/foxxmd/multi-scrobbler",
45 | "ghcr.io/raylas/nextdns-exporter",
46 |
47 | ]
48 |
49 | },
50 | {
51 | // automerge all digests
52 | "matchDatasources": ['docker'],
53 | "automerge": "true",
54 | "automergeType": "branch",
55 | "matchUpdateTypes": [ 'digest'],
56 | },
57 |
58 | ],
59 | }
60 |
--------------------------------------------------------------------------------
/docs/vm/secrets.md:
--------------------------------------------------------------------------------
1 | # Secrets
2 |
3 | Secrets is always a challenge for systems that work like Infrastructure-as-Code (IAC). I have taken the approach of using [sops-nix](https://github.com/Mic92/sops-nix) as I am familiar with the [sops](https://github.com/getsops/sops) project, like it and like the [age](https://github.com/FiloSottile/age) key system.
4 |
5 | How I'm using sops-nix:
6 |
7 | * Take a encrypted file that is in a folder/repo
8 | * Upon `nixos-rebuild` commands decrypt the file with the hosts ssh key
9 | * Place the unencrypted file in `/run/secrets/` folder with specific user/group/permissions
10 | * Services can then reference these files in a number of ways to ingest the secret.
11 |
12 | # Setup
13 |
14 | There are setup instructions for an initial setup of sops-nix in the repository. At its core, you will want to:
15 | * Get sops-nix into your flake (Docs at: https://github.com/Mic92/sops-nix)
16 | * Create the `.sops.yaml` file in the root of the git repo (Docs at: https://github.com/Mic92/sops-nix)
17 | * Populate keys from hosts (preferably by `nix-shell -p ssh-to-age --run 'cat /etc/ssh/ssh_host_ed25519_key.pub | ssh-to-age'` on each host)
18 | * Encrypt each secret in a `secrets.sops.yaml` file (`sops -e -i path/to/file/filename.sops.yaml`)
19 | * Populate secrets in your nix
20 |
21 | !!! Info
22 |
23 | I have chosen to let each host have a unique age key, generated by its ssh-key, which is generated unique by nix at install. This means I have a key per host in my `.sops.yaml` file, and each machine can decrypt the secret with its own key.
24 | Another approach is to generate one master key, which Is then pushed to each machine. I chose not to do this as there is some small security benefit of having a unique key per host.
25 |
26 | ## Adding new hosts
27 |
28 | On new machine, run below to transfer its shiny new ed25519 to age
29 |
30 | ```sh
31 | nix-shell -p ssh-to-age --run 'cat /etc/ssh/ssh_host_ed25519_key.pub | ssh-to-age'
32 | ```
33 |
34 | Copy this into `./.sops.yaml` in base repo, then re-run taskfile `task sops:re-encrypt` to loop through all sops keys, decrypt then re-encrypt
35 |
--------------------------------------------------------------------------------
/nixos/modules/applications/_archive/glances/default.nix:
--------------------------------------------------------------------------------
1 | # Glances system monitor running as a web server on port 61208, with an
2 | # optional gatus uptime monitor against its REST API.
3 | {
4 |   pkgs,
5 |   config,
6 |   lib,
7 |   ...
8 | }:
9 | let
10 |   cfg = config.mySystem.services.glances;
11 |   app = "Glances";
12 | in
13 | with lib;
14 | {
15 |   options.mySystem.services.glances = {
16 |     enable = mkEnableOption "Glances system monitor";
17 |     monitor = mkOption {
18 |       type = lib.types.bool;
19 |       description = "Enable gatus monitoring";
20 |       default = true;
21 | 
22 |     };
23 |     addToHomepage = mkOption {
24 |       type = lib.types.bool;
25 |       description = "Add to homepage";
26 |       default = true;
27 | 
28 |     };
29 | 
30 |   };
31 |   config = mkIf cfg.enable {
32 | 
33 |     # psutil/hddtemp back the sensor and disk plugins.
34 |     environment.systemPackages = with pkgs; [
35 |       glances
36 |       python310Packages.psutil
37 |       hddtemp
38 |     ];
39 | 
40 |     # port 61208
41 |     systemd.services.glances = {
42 |       script = ''
43 |         ${pkgs.glances}/bin/glances --enable-plugin smart --webserver --bind 0.0.0.0
44 |       '';
45 |       after = [ "network.target" ];
46 |       wantedBy = [ "multi-user.target" ];
47 |     };
48 | 
49 |     networking = {
50 |       firewall.allowedTCPPorts = [ 61208 ];
51 |     };
52 | 
53 |     environment.etc."glances/glances.conf" = {
54 |       text = ''
55 |         [global]
56 |         check_update=False
57 | 
58 |         [network]
59 |         hide=lo,docker.*
60 | 
61 |         [diskio]
62 |         hide=loop.*
63 | 
64 |         [containers]
65 |         disable=False
66 |         podman_sock=unix:///var/run/podman/podman.sock
67 | 
68 |         [connections]
69 |         disable=True
70 | 
71 |         [irq]
72 |         disable=True
73 |       '';
74 |     };
75 | 
76 |     mySystem.services.gatus.monitors = mkIf cfg.monitor [
77 |       {
78 | 
79 |         name = "${app} ${config.networking.hostName}";
80 |         group = "${app}";
81 |         # Fixed: URL previously had a stray colon between port and path
82 |         # (":61208:/api/3/status"), which is not a valid URL.
83 |         url = "http://${config.networking.hostName}.${config.mySystem.internalDomain}:61208/api/3/status";
84 | 
85 |         interval = "1m";
86 |         conditions = [
87 |           "[CONNECTED] == true"
88 |           "[STATUS] == 200"
89 |           "[RESPONSE_TIME] < 1500"
90 |         ];
91 |       }
92 |     ];
93 | 
94 |   };
95 | }
96 | 
--------------------------------------------------------------------------------
/nixos/modules/applications/networking/node-red/default.nix:
--------------------------------------------------------------------------------
1 | # Node-RED behind nginx (TLS via the domain's ACME cert), with
2 | # impermanence persistence, a gatus monitor and nightly restic backups.
3 | {
4 |   lib,
5 |   config,
6 |   ...
7 | }:
8 | with lib;
9 | let
10 |   cfg = config.mySystem.services.node-red;
11 |   app = "node-red";
12 |   # persistentFolder = "${config.mySystem.persistentFolder}/var/lib/${appFolder}";
13 |   appFolder = config.services.node-red.userDir;
14 |   inherit (config.services.node-red) user;
15 |   inherit (config.services.node-red) group;
16 |   url = "${app}.${config.networking.domain}";
17 | 
18 | in
19 | {
20 |   options.mySystem.services.node-red = {
21 |     enable = mkEnableOption "node-red";
22 |     addToHomepage = mkEnableOption "Add ${app} to homepage" // {
23 |       default = true;
24 |     };
25 |   };
26 | 
27 |   config = mkIf cfg.enable {
28 | 
29 |     services.node-red = {
30 |       enable = true;
31 |     };
32 | 
33 |     services.nginx.virtualHosts."${app}.${config.networking.domain}" = {
34 |       useACMEHost = config.networking.domain;
35 |       forceSSL = true;
36 |       locations."^~ /" = {
37 |         proxyPass = "http://127.0.0.1:${builtins.toString config.services.node-red.port}";
38 |         # Node-RED's editor uses websockets for live flow updates.
39 |         proxyWebsockets = true;
40 |       };
41 |     };
42 | 
43 |     environment.persistence."${config.mySystem.system.impermanence.persistPath}" =
44 |       lib.mkIf config.mySystem.system.impermanence.enable
45 |         {
46 |           directories = [
47 |             {
48 |               directory = appFolder;
49 |               inherit user;
50 |               inherit group;
51 |               mode = "750";
52 |             }
53 |           ];
54 |         };
55 | 
56 |     mySystem.services.gatus.monitors = [
57 |       {
58 | 
59 |         name = app;
60 |         # NOTE(review): group "media" looks copy-pasted from another module
61 |         # -- confirm the intended gatus group for node-red.
62 |         group = "media";
63 |         url = "https://${url}";
64 |         interval = "1m";
65 |         conditions = [
66 |           "[CONNECTED] == true"
67 |           "[STATUS] == 200"
68 |           "[RESPONSE_TIME] < 1500"
69 |         ];
70 |       }
71 |     ];
72 | 
73 |     services.restic.backups = config.lib.mySystem.mkRestic {
74 |       inherit app;
75 |       user = builtins.toString user;
76 |       excludePaths = [ "Backups" ];
77 |       paths = [ appFolder ];
78 |       inherit appFolder;
79 |     };
80 | 
81 |   };
82 | }
83 | 
--------------------------------------------------------------------------------
/docs/index.md:
--------------------------------------------------------------------------------
1 | 👋 Welcome to my NixOS home and homelab configuration. This monorepo is my personal :simple-nixos: nix/nixos setup for all my devices, specifically my homelab.
2 |
3 | This is the end result of a recovering :simple-kubernetes: k8s addict - who no longer enjoyed the time and effort I **personally** found it took to run k8s at home.
4 |
5 | ## Why?
6 |
7 | Having needed a break from hobbies for some health related reasons, I found coming back to an unpatched cluster a chore, which was left unattended. Then a cheap SSD in my custom VyOS router blew, leading me to just put back in my Unifi Dreammachine router, which broke the custom DNS I was running for my cluster, which caused it issues.
8 |
9 | During fixing the DNS issue, a basic software upgrade for the custom k8s OS I was running k8s on broke my cluster for the 6th time running, coupled with using a older version of the script tool I used to manage its machine config yaml, which ended up leading to my 6th k8s disaster recovery :octicons-info-16:{ title="No I don't want to talk about it" }).
10 |
11 | Looking at my boring :simple-ubuntu: Ubuntu ZFS nas which just ran and ran and ran without needing TLC, and remembering the old days with Ubuntu + Docker Compose being hands-off :octicons-info-16:{ title="Too much hands off really as I auto-updated everything, but I digress" }), I dove into nix, with the idea of getting back to basics of boring proven tools, with the power of nix's declarative system.
12 |
13 | ## Goals
14 |
15 | One of my goals is to bring what I learnt running k8s at home with some of the best homelabbers, into the nix world and see just how much of the practices I learnt I can apply to a nix setup, while focussing on having a solid, reliable, setup that I can leave largely unattended for months without issues cropping up.
16 |
17 | The goal of this doc is for me to slow down a bit and jot down how and why I am doing what I'm doing in a module, and cover how I have approached the facets of homelabbing, so **YOU** can understand, steal with pride from my code, and hopefully(?) learn a thing or two.
18 |
19 | To _teach me_ a thing or two, contact me or raise a Issue. PR's may or may not be taken as a personal attack - this is my home setup after all.
20 |
--------------------------------------------------------------------------------
/docs/overview/goals.md:
--------------------------------------------------------------------------------
1 | # Goals
2 |
3 | When I set about making this lab I had a number of goals - I wonder how well I will do :thinking:?
4 |
5 | A master list of ideas/goals/etc can be found at :octicons-issue-tracks-16: [Issue #1](https://github.com/truxnell/nix-config/issues/1)
6 |
7 |
8 |
9 | - __:material-sword: Stability__ NixOS stable channel for core services unstable for desktop apps/non-mission critical where desired. Containers with SHA256 pinning for server apps
10 | - __:kiss: KISS__ Keep it Simple, use boring, reliable, trusted tools - not todays flashy new software repo
11 | - __:zzz: Easy Updates__ Weekly update schedule, utilizing Renovate for updating lockfile and container images. Autoupdates enabled off main branch for mission critical. Aim for 'magic rollback' on upgrade failure
12 | - __:material-cloud-upload: Backups__ Nightly restic backups to both cloud and NAS. All databases to have nightly backups. _Test backups regularly_
13 | - __:repeat: Reproducibility__ Flakes & Git for version pinning, SHA256 tags for containers.
14 | - __:alarm_clock: Monitoring__ Automated monitoring on failure & critical summaries, using basic tools. Use Gatus for both internal and external monitoring
15 | - __:clipboard: Continuous Integration__ CI against main branch to ensure all code compiles OK. Use PR's to add to main and dont skip CI due to impatience. Comprehensive testing infrastructure with automated validation, linting, and formatting checks
16 | - __:material-security: Security__ Dont use containers with S6 overlay/root (i.e. LSIO :grey_question:{ title="LSIO trades security for convenience with their container configuration" }). Expose minimal ports at router, Reduce attack surface by keeping it simple, review hardening containers/podman/NixOS
17 | - __:fontawesome-solid-martini-glass-citrus: Ease of administration__ Lean into the devil that is SystemD - and have one standard interface to see logs, manipulate services, etc. Run containers as podman services, and webui's for watching/debugging
18 | - __:simple-letsencrypt: Secrets__ _~ssshh~.._ [Sops-nix](https://github.com/Mic92/sops-nix) for secrets, living in my gitrepo. Avoid cloud services like I used in k8s (i.e. [Doppler.io](https://doppler.io))
19 |
20 |
--------------------------------------------------------------------------------
/nixos/modules/applications/search/searxng/higher:
--------------------------------------------------------------------------------
1 | github.com
2 | reddit.com
3 | stackoverflow.com
4 | en.wikipedia.org
5 | news.ycombinator.com
6 | developer.mozilla.org
7 | wikipedia.org
8 | stackexchange.com
9 | wiki.archlinux.org
10 | superuser.com
11 | docs.python.org
12 | arxiv.org
13 | imdb.com
14 | gitlab.com
15 | rtings.com
16 | serverfault.com
17 | store.steampowered.com
18 | nytimes.com
19 | unix.stackexchange.com
20 | rottentomatoes.com
21 | theverge.com
22 | ncbi.nlm.nih.gov
23 | theguardian.com
24 | doc.rust-lang.org
25 | en.wiktionary.org
26 | youtube.com
27 | seriouseats.com
28 | archive.org
29 | css-tricks.com
30 | docs.rs
31 | goodreads.com
32 | learn.microsoft.com
33 | pubmed.ncbi.nlm.nih.gov
34 | postgresql.org
35 | mayoclinic.org
36 | medium.com
37 | bbc.com
38 | en.cppreference.com
39 | developer.apple.com
40 | genius.com
41 | de.wikipedia.org
42 | askubuntu.com
43 | nhs.uk
44 | docs.aws.amazon.com
45 | alternativeto.net
46 | minecraft.wiki
47 | docs.microsoft.com
48 | support.apple.com
49 | investopedia.com
50 | washingtonpost.com
51 | en.wikipedia.org
52 | developer.mozilla.org
53 | reddit.com
54 | news.ycombinator.com
55 | wikipedia.org
56 | wiki.archlinux.org
57 | github.com
58 | stackoverflow.com
59 | docs.python.org
60 | imdb.com
61 | de.wikipedia.org
62 | arxiv.org
63 | store.steampowered.com
64 | en.wiktionary.org
65 | rottentomatoes.com
66 | seriouseats.com
67 | rtings.com
68 | en.cppreference.com
69 | doc.rust-lang.org
70 | minecraft.wiki
71 | letterboxd.com
72 | goodreads.com
73 | nytimes.com
74 | docs.rs
75 | stackexchange.com
76 | archive.org
77 | youtube.com
78 | learn.microsoft.com
79 | myanimelist.net
80 | postgresql.org
81 | ncbi.nlm.nih.gov
82 | fr.wikipedia.org
83 | anilist.co
84 | genius.com
85 | man7.org
86 | merriam-webster.com
87 | superuser.com
88 | theverge.com
89 | themoviedb.org
90 | nhs.uk
91 | css-tricks.com
92 | pubmed.ncbi.nlm.nih.gov
93 | metacritic.com
94 | developer.apple.com
95 | gitlab.com
96 | theguardian.com
97 | registry.terraform.io
98 | mayoclinic.org
99 | serverfault.com
100 | medium.com
--------------------------------------------------------------------------------
/nixos/modules/nixos/system/restic-justfile.nix:
--------------------------------------------------------------------------------
1 | {
2 |   lib,
3 |   config,
4 |   pkgs,
5 |   ...
6 | }:
7 | with lib;
8 | let
9 |   resticBackups = config.services.restic.backups or { };
10 |   backupNames = builtins.attrNames resticBackups;
11 |   # NOTE(review): backup-local starts restic_nightly_snapshot.service — confirm that unit name exists
12 |   # Extract local and remote backups
13 |   localBackups = builtins.filter (name: lib.hasSuffix "-local" name) backupNames;
14 |   remoteBackups = builtins.filter (name: lib.hasSuffix "-remote" name) backupNames;
15 |
16 |   # Generate the complete Justfile content
17 |   justfileContent = ''
18 |     # Restic backup tasks
19 |     # Auto-generated by NixOS configuration
20 |     # Do not edit manually - changes will be overwritten
21 |
22 |     default:
23 |       @just --list
24 |
25 |     backup-local:
26 |       #!/usr/bin/env bash
27 |       set -euo pipefail
28 |       sudo systemctl start restic_nightly_snapshot.service
29 |     ${
30 |       if localBackups != [ ] then
31 |         lib.concatMapStrings (
32 |           name: "  sudo systemctl start restic-backups-${name}.service\n"
33 |         ) localBackups
34 |       else
35 |         "  echo 'No local backups configured'\n"
36 |     }
37 |
38 |     backup-remote:
39 |       #!/usr/bin/env bash
40 |       set -euo pipefail
41 |     ${
42 |       if remoteBackups != [ ] then
43 |         lib.concatMapStrings (
44 |           name: "  sudo systemctl start restic-backups-${name}.service\n"
45 |         ) remoteBackups
46 |       else
47 |         "  echo 'No remote backups configured'\n"
48 |     }
49 |
50 |     backup-all:
51 |     ${
52 |       if localBackups != [ ] || remoteBackups != [ ] then
53 |         lib.concatStringsSep "" (
54 |           lib.optional (localBackups != [ ]) "  just backup-local\n"
55 |           ++ lib.optional (remoteBackups != [ ]) "  just backup-remote\n"
56 |         )
57 |       else
58 |         "  @echo 'No backups configured'\n"
59 |     }
60 |
61 |   '';
62 |
63 |   justfilePath = "/home/truxnell/Justfile";
64 |   justfileSource = pkgs.writeText "restic-backup-justfile" justfileContent;
65 | in
66 | {
67 |   config = mkIf (backupNames != [ ]) {
68 |
69 |     # Deploy Justfile during system activation
70 |     system.activationScripts.restic-justfile = ''
71 |       mkdir -p $(dirname ${justfilePath})
72 |       cp ${justfileSource} ${justfilePath}
73 |       chown truxnell:users ${justfilePath}
74 |       chmod 0644 ${justfilePath}
75 |     '';
76 |   };
77 | }
78 |
--------------------------------------------------------------------------------
/docs/overview/design.md:
--------------------------------------------------------------------------------
1 | # Design principles
2 |
3 | Taking some lead from the [Zen of Python](https://peps.python.org/pep-0020/):
4 |
5 | - Minimise dependencies, where required, explicitly define dependencies
6 | - Use plain Nix & bash to solve problems over additional tooling
7 | - Stable channel for stable machines. Unstable only where features are important.
8 | - Modules for a specific service - Profiles for broad configuration of state.
9 | - Write readable code - descriptive variable names and modules
10 | - Keep functions/dependencies within the relevant module where possible
11 | - Errors should never pass silently - use assert etc for misconfigurations
12 | - Flat is better than nested - use built-in functions like map, filter, and fold to operate on lists or sets
13 |
14 | # Logic
15 |
16 | Do I have logic in all this mess? Sort of?
17 |
18 | ## Module use
19 |
20 | I have taken the following approach to using modules for different goals:
21 |
22 |
23 | 
24 | Daring logic for using profiles and modules
25 |
26 |
27 | **Profiles** to incrementally build up a host's shape in layers. These are 'wide' and touch a broad number of settings to achieve a certain goal:
28 |
29 | They can be broken down into:
30 | - Global profiles - Settings every single machine I'll ever roll will use. *(ex. Timezone, secret config, basic nix settings)*.
31 | - Hardware profile - Settings for a specific hardware platform. Taps into the nixos-hardware modules and defines my own. Useful to ensure all my raspi4's have the same hardware setup etc. *(ex. grub setup, eeprom updates, kernel modules)*
32 | - Role profiles - General use of host. Allows all 'servers' to have the same settings, workstations, development environments etc. *(ex. monitoring, log rotation, gui)*
33 | - Host profiles - Currently left in each hosts file in the hosts folder. These are machine specific settings that are unique to that host. *(ex. boot disks, services to run on machine, hostname)*
34 |
35 | **Modules** to define a specific service or setting. These are a lot tighter in scope and only do what is required to setup one particular thing - however they do still touch a few areas as each module may setup reverse proxy, backups, impermanence, etc - but only exactly what the service needs.
36 |
37 | This approach does help massively with [DRY](https://en.wikipedia.org/wiki/Don%27t_repeat_yourself).
--------------------------------------------------------------------------------
/nixos/modules/applications/search/searxng/update-hostrank.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # Regenerates higher.nix / lower.nix / remove.nix (searxng hostname ranking lists).
3 | # Each file is opened as a Nix fragment here and closed with "];}" at the bottom.
4 | echo "{services.searx.settings.hostnames.high_priority = [" > higher.nix
5 | echo "{services.searx.settings.hostnames.low_priority = [" > lower.nix
6 | echo "{services.searx.settings.hostnames.remove = [" > remove.nix
7 |
8 | # services.searx.settings.hostnames.high_priority|low_priority
9 | # services.searx.settings.hostnames.low_priority
10 | # services.searx.settings.hostnames.remove
11 |
12 | function parse_goggle {
13 |   local url="$1"
14 |   local temp_file=$(mktemp)
15 |
16 |   # Download the source once
17 |   if ! curl -s "$url" | grep -v '^!' > "$temp_file"; then
18 |     echo "Failed to download $url" >&2
19 |     rm -f "$temp_file"
20 |     return 1
21 |   fi
22 |
23 |   # Function to process each pattern and output file
24 |   process_pattern() {
25 |     local pattern="$1"
26 |     local output_file="$2"
27 |     grep -E "$pattern" "$temp_file" |
28 |       sed -n -E 's/\r$//;
29 |         s/^[^#]*(site|domain)=([^,[:space:]"'\'']+)($|,.*$)/"(.*\\.)?\2"/p;
30 |         s/^[^#]*(site|domain)="([^"]+)"($|,.*$)/"(.*\\.)?\2"/p;
31 |         s/^[^#]*(site|domain)='\''([^'\'']+)'\''($|,.*$)/"(.*\\.)?\2"/p' >> "$output_file"
32 |
33 |     # NOTE(review): output_file already contains the header line written above, so this -s check can never fire
34 |     if [[ ! -s "$output_file" ]]; then
35 |       echo "Warning: No domains found with pattern '$pattern'" >&2
36 |     fi
37 |   }
38 |
39 |   # Calls process_pattern for each desired pattern and file
40 |   process_pattern "boost=" "higher.nix"
41 |   process_pattern "downrank" "lower.nix"
42 |   process_pattern "discard" "remove.nix"
43 |
44 |   # Clean up the temporary file
45 |   rm -f "$temp_file"
46 | }
47 |
48 | # FMHY
49 | # FMHY remove domains
50 | curl -s https://raw.githubusercontent.com/fmhy/FMHYFilterlist/refs/heads/main/sitelist.txt | grep -v '^!' | sed 's/^/"(.*\\.)?/' | sed 's/$/"/' >> remove.nix
51 | curl -s https://raw.githubusercontent.com/fmhy/FMHYFilterlist/refs/heads/main/sitelist-plus.txt | grep -v '^!' | sed 's/^/"(.*\\.)?/' | sed 's/$/"/' >> remove.nix
52 |
53 | # Wikipedia perennial/etc
54 | parse_goggle https://raw.githubusercontent.com/kynoptic/wikipedia-reliable-sources/refs/heads/main/wikipedia-reliable-sources.goggle
55 |
56 | # few android related rankings
57 | parse_goggle "https://raw.githubusercontent.com/gayolGate/gayolGate/8f26b202202e76896bce59d865c5e7d4c35d5855/goggle.txt"
58 |
59 | echo "];}" >> higher.nix
60 | echo "];}" >> lower.nix
61 | echo "];}" >> remove.nix
62 |
--------------------------------------------------------------------------------
/nixos/modules/applications/networking/rapt2mqtt/default.nix:
--------------------------------------------------------------------------------
1 | {
2 |   lib,
3 |   config,
4 |   pkgs,
5 |   ...
6 | }:
7 | with lib;
8 | let
9 |   rapt2mqtt = pkgs.writeText "rapt2mqtt.py" (builtins.readFile ./rapt2mqtt.py); # copy the local bridge script into the nix store
10 |   cfg = config.mySystem.${category}.${app}; # nix let-bindings are mutually recursive, so forward refs to app/category are fine
11 |   app = "rapt2mqtt";
12 |   category = "services";
13 |   description = ""; # NOTE(review): unused in this module
14 |   user = "root"; # string
15 |   group = "root"; # string #int
16 |   # persistentFolder = "${config.mySystem.persistentFolder}/var/lib/${appFolder}";
17 | in
18 | {
19 |   options.mySystem.${category}.${app} = {
20 |     enable = mkEnableOption "${app}";
21 |     addToHomepage = mkEnableOption "Add ${app} to homepage" // {
22 |       default = true;
23 |     };
24 |     monitor = mkOption {
25 |       type = lib.types.bool;
26 |       description = "Enable gatus monitoring";
27 |       default = true;
28 |     };
29 |     prometheus = mkOption {
30 |       type = lib.types.bool;
31 |       description = "Enable prometheus scraping";
32 |       default = true;
33 |     };
34 |     addToDNS = mkOption {
35 |       type = lib.types.bool;
36 |       description = "Add to DNS list";
37 |       default = true;
38 |     };
39 |     dev = mkOption {
40 |       type = lib.types.bool;
41 |       description = "Development instance";
42 |       default = false;
43 |     };
44 |     backup = mkOption {
45 |       type = lib.types.bool;
46 |       description = "Enable backups";
47 |       default = true;
48 |     };
49 |
50 |   };
51 |
52 |   config = mkIf cfg.enable {
53 |
54 |     ## Secrets
55 |     sops.secrets."${category}/${app}/env" = {
56 |       sopsFile = ./secrets.sops.yaml;
57 |       owner = user;
58 |       inherit group;
59 |       restartUnits = [ "${app}.service" ]; # restart so the env file is re-read on secret rotation
60 |     };
61 |
62 |     systemd.services.rapt2mqtt = {
63 |       description = "rapt2mqtt";
64 |       wantedBy = [ "multi-user.target" ];
65 |       after = [ "network.target" ];
66 |       startAt = "hourly"; # systemd timer: re-run the bridge every hour
67 |       serviceConfig = {
68 |         Restart = "on-failure";
69 |         User = user;
70 |         EnvironmentFile = [ config.sops.secrets."${category}/${app}/env".path ];
71 |         # https://github.com/sgoadhouse/rapt-mqtt-bridge
72 |         ExecStart =
73 |           let
74 |             python = pkgs.python3.withPackages ( # inline python env with the deps the script imports
75 |               ps: with ps; [
76 |                 paho-mqtt
77 |                 requests
78 |                 python-dateutil
79 |               ]
80 |             );
81 |           in
82 |           "${python.interpreter} ${rapt2mqtt} -n 15 -f -s -v 1";
83 |
84 |       };
85 |     };
86 |
87 |   };
88 | }
89 |
--------------------------------------------------------------------------------
/nixos/modules/applications/search/whoogle/default.nix:
--------------------------------------------------------------------------------
1 | {
2 |   lib,
3 |   config,
4 |   ...
5 | }:
6 | with lib;
7 | let
8 |   app = "whoogle";
9 |   image = "ghcr.io/benbusby/whoogle-search:1.1.2@sha256:b09767349501fc46e570cc6b3f074f1ce1b59b844b6f49b721369ff408398184"; # digest-pinned image
10 |   user = "927"; # string
11 |   group = "927"; # string
12 |   port = 5000; # int
13 |   cfg = config.mySystem.services.${app};
14 |   # persistentFolder = "${config.mySystem.persistentFolder}/var/lib/${appFolder}";
15 | in
16 | {
17 |   options.mySystem.services.${app} = {
18 |     enable = mkEnableOption "${app}";
19 |     addToHomepage = mkEnableOption "Add ${app} to homepage" // {
20 |       default = true;
21 |     };
22 |   };
23 |
24 |   config = mkIf cfg.enable {
25 |
26 |     virtualisation.oci-containers.containers.${app} = {
27 |       image = "${image}";
28 |       user = "${user}:${group}";
29 |       ports = [ (builtins.toString port) ]; # expose port
30 |       environment = {
31 |         TZ = "${config.time.timeZone}";
32 |         WHOOGLE_ALT_TW = "nitter.${config.networking.domain}"; # alt frontends hosted on this domain
33 |         WHOOGLE_ALT_YT = "invidious.${config.networking.domain}";
34 |         WHOOGLE_ALT_IG = "imginn.com";
35 |         WHOOGLE_ALT_RD = "redlib.${config.networking.domain}";
36 |         WHOOGLE_ALT_MD = "scribe.${config.networking.domain}";
37 |         WHOOGLE_ALT_TL = "";
38 |         WHOOGLE_ALT_IMG = "bibliogram.art";
39 |         WHOOGLE_ALT_IMDB = "";
40 |         WHOOGLE_ALT_WIKI = "";
41 |         WHOOGLE_ALT_QUORA = "";
42 |         WHOOGLE_CONFIG_ALTS = "1";
43 |         WHOOGLE_CONFIG_THEME = "system";
44 |         WHOOGLE_CONFIG_URL = "https://whoogle.${config.networking.domain}";
45 |         WHOOGLE_CONFIG_GET_ONLY = "1";
46 |         WHOOGLE_CONFIG_COUNTRY = "AU";
47 |         WHOOGLE_CONFIG_VIEW_IMAGE = "1";
48 |         WHOOGLE_CONFIG_DISABLE = "1";
49 |       };
50 |     };
51 |
52 |     services.nginx.virtualHosts."${app}.${config.networking.domain}" = {
53 |       useACMEHost = config.networking.domain;
54 |       forceSSL = true;
55 |       locations."^~ /" = {
56 |         proxyPass = "http://${app}:${builtins.toString port}";
57 |         extraConfig = "resolver 10.88.0.1;"; # resolve the container hostname at runtime — presumably the podman network DNS; verify
58 |
59 |       };
60 |     };
61 |
62 |     mySystem.services.gatus.monitors = [
63 |       {
64 |
65 |         name = app;
66 |         group = "services";
67 |         url = "https://${app}.${config.mySystem.domain}/healthz"; # whoogle health endpoint
68 |         interval = "1m";
69 |         conditions = [
70 |           "[CONNECTED] == true"
71 |           "[STATUS] == 200"
72 |           "[RESPONSE_TIME] < 1500"
73 |         ];
74 |       }
75 |     ];
76 |
77 |   };
78 | }
79 |
--------------------------------------------------------------------------------
/nixos/modules/applications/media/tautulli/default.nix:
--------------------------------------------------------------------------------
1 | {
2 |   lib,
3 |   config,
4 |   ...
5 | }:
6 | with lib;
7 | let
8 |   app = "tautulli";
9 |   image = "ghcr.io/home-operations/tautulli:2.16.0"; # NOTE(review): tag-only, not digest-pinned like other containers in this repo
10 |   user = "kah"; # string
11 |   group = "kah"; # string
12 |   port = 8181; # int
13 |   cfg = config.mySystem.services.${app};
14 |   appFolder = "/var/lib/${app}";
15 |   # persistentFolder = "${config.mySystem.persistentFolder}/var/lib/${appFolder}";
16 | in
17 | {
18 |   options.mySystem.services.${app} = {
19 |     enable = mkEnableOption "${app}";
20 |     addToHomepage = mkEnableOption "Add ${app} to homepage" // {
21 |       default = true;
22 |     };
23 |   };
24 |
25 |   config = mkIf cfg.enable {
26 |     # ensure folder exist and has correct owner/group
27 |     systemd.tmpfiles.rules = [
28 |       "d ${appFolder} 0750 ${user} ${group} -" # The - disables automatic cleanup, so the file wont be removed after a period
29 |     ];
30 |
31 |     virtualisation.oci-containers.containers.${app} = {
32 |       image = "${image}";
33 |       user = "568:568"; # in-container uid:gid — presumably maps to the host user/group above; verify
34 |       volumes = [
35 |         "${appFolder}:/config:rw"
36 |         "${config.mySystem.nasFolder}/natflix:/media:rw"
37 |         "/etc/localtime:/etc/localtime:ro"
38 |       ];
39 |     };
40 |
41 |     services.nginx.virtualHosts."${app}.${config.networking.domain}" = {
42 |       useACMEHost = config.networking.domain;
43 |       forceSSL = true;
44 |       locations."^~ /" = {
45 |         proxyPass = "http://${app}:${builtins.toString port}";
46 |         extraConfig = "resolver 10.88.0.1;"; # resolve the container hostname at runtime — presumably the podman network DNS; verify
47 |
48 |       };
49 |     };
50 |
51 |     environment.persistence."${config.mySystem.system.impermanence.persistPath}" =
52 |       lib.mkIf config.mySystem.system.impermanence.enable
53 |         {
54 |           directories = [
55 |             {
56 |               directory = appFolder;
57 |               inherit user;
58 |               inherit group;
59 |               mode = "750";
60 |             }
61 |           ];
62 |         };
63 |
64 |     mySystem.services.gatus.monitors = [
65 |       {
66 |
67 |         name = app;
68 |         group = "media";
69 |         url = "https://${app}.${config.mySystem.domain}";
70 |         interval = "1m";
71 |         conditions = [
72 |           "[CONNECTED] == true"
73 |           "[STATUS] == 200"
74 |           "[RESPONSE_TIME] < 1500"
75 |         ];
76 |
77 |       }
78 |     ];
79 |
80 |     services.restic.backups = config.lib.mySystem.mkRestic {
81 |       inherit app user;
82 |       excludePaths = [ "Backups" ]; # presumably Tautulli's internal backup dumps — verify
83 |       paths = [ appFolder ];
84 |       inherit appFolder;
85 |     };
86 |
87 |   };
88 | }
89 |
--------------------------------------------------------------------------------
/nixos/modules/applications/development/code-server/default.nix:
--------------------------------------------------------------------------------
1 | {
2 |   lib,
3 |   config,
4 |   pkgs,
5 |   ...
6 | }:
7 | with lib;
8 | let
9 |   cfg = config.mySystem.services.code-server;
10 |   app = "code-server";
11 |   url = "code-${config.networking.hostName}.${config.networking.domain}"; # single source of truth for this host's code-server FQDN
12 |   appFolder = "/var/lib/${app}";
13 |   user = "truxnell";
14 |   group = "users";
15 | in
16 | {
17 |   options.mySystem.services.code-server = {
18 |     enable = mkEnableOption "code-server";
19 |     addToHomepage = mkEnableOption "Add ${app} to homepage" // {
20 |       default = true;
21 |     };
22 |   };
23 |
24 |   config = mkIf cfg.enable {
25 |
26 |     environment.persistence."${config.mySystem.system.impermanence.persistPath}" =
27 |       lib.mkIf config.mySystem.system.impermanence.enable
28 |         {
29 |           directories = [
30 |             {
31 |               directory = appFolder;
32 |               inherit user;
33 |               inherit group;
34 |               mode = "750";
35 |             }
36 |           ];
37 |         };
38 |
39 |     services.code-server = {
40 |       auth = "none";
41 |       enable = true;
42 |       disableTelemetry = true;
43 |       disableUpdateCheck = true;
44 |       proxyDomain = url; # was a duplicated inline string; identical value via the let-binding
45 |       userDataDir = "${appFolder}";
46 |       host = "127.0.0.1"; # only reachable through the nginx vhost below
47 |       extraPackages = with pkgs; [
48 |         git
49 |         nix
50 |         nixfmt-rfc-style
51 |       ];
52 |       package = pkgs.vscode-with-extensions.override {
53 |         vscode = pkgs.code-server;
54 |         vscodeExtensions = with pkgs.vscode-extensions; [
55 |           # Nix
56 |           jnoortheen.nix-ide
57 |           mkhl.direnv
58 |           streetsidesoftware.code-spell-checker
59 |           oderwat.indent-rainbow
60 |
61 |         ];
62 |       };
63 |       user = "truxnell";
64 |     };
65 |     services.nginx.virtualHosts."${url}" = {
66 |       useACMEHost = config.networking.domain;
67 |       forceSSL = true;
68 |       locations."^~ /" = {
69 |         proxyPass = "http://127.0.0.1:${builtins.toString config.services.code-server.port}";
70 |         proxyWebsockets = true;
71 |       };
72 |     };
73 |
74 |     mySystem.services.gatus.monitors = [
75 |       {
76 |
77 |         name = "${app}-${config.networking.hostName}";
78 |         group = "services";
79 |         url = "https://${url}";
80 |         interval = "1m";
81 |         conditions = [
82 |           "[CONNECTED] == true"
83 |           "[STATUS] == 200"
84 |           "[RESPONSE_TIME] < 1500"
85 |         ];
86 |       }
87 |     ];
88 |
89 |   };
90 | }
91 |
--------------------------------------------------------------------------------
/mkdocs.yml:
--------------------------------------------------------------------------------
1 | site_name: Truxnell's NixOS homelab
2 | site_author: truxnell
3 | site_url: https://truxnell.github.io/nix-config/
4 |
5 | # Repository
6 | repo_name: truxnell/nix-config
7 | repo_url: https://github.com/truxnell/nix-config
8 |
9 | docs_dir: ./docs
10 | site_dir: ./site
11 |
12 | copyright: Copyright © 2024 Nat Allan
13 |
14 | theme:
15 | name: material
16 | # custom_dir: ../../docs/overrides
17 | features:
18 | - announce.dismiss
19 | - content.code.annotate
20 | - content.code.copy
21 | - navigation.expand
22 | - navigation.indexes
23 | - navigation.path
24 | # - navigation.sections
25 | - navigation.footer
26 | # - navigation.tabs
27 | - navigation.top
28 | - search.suggest
29 | palette:
30 | - scheme: slate
31 | media: "(prefers-color-scheme: light)"
32 | primary: black
33 | accent: indigo
34 | toggle:
35 | icon: material/brightness-4
36 | name: Switch to light mode
37 | - scheme: default
38 | media: "(prefers-color-scheme: dark)"
39 | toggle:
40 | icon: material/brightness-7
41 | name: Switch to dark mode
42 | font:
43 | text: Roboto
44 | code: Roboto Mono
45 | icon:
46 | logo: simple/nixos
47 | annotations: material/chat-question
48 |
49 | # Plugins
50 | plugins:
51 | - search:
52 | separator: '[\s\u200b\-_,:!=\[\]()"`/]+|\.(?!\d)|&[lg]t;|(?!\b)(?=[A-Z][a-z])'
53 | - minify:
54 | minify_html: true
55 |
56 | # Extensions
57 | markdown_extensions:
58 | - admonition
59 | - abbr
60 | - attr_list
61 | - md_in_html
62 | - pymdownx.emoji:
63 | emoji_index: !!python/name:material.extensions.emoji.twemoji
64 | emoji_generator: !!python/name:material.extensions.emoji.to_svg
65 | - pymdownx.highlight:
66 | anchor_linenums: true
67 | line_spans: __span
68 | pygments_lang_class: true
69 | - pymdownx.inlinehilite
70 | - pymdownx.caret
71 | - pymdownx.tilde
72 | - pymdownx.snippets:
73 | check_paths: true
74 | auto_append:
75 | - ./docs/includes/abbreviations.md
76 | - pymdownx.superfences
77 | - toc:
78 | permalink: true
79 | toc_depth: 3
80 |
81 | nav:
82 | - readme.md: index.md
83 | - Overview:
84 | - Goals: overview/goals.md
85 | - Features: overview/features.md
86 |       - Design Principles: overview/design.md
87 | - Structure: overview/structure.md
88 | - Maintenance:
89 | - Software Updates: maintenance/software_updates.md
90 | - Backups: maintenance/backups.md
91 | - Monitoring:
92 | - SystemD failures: monitoring/systemd.md
93 | - Nix Warnings: monitoring/warnings.md
94 | - Other Features:
95 | - MOTD: motd.md
96 |
--------------------------------------------------------------------------------
/nixos/modules/applications/media/sabnzbd/default.nix:
--------------------------------------------------------------------------------
1 | {
2 |   lib,
3 |   config,
4 |   ...
5 | }:
6 | with lib;
7 | let
8 |   app = "sabnzbd";
9 |   image = "ghcr.io/home-operations/sabnzbd:4.5.5"; # NOTE(review): tag-only, not digest-pinned like other containers in this repo
10 |   user = "kah"; # string
11 |   group = "kah"; # string
12 |   port = 8080; # int
13 |   cfg = config.mySystem.services.${app};
14 |   appFolder = "/var/lib/${app}";
15 |   # persistentFolder = "${config.mySystem.persistentFolder}/var/lib/${appFolder}";
16 | in
17 | {
18 |   options.mySystem.services.${app} = {
19 |     enable = mkEnableOption "${app}";
20 |     addToHomepage = mkEnableOption "Add ${app} to homepage" // {
21 |       default = true;
22 |     };
23 |   };
24 |
25 |   config = mkIf cfg.enable {
26 |     # ensure folder exist and has correct owner/group
27 |     systemd.tmpfiles.rules = [
28 |       "d ${appFolder} 0750 ${user} ${group} -" # The - disables automatic cleanup, so the file wont be removed after a period
29 |     ];
30 |
31 |     virtualisation.oci-containers.containers.${app} = {
32 |       image = "${image}";
33 |       user = "568:568"; # in-container uid:gid — presumably maps to the host user/group above; verify
34 |       environment = {
35 |         SABNZBD__HOST_WHITELIST_ENTRIES = "sabnzbd, sabnzbd.trux.dev"; # Host headers sabnzbd will accept
36 |       };
37 |       volumes = [
38 |         "${appFolder}:/config:rw"
39 |         "${config.mySystem.nasFolder}/natflix:/tank:rw"
40 |         "/etc/localtime:/etc/localtime:ro"
41 |       ];
42 |     };
43 |
44 |     services.nginx.virtualHosts."${app}.${config.networking.domain}" = {
45 |       useACMEHost = config.networking.domain;
46 |       forceSSL = true;
47 |       locations."^~ /" = {
48 |         proxyPass = "http://${app}:${builtins.toString port}";
49 |         extraConfig = "resolver 10.88.0.1;"; # resolve the container hostname at runtime — presumably the podman network DNS; verify
50 |
51 |       };
52 |     };
53 |
54 |     environment.persistence."${config.mySystem.system.impermanence.persistPath}" =
55 |       lib.mkIf config.mySystem.system.impermanence.enable
56 |         {
57 |           directories = [
58 |             {
59 |               directory = appFolder;
60 |               inherit user;
61 |               inherit group;
62 |               mode = "750";
63 |             }
64 |           ];
65 |         };
66 |
67 |     mySystem.services.gatus.monitors = [
68 |       {
69 |
70 |         name = app;
71 |         group = "media";
72 |         url = "https://${app}.${config.mySystem.domain}";
73 |
74 |         interval = "1m";
75 |         conditions = [
76 |           "[CONNECTED] == true"
77 |           "[STATUS] == 200"
78 |           "[RESPONSE_TIME] < 1500"
79 |         ];
80 |       }
81 |     ];
82 |
83 |     services.restic.backups = config.lib.mySystem.mkRestic {
84 |       inherit app user;
85 |       excludePaths = [ "Backups" ]; # presumably the app's own backup dumps — verify
86 |       paths = [ appFolder ];
87 |       inherit appFolder;
88 |     };
89 |
90 |   };
91 | }
92 |
--------------------------------------------------------------------------------
/nixos/modules/applications/gaming/factorio/factorio-update.nix:
--------------------------------------------------------------------------------
1 | {
2 |   lib,
3 |   config,
4 |   pkgs,
5 |   ...
6 | }:
7 | with lib;
8 | let
9 |   factorio-update = pkgs.writeShellScriptBin "factorio-update" ''
10 |     #!/usr/bin/env bash
11 |
12 |     # Define the container name, registry, and tag
13 |     CONTAINER_NAME="factorio-space-age"
14 |     REGISTRY="docker.io/factoriotools/factorio"
15 |     TAG="latest"
16 |
17 |     # Check if there are players playing online
18 |     PLAYER_STATUS=$(${pkgs.podman}/bin/podman exec $CONTAINER_NAME rcon /players online)
19 |
20 |     if [[ $PLAYER_STATUS != *"Online players (0):"* ]]; then
21 |       echo "There are players online. Exiting."
22 |       exit 0
23 |     fi
24 |
25 |     # Get the current image ID of the running container
26 |     CURRENT_IMAGE=$(${pkgs.podman}/bin/podman inspect -f '{{.Image}}' $CONTAINER_NAME)
27 |
28 |     # Pull the latest image from Docker Hub (or your specified registry)
29 |     LATEST_IMAGE_ID=$(${pkgs.podman}/bin/podman pull --quiet $REGISTRY:$TAG)
30 |
31 |     echo "Current version: $CURRENT_IMAGE"
32 |     echo "Latest version: $LATEST_IMAGE_ID"
33 |     # Compare the current image with the latest image
34 |     if [[ "$CURRENT_IMAGE" == "$LATEST_IMAGE_ID" ]]; then
35 |       echo "No new image available. Exiting."
36 |       exit 0
37 |     fi
38 |
39 |     # Pull the latest image for the service
40 |     echo "Pulling latest image..."
41 |     ${pkgs.podman}/bin/podman pull $REGISTRY:$TAG
42 |
43 |     # Restart the service
44 |     echo "Restarting the container..."
45 |     systemctl restart podman-$CONTAINER_NAME
46 |     echo "Update and restart completed successfully."
47 |
48 |     LATEST_VERSION=$(${pkgs.podman}/bin/podman inspect -f '{{index .Config.Labels "factorio.version"}}' $CONTAINER_NAME)
49 |     echo "New version is $LATEST_VERSION"
50 |
51 |     # Message to be posted in Discord
52 |     MESSAGE="**Server Update:** $CONTAINER_NAME has been updated to version $LATEST_VERSION!"
53 |
54 |     # Send the message using curl
55 |     ${pkgs.curl}/bin/curl -X POST -H "Content-Type: application/json" \
56 |       -d "{\"content\": \"$MESSAGE\"}" \
57 |       "$DISCORD_WEBHOOK_URL"
58 |
59 |
60 |   '';
61 | in
62 | {
63 |   # Hourly oneshot: pulls the latest factorio image, restarts the container when no players are online, and posts to Discord
64 |   systemd.services.factorio-update = mkIf config.mySystem.services.factorio.space-age.enable { # only when the space-age instance is enabled
65 |     description = "Factorio update";
66 |     wantedBy = [ "multi-user.target" ];
67 |     after = [ "network.target" ];
68 |     startAt = "hourly"; # systemd timer
69 |     serviceConfig = {
70 |       Type = "oneshot";
71 |       User = "root";
72 |       EnvironmentFile = [ config.sops.secrets."services/factorio/env".path ]; # presumably provides DISCORD_WEBHOOK_URL used by the script — verify
73 |       ExecStart = ''
74 |         ${factorio-update}/bin/factorio-update
75 |       '';
76 |
77 |     };
78 |   };
79 | }
80 |
80 |
--------------------------------------------------------------------------------
/docs/maintenance/upgrade-25.11.md:
--------------------------------------------------------------------------------
1 | ## NixOS 25.11 upgrade
2 |
3 | This document describes how to upgrade this flake and all hosts to NixOS 25.11.
4 |
5 | ### 1. Update flake inputs and lock file
6 |
7 | - Ensure `flake.nix` points `nixpkgs` at the 25.11 channel (already done in this repo).
8 | - On your development machine, from the repo root, run:
9 |
10 | ```bash
11 | nix flake update
12 | ```
13 |
14 | This will refresh `flake.lock` to match the 25.11 inputs.
15 |
16 | ### 2. Run flake checks
17 |
18 | From the repo root:
19 |
20 | ```bash
21 | ./test-flake.sh
22 | ```
23 |
24 | This script runs:
25 |
26 | - `nix-instantiate --parse flake.nix`
27 | - `nix flake metadata` and `nix flake show`
28 | - `nix flake check --no-build`
29 | - Evaluation of all `nixosConfigurations`
30 | - Basic validation of `lib` and application imports
31 |
32 | Fix any reported issues before proceeding.
33 |
34 | ### 3. Per-host system builds
35 |
36 | For each host defined under `nixosConfigurations`:
37 |
38 | ```bash
39 | nix build .#nixosConfigurations.daedalus.config.system.build.toplevel
40 | nix build .#nixosConfigurations.shodan.config.system.build.toplevel
41 | nix build .#nixosConfigurations.xerxes.config.system.build.toplevel
42 | ```
43 |
44 | Only proceed once all hosts build successfully.
45 |
46 | ### 4. Canary host upgrade
47 |
48 | Pick a canary host (for example `daedalus`) and, on that host, run:
49 |
50 | ```bash
51 | sudo nixos-rebuild test --flake /path/to/this/repo#daedalus
52 | ```
53 |
54 | If everything looks good (services start, no obvious regressions), commit the change with:
55 |
56 | ```bash
57 | sudo nixos-rebuild switch --flake /path/to/this/repo#daedalus
58 | ```
59 |
60 | Reboot the canary and verify:
61 |
62 | - Networking and storage
63 | - Core services (e.g. monitoring, backups)
64 | - Any important applications (e.g. gaming stack like Factorio)
65 |
66 | Confirm that the bootloader still exposes older generations for rollback.
67 |
68 | ### 5. Rollout to remaining hosts
69 |
70 | For each remaining host (e.g. `shodan`, `xerxes`):
71 |
72 | ```bash
73 | sudo nixos-rebuild test --flake /path/to/this/repo#shodan
74 | sudo nixos-rebuild switch --flake /path/to/this/repo#shodan
75 |
76 | sudo nixos-rebuild test --flake /path/to/this/repo#xerxes
77 | sudo nixos-rebuild switch --flake /path/to/this/repo#xerxes
78 | ```
79 |
80 | Upgrade hosts in batches according to how critical they are, validating services after each batch.
81 |
82 | ### 6. Post-upgrade cleanup
83 |
84 | - Remove any temporary workarounds you added while debugging 25.11 issues.
85 | - Consider adopting new NixOS 25.11 options or modules where they simplify your config.
86 | - Keep `./test-flake.sh` as a quick regression check for future flake or NixOS upgrades.
87 |
88 |
89 |
--------------------------------------------------------------------------------
/docs/vm/installing-x86_64.md:
--------------------------------------------------------------------------------
1 | ## Installing a playground VM
2 |
3 | I've used gnome-boxes from my current Fedora laptop for running playground vm's.
4 |
5 | Settings:
6 | ISO: nixos-minimal
7 | Hard drive: 32GB
8 | RAM: 2GB
9 | EFI: Enable
10 |
11 | Expose port 22 to allow ssh into vm (host port 3022, guest 22)
12 |
13 | ```sh
14 | # set temp root passwd
15 | sudo su
16 | passwd
17 | ```
18 |
19 | `sshd` is already running, so you can now ssh into the vm remotely for the rest of the setup.
20 | `ssh root@127.0.0.1 -p 3022`
21 |
22 | ```sh
23 | # Partitioning
24 | parted /dev/sda -- mklabel gpt
25 | parted /dev/sda -- mkpart root ext4 512MB -8GB
26 | parted /dev/sda -- mkpart swap linux-swap -8GB 100%
27 | parted /dev/sda -- mkpart ESP fat32 1MB 512MB
28 | parted /dev/sda -- set 3 esp on
29 |
30 | # Formatting
31 | mkfs.ext4 -L nixos /dev/sda1
32 | mkswap -L swap /dev/sda2
33 | mkfs.fat -F 32 -n boot /dev/sda3
34 |
35 | # Mounting disks for installation
36 | mount /dev/disk/by-label/nixos /mnt
37 | mkdir -p /mnt/boot
38 | mount /dev/disk/by-label/boot /mnt/boot
39 | swapon /dev/sda2
40 |
41 | # Generating default configuration
42 | nixos-generate-config --root /mnt
43 | ```
44 |
45 | From this config copy the bootstrap configuration and fetch the hardware configuration.
46 |
47 | ```sh
48 | scp -P 3022 nixos/hosts/bootstrap/configuration.nix root@127.0.0.1:/mnt/etc/nixos/configuration.nix
49 | scp -P 3022 root@127.0.0.1:/mnt/etc/nixos/hardware-configuration.nix nixos/hosts/nixosvm/hardware-configuration.nix
50 | ```
51 |
52 | Then back to the VM
53 |
54 | ```sh
55 | nixos-install
56 | reboot
57 | nixos-rebuild switch
58 | ```
59 |
60 | Set the password for the user that was created.
61 | You may need to switch to root with `su` first.
62 |
63 | ```sh
64 | passwd truxnell
65 | ```
66 |
67 | Also grab the ssh keys and re-encrypt sops
68 |
69 | ```sh
70 | cat /etc/ssh/ssh_host_ed25519_key.pub | ssh-to-age
71 | ```
72 |
73 | Then run the relevant `task` target to re-encrypt the sops secrets with the new age key.
74 |
75 | Log in as the user and clone the nix config, OR for remote machines/servers just `nixos-install --impure --flake github:truxnell/nix-config#<hostname>`
76 |
77 | ```sh
78 | mkdir .local
79 | cd .local
80 | git clone https://github.com/truxnell/nix-config.git
81 | cd nix-config
82 | ```
83 |
84 | Apply config to bootstrapped device
85 | First time around, you MUST apply with the name of a host in ./hosts/
86 | This is because `.. --flake .` looks for a `nixosConfigurations` key matching the machine's hostname.
87 | The bootstrap machine will be called 'nixos-bootstrap', so by default the flake would resolve `nixosConfigurations.nixos-bootstrap`.
88 | Subsequent rebuilds can use the default command, as after the first build the machine's hostname will have been changed to the desired host.
89 |
90 | ```sh
91 | nixos-rebuild switch --flake .#
92 | ```
93 |
94 | NOTE: remember to set up the sops secrets as well!
95 |
--------------------------------------------------------------------------------
/.github/settings.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # These settings are synced to GitHub by https://probot.github.io/apps/settings/
3 |
4 | repository:
5 | # See https://docs.github.com/en/rest/reference/repos#update-a-repository for all available settings.
6 |
7 | # The name of the repository. Changing this will rename the repository
8 | name: nix-config
9 |
10 | # A short description of the repository that will show up on GitHub
11 | description: My nix & nixos home setup
12 |
13 | # A URL with more information about the repository
14 | # homepage: https://example.github.io/
15 |
16 | # A comma-separated list of topics to set on the repository
17 | topics: nix, nixos
18 |
19 | # Either `true` to make the repository private, or `false` to make it public.
20 | private: false
21 |
22 | # Either `true` to enable issues for this repository, `false` to disable them.
23 | has_issues: true
24 |
25 | # Either `true` to enable projects for this repository, or `false` to disable them.
26 | # If projects are disabled for the organization, passing `true` will cause an API error.
27 | has_projects: false
28 |
29 | # Either `true` to enable the wiki for this repository, `false` to disable it.
30 | has_wiki: false
31 |
32 | # Either `true` to enable downloads for this repository, `false` to disable them.
33 | has_downloads: false
34 |
35 | # Updates the default branch for this repository.
36 | default_branch: main
37 |
38 | # Either `true` to allow squash-merging pull requests, or `false` to prevent
39 | # squash-merging.
40 | allow_squash_merge: true
41 |
42 | # Either `true` to allow merging pull requests with a merge commit, or `false`
43 | # to prevent merging pull requests with merge commits.
44 | allow_merge_commit: false
45 |
46 | # Either `true` to allow rebase-merging pull requests, or `false` to prevent
47 | # rebase-merging.
48 | allow_rebase_merge: true
49 |
50 | # Either `true` to enable automatic deletion of branches on merge, or `false` to disable
51 | delete_branch_on_merge: true
52 |
53 | # Either `true` to enable automated security fixes, or `false` to disable
54 | # automated security fixes.
55 | enable_automated_security_fixes: false
56 |
57 | # Either `true` to enable vulnerability alerts, or `false` to disable
58 | # vulnerability alerts.
59 | enable_vulnerability_alerts: true
60 |
61 | # Labels: define labels for Issues and Pull Requests
62 | # labels:
63 | # - name: bug
64 | # color: CC0000
65 | # description: An issue with the system 🐛.
66 |
67 | # - name: feature
68 | # # If including a `#`, make sure to wrap it with quotes!
69 | # color: '#336699'
70 | # description: New functionality.
71 |
72 | # - name: Help Wanted
73 | # # Provide a new name to rename an existing label
74 | # new_name: first-timers-only
75 |
76 | # TODO branch protection once nailed down.
77 |
--------------------------------------------------------------------------------
/nixos/modules/nixos/services/monitoring.nix:
--------------------------------------------------------------------------------
1 | {
2 |   lib,
3 |   config,
4 |   ...
5 | }:
6 | with lib;
7 | let
8 |   cfg = config.mySystem.services.monitoring;
9 |   urlVmAgent = "vmagent-${config.networking.hostName}.${config.networking.domain}";
10 |   portVmAgent = 8429; # int
11 | in
12 | {
13 |   options.mySystem.services.monitoring.enable = mkEnableOption "Prometheus Monitoring";
14 |   options.mySystem.monitoring.scrapeConfigs.node-exporter = mkOption {
15 |     type = lib.types.listOf lib.types.str;
16 |     description = "Prometheus node-exporter scrape targets";
17 |     default = [ ];
18 |   };
19 |
20 |   config = mkIf cfg.enable {
21 |
22 |     services.prometheus.exporters = {
23 |       node = {
24 |         enable = true;
25 |         enabledCollectors = [
26 |           "diskstats"
27 |           "filesystem"
28 |           "loadavg"
29 |           "meminfo"
30 |           "netdev"
31 |           "stat"
32 |           "time"
33 |           "uname"
34 |           "systemd"
35 |         ];
36 |       };
37 |       smartctl = {
38 |         enable = true;
39 |       };
40 |
41 |     };
42 |
43 |     # ensure ports are open
44 |     # networking.firewall.allowedTCPPorts = mkIf cfg.enable [
45 |     #   config.services.prometheus.exporters.node.port
46 |     #   config.services.prometheus.exporters.smartctl.port
47 |     # ];
48 |
49 |     services.vmagent = {
50 |       enable = true;
51 |       remoteWrite.url = "http://shodan:8428/api/v1/write";
52 |       extraArgs = lib.mkForce [ "-remoteWrite.label=instance=${config.networking.hostName}" ];
53 |       prometheusConfig = {
54 |         scrape_configs = [
55 |           {
56 |             job_name = "node";
57 |             # scrape_timeout = "40s";
58 |             static_configs = [
59 |               {
60 |                 targets = [ "127.0.0.1:9100" ]; # Prometheus targets are host:port — no scheme (was "http://127.0.0.1:9100")
61 |               }
62 |             ];
63 |           }
64 |           {
65 |             job_name = "smartctl";
66 |             # scrape_timeout = "40s";
67 |             static_configs = [
68 |               {
69 |                 targets = [ "127.0.0.1:9633" ]; # Prometheus targets are host:port — no scheme (was "http://127.0.0.1:9633")
70 |               }
71 |             ];
72 |           }
73 |           {
74 |             job_name = "vmagent";
75 |             # scrape_interval = "10s";
76 |             static_configs = [
77 |               { targets = [ "127.0.0.1:8429" ]; }
78 |             ];
79 |           }
80 |         ];
81 |       };
82 |     };
83 |
84 |     services.nginx.virtualHosts.${urlVmAgent} = {
85 |       forceSSL = true;
86 |       useACMEHost = config.networking.domain;
87 |       locations."^~ /" = {
88 |         proxyPass = "http://127.0.0.1:${builtins.toString portVmAgent}";
89 |       };
90 |     };
91 |
92 |     mySystem.monitoring.scrapeConfigs.node-exporter = [
93 |       "${config.networking.hostName}:${toString config.services.prometheus.exporters.node.port}"
94 |     ];
95 |
96 |   };
97 |
98 | }
99 |
--------------------------------------------------------------------------------
/scripts/backup-to-external-hdd.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # Cold HDD Backup Script
3 | # Manual quarterly backup of critical data to external HDD
4 | # Usage: sudo ./backup-to-external-hdd.sh /mnt/external-hdd
5 |
6 | set -euo pipefail
7 |
8 | if [ $# -ne 1 ]; then
9 |     echo "Usage: $0 <external-hdd-mount-point>"
10 |     echo "Example: $0 /mnt/external-hdd"
11 |     exit 1
12 | fi
13 |
14 | EXTERNAL_HDD="$1"
15 | BACKUP_DATE=$(date +%Y-%m-%d)
16 | BACKUP_ROOT="${EXTERNAL_HDD}/backups/${BACKUP_DATE}"
17 |
18 | # Critical data directories to backup
19 | CRITICAL_DATA=(
20 |     "/zfs/photos/immich/"
21 |     "/zfs/syncthing/"
22 |     "/zfs/forgejo/"
23 | )
24 |
25 | # Check if external HDD is mounted and writable
26 | if [ ! -d "$EXTERNAL_HDD" ] || [ ! -w "$EXTERNAL_HDD" ]; then
27 |     echo "ERROR: External HDD not mounted or not writable at $EXTERNAL_HDD" >&2
28 |     exit 1
29 | fi
30 |
31 | # Create backup directory
32 | mkdir -p "$BACKUP_ROOT"
33 |
34 | echo "Starting cold HDD backup to $BACKUP_ROOT"
35 | echo "Backup date: $BACKUP_DATE"
36 | echo ""
37 |
38 | # Backup each critical directory
39 | for data_dir in "${CRITICAL_DATA[@]}"; do
40 |     if [ ! -d "$data_dir" ]; then
41 |         echo "WARNING: Directory $data_dir does not exist, skipping..." >&2
42 |         continue
43 |     fi
44 |
45 |     dir_name=$(basename "$data_dir")
46 |     echo "Backing up $data_dir to $BACKUP_ROOT/$dir_name..."
47 |
48 |     # Use rsync with archive mode and progress
49 |     rsync -avh --progress \
50 |         --exclude='*.tmp' \
51 |         --exclude='.DS_Store' \
52 |         --exclude='Thumbs.db' \
53 |         "$data_dir" "$BACKUP_ROOT/"
54 |
55 |     echo "Completed: $dir_name"
56 |     echo ""
57 | done
58 |
59 | # Create backup manifest
60 | MANIFEST="${BACKUP_ROOT}/MANIFEST.txt"
61 | {
62 |     echo "Cold HDD Backup Manifest"
63 |     echo "========================"
64 |     echo "Date: $BACKUP_DATE"
65 |     echo "Host: $(hostname)"
66 |     echo "Backup location: $BACKUP_ROOT"
67 |     echo ""
68 |     echo "Directories backed up:"
69 |     for data_dir in "${CRITICAL_DATA[@]}"; do
70 |         if [ -d "$data_dir" ]; then
71 |             echo "  - $data_dir ($(du -sh "$data_dir" | cut -f1))"
72 |         fi
73 |     done
74 |     echo ""
75 |     echo "Total backup size: $(du -sh "$BACKUP_ROOT" | cut -f1)"
76 |     echo ""
77 |     echo "Verification:"
78 |     echo "  To verify backups, check directory structure and file counts."
79 |     echo "  Example: find $BACKUP_ROOT -type f | wc -l"
80 | } > "$MANIFEST"
81 |
82 | echo "Backup completed successfully!"
83 | echo "Manifest saved to: $MANIFEST"
84 | echo ""
85 | echo "Backup location: $BACKUP_ROOT"
86 | echo "Total size: $(du -sh "$BACKUP_ROOT" | cut -f1)"
87 | echo ""
88 | echo "Next steps:"
89 | echo "  1. Verify backup integrity"
90 | echo "  2. Store HDD in offsite location"
91 | echo "  3. Update backup rotation schedule"
92 |
93 |
--------------------------------------------------------------------------------
/nixos/modules/applications/media/plex/default.nix:
--------------------------------------------------------------------------------
1 | {
2 | lib,
3 | config,
4 | ...
5 | }:
6 | with lib;
7 | let
8 | app = "plex";
9 | image = "ghcr.io/home-operations/plex:1.42.2";
10 | user = "kah"; # string
11 | group = "kah"; # string
12 | port = 32400; # int
13 | cfg = config.mySystem.services.${app};
14 | appFolder = "/var/lib/${app}";
15 |
16 | ## persistentFolder = "${config.mySystem.persistentFolder}/var/lib/${appFolder}";
17 | in
18 | {
19 | options.mySystem.services.${app} = {
20 | enable = mkEnableOption "${app}";
21 | addToHomepage = mkEnableOption "Add ${app} to homepage" // {
22 | default = true;
23 | };
24 | openFirewall = mkEnableOption "Open firewall for ${app}" // {
25 | default = true;
26 | };
27 | };
28 |
29 | config = mkIf cfg.enable {
30 |
31 | environment.persistence."${config.mySystem.system.impermanence.persistPath}" =
32 | lib.mkIf config.mySystem.system.impermanence.enable
33 | {
34 | directories = [
35 | {
36 | directory = appFolder;
37 | inherit user;
38 | inherit group;
39 | mode = "750";
40 | }
41 | ];
42 | };
43 |
44 | virtualisation.oci-containers.containers.${app} = {
45 | image = "${image}";
46 | user = "568:568";
47 | volumes = [
48 | "${appFolder}:/config:rw"
49 | "${config.mySystem.nasFolder}/natflix:/data:rw"
50 | "/zfs/backup/nixos/plex:/config/backup:rw" # TODO fix backup path with var.
51 | "/dev/dri:/dev/dri" # for hardware transcoding
52 | "/etc/localtime:/etc/localtime:ro"
53 | ];
54 | environment = {
55 | PLEX_ADVERTISE_URL = "https://10.8.20.42:32400,https://${app}.${config.mySystem.domain}:443"; # TODO var ip
56 | };
57 | ports = [ "${builtins.toString port}:${builtins.toString port}" ]; # expose port
58 | };
59 | networking.firewall = mkIf cfg.openFirewall {
60 |
61 | allowedTCPPorts = [ port ];
62 | allowedUDPPorts = [ port ];
63 | };
64 |
65 | services.nginx.virtualHosts."${app}.${config.networking.domain}" = {
66 | useACMEHost = config.networking.domain;
67 | forceSSL = true;
68 | locations."^~ /" = {
69 | proxyPass = "http://${app}:${builtins.toString port}";
70 | extraConfig = "resolver 10.88.0.1;";
71 |
72 | };
73 | };
74 |
75 | mySystem.services.gatus.monitors = [
76 | {
77 |
78 | name = app;
79 | group = "media";
80 | url = "https://${app}.${config.mySystem.domain}/web/";
81 | interval = "1m";
82 | conditions = [
83 | "[CONNECTED] == true"
84 | "[STATUS] == 200"
85 | "[RESPONSE_TIME] < 1500"
86 | ];
87 | }
88 | ];
89 |
90 | services.restic.backups = config.lib.mySystem.mkRestic {
91 | inherit app user;
92 | # excludePaths = [ "Backups" ];
93 | paths = [ appFolder ];
94 | inherit appFolder;
95 | };
96 |
97 | };
98 | }
99 |
--------------------------------------------------------------------------------
/nixos/modules/applications/media/recyclarr/default.nix:
--------------------------------------------------------------------------------
1 | {
2 | lib,
3 | config,
4 | pkgs,
5 | ...
6 | }:
7 | with lib;
8 | let
9 | cfg = config.mySystem.${category}.${app};
10 | app = "recyclarr";
11 | category = "services";
12 | description = "TRaSH guides sync";
13 | image = "ghcr.io/recyclarr/recyclarr:7.5.2@sha256:2550848d43a453f2c6adf3582f2198ac719f76670691d76de0819053103ef2fb";
14 | user = "kah"; # string
15 | group = "kah"; # string #int
16 | appFolder = "/var/lib/${app}";
17 | # persistentFolder = "${config.mySystem.persistentFolder}/var/lib/${appFolder}";
18 | # recyclarrYaml = (pkgs.formats.yaml { }).generate "recyclarr.yml" (recyclarrNix);
19 | in
20 | {
21 | options.mySystem.${category}.${app} = {
22 | enable = mkEnableOption "${app}";
23 | addToHomepage = mkEnableOption "Add ${app} to homepage" // {
24 | default = true;
25 | };
26 | monitor = mkOption {
27 | type = lib.types.bool;
28 | description = "Enable gatus monitoring";
29 | default = true;
30 | };
31 | prometheus = mkOption {
32 | type = lib.types.bool;
33 | description = "Enable prometheus scraping";
34 | default = true;
35 | };
36 | addToDNS = mkOption {
37 | type = lib.types.bool;
38 | description = "Add to DNS list";
39 | default = true;
40 | };
41 | dev = mkOption {
42 | type = lib.types.bool;
43 | description = "Development instance";
44 | default = false;
45 | };
46 | backup = mkOption {
47 | type = lib.types.bool;
48 | description = "Enable backups";
49 | default = true;
50 | };
51 |
52 | };
53 |
54 | config = mkIf cfg.enable {
55 |
56 | ## Secrets
57 | ## env files MUST be in format
58 | ## VAR="derp"
59 | ## not VAR=derp
60 | sops.secrets."${category}/${app}/env" = {
61 | sopsFile = ./secrets.sops.yaml;
62 | owner = user;
63 | inherit group;
64 | restartUnits = [ "${app}.service" ];
65 | };
66 |
67 | environment.persistence."${config.mySystem.system.impermanence.persistPath}" =
68 | lib.mkIf config.mySystem.system.impermanence.enable
69 | {
70 | directories = [
71 | {
72 | directory = appFolder;
73 | inherit user group;
74 | mode = "755";
75 | }
76 | ];
77 | };
78 |
79 | systemd.services."recyclarr" =
80 |
81 | {
82 | script = ''
83 | ${pkgs.podman}/bin/podman run --rm \
84 | --user 568:568 \
85 | -v ${config.sops.secrets."${category}/${app}/env".path}:/config/recyclarr.yml:ro \
86 | -v ${appFolder}:/data:rw \
87 | ${image} \
88 | sync \
89 | -c /config/recyclarr.yml \
90 | --app-data /data \
91 | -d
92 | '';
93 | path = [ pkgs.podman ];
94 | requires = [
95 | "sonarr.service"
96 | "radarr.service"
97 | ];
98 | startAt = "daily";
99 |
100 | };
101 |
102 | };
103 | }
104 |
--------------------------------------------------------------------------------
/nixos/modules/applications/infrastructure/postgresql/default.nix:
--------------------------------------------------------------------------------
1 | {
2 | lib,
3 | config,
4 | pkgs,
5 | ...
6 | }:
7 | with lib;
8 | let
9 | cfg = config.mySystem.${category}.${app};
10 | app = "postgresql";
11 | category = "services";
12 | description = "Postgres RDMS";
13 | appFolder = config.services.postgresql.dataDir;
14 | in
15 | {
16 | options.mySystem.${category}.${app} = {
17 | enable = mkEnableOption "${app}";
18 | addToHomepage = mkEnableOption "Add ${app} to homepage" // {
19 | default = true;
20 | };
21 | prometheus = mkOption {
22 | type = lib.types.bool;
23 | description = "Enable prometheus scraping";
24 | default = true;
25 | };
26 | backup = mkOption {
27 | type = lib.types.bool;
28 | description = "Enable backups";
29 | default = true;
30 | };
31 |
32 | };
33 |
34 | config = mkIf cfg.enable {
35 |
36 | ## Secrets
37 | # sops.secrets."${category}/${app}/env" = {
38 | # sopsFile = ./secrets.sops.yaml;
39 | # owner = user;
40 | # group = group;
41 | # restartUnits = [ "${app}.service" ];
42 | # };
43 |
44 | environment.persistence."${config.mySystem.system.impermanence.persistPath}" =
45 | lib.mkIf config.mySystem.system.impermanence.enable
46 | {
47 | directories = [
48 | {
49 | directory = appFolder;
50 | user = "postgres";
51 | group = "postgres";
52 | mode = "750";
53 | }
54 | ];
55 | };
56 |
57 | services.postgresql = {
58 | enable = true;
59 | identMap = ''
60 | # ArbitraryMapName systemUser DBUser
61 | superuser_map root postgres
62 | superuser_map postgres postgres
63 | # Let other names login as themselves
64 | superuser_map /^(.*)$ \1
65 | superuser_map root rxresume
66 | '';
67 | authentication = ''
68 |
69 |
70 |
71 | #type database DBuser auth-method optional_ident_map
72 | local all postgres peer map=superuser_map
73 | local sameuser all peer map=superuser_map
74 | local rxresume root peer
75 | '';
76 | settings = {
77 | random_page_cost = 1.1;
78 | shared_buffers = "6GB";
79 | };
80 | };
81 |
82 | # services.restic.backups = mkIf cfg.backup (
83 | # config.lib.mySystem.mkRestic {
84 | # inherit app;
85 | # user = "postgres";
86 | # paths = [ appFolder ];
87 | # inherit appFolder;
88 | # }
89 | # );
90 |
91 | systemd.services.restic-backups-postgresql-local.serviceConfig.ExecStart = mkForce (
92 | pkgs.writeShellScript "restic-backups-postgresql-local-ExecStart" ''
93 | set -o pipefail
94 | ${config.services.postgresql.package}/bin/pg_dumpall -U postgres \
95 | | ${pkgs.restic}/bin/restic backup --stdin --stdin-filename postgres.sql
96 | ''
97 | );
98 |
99 | };
100 | }
101 |
--------------------------------------------------------------------------------
/nixos/modules/applications/misc/home-assistant/default.nix:
--------------------------------------------------------------------------------
1 | {
2 | lib,
3 | config,
4 | ...
5 | }:
6 | with lib;
7 | let
8 | app = "home-assistant";
9 | image = "ghcr.io/home-operations/home-assistant:2025.12.4";
10 | user = "kah"; # string
11 | group = "kah"; # string
12 | port = 8123; # int
13 | cfg = config.mySystem.services.${app};
14 | appFolder = "/var/lib/${app}";
15 | # persistentFolder = "${config.mySystem.persistentFolder}/var/lib/${appFolder}";
16 | in
17 | {
18 | options.mySystem.services.${app} = {
19 | enable = mkEnableOption "${app}";
20 | addToHomepage = mkEnableOption "Add ${app} to homepage" // {
21 | default = true;
22 | };
23 | };
24 |
25 | config = mkIf cfg.enable {
26 | # ensure folder exist and has correct owner/group
27 | systemd.tmpfiles.rules = [
28 | "d ${appFolder} 0750 ${user} ${group} -" # The - disables automatic cleanup, so the file wont be removed after a period
29 | ];
30 |
31 | sops.secrets."services/${app}/env" = {
32 |
33 | # configure secret for forwarding rules
34 | sopsFile = ./secrets.sops.yaml;
35 | owner = config.users.users.kah.name;
36 | inherit (config.users.users.kah) group;
37 | restartUnits = [ "podman-${app}.service" ];
38 | };
39 |
40 | virtualisation.oci-containers.containers.${app} = {
41 | image = "${image}";
42 | user = "568:568";
43 | environment = {
44 | HASS_IP = "10.8.20.42";
45 | };
46 | environmentFiles = [ config.sops.secrets."services/${app}/env".path ];
47 | volumes = [
48 | "${appFolder}:/config:rw"
49 | "/etc/localtime:/etc/localtime:ro"
50 | ];
51 | };
52 |
53 | services.nginx.virtualHosts."${app}.${config.networking.domain}" = {
54 | useACMEHost = config.networking.domain;
55 | forceSSL = true;
56 | locations."^~ /" = {
57 | proxyPass = "http://${app}:${builtins.toString port}";
58 | proxyWebsockets = true;
59 | extraConfig = "resolver 10.88.0.1;";
60 | };
61 | };
62 |
63 | environment.persistence."${config.mySystem.system.impermanence.persistPath}" =
64 | lib.mkIf config.mySystem.system.impermanence.enable
65 | {
66 | directories = [
67 | {
68 | directory = appFolder;
69 | user = "kah";
70 | group = "kah";
71 | mode = "750";
72 | }
73 | ];
74 | };
75 |
76 | mySystem.services.gatus.monitors = [
77 | {
78 |
79 | name = app;
80 | group = "media";
81 | url = "https://${app}.${config.mySystem.domain}";
82 | interval = "1m";
83 | conditions = [
84 | "[CONNECTED] == true"
85 | "[STATUS] == 200"
86 | "[RESPONSE_TIME] < 1500"
87 | ];
88 | }
89 | ];
90 |
91 | services.restic.backups = config.lib.mySystem.mkRestic {
92 | inherit app;
93 | user = builtins.toString user;
94 | excludePaths = [ "Backups" ];
95 | paths = [ appFolder ];
96 | inherit appFolder;
97 | };
98 | };
99 | }
100 |
--------------------------------------------------------------------------------
/nixos/modules/applications/infrastructure/nginx/default.nix:
--------------------------------------------------------------------------------
1 | {
2 | lib,
3 | config,
4 | ...
5 | }:
6 | with lib;
7 | let
8 | cfg = config.mySystem.services.nginx;
9 | in
10 | {
11 | options.mySystem.services.nginx.enable = mkEnableOption "nginx";
12 |
13 | config = mkIf cfg.enable {
14 |
15 | services.nginx = {
16 | enable = true;
17 |
18 | recommendedGzipSettings = true;
19 | recommendedOptimisation = true;
20 | recommendedProxySettings = true;
21 | recommendedTlsSettings = true;
22 | recommendedBrotliSettings = true;
23 |
24 | proxyResolveWhileRunning = true; # needed to ensure nginx loads even if it cant resolve vhosts
25 |
26 | statusPage = true;
27 | enableReload = true;
28 |
29 | # Only allow PFS-enabled ciphers with AES256
30 | sslCiphers = "AES256+EECDH:AES256+EDH:!aNULL";
31 |
32 | # Enhanced logging for better log analysis
33 | commonHttpConfig = ''
34 | # JSON-formatted access logs for better parsing
35 | log_format json_combined escape=json
36 | '{'
37 | '"time_local":"$time_local",'
38 | '"remote_addr":"$remote_addr",'
39 | '"method":"$request_method",'
40 | '"request_uri":"$request_uri",'
41 | '"status":$status,'
42 | '"body_bytes_sent":$body_bytes_sent,'
43 | '"request_time":$request_time,'
44 | '"http_referrer":"$http_referer",'
45 | '"http_user_agent":"$http_user_agent",'
46 | '"host":"$host",'
47 | '"upstream_addr":"$upstream_addr",'
48 | '"upstream_response_time":"$upstream_response_time"'
49 | '}';
50 |
51 | # Use JSON format for access logs
52 | access_log /var/log/nginx/access.log json_combined;
53 |
54 | # Enhanced error logging
55 | error_log /var/log/nginx/error.log warn;
56 | '';
57 |
58 | # appendHttpConfig = ''
59 | # # Minimize information leaked to other domains
60 | # add_header 'Referrer-Policy' 'origin-when-cross-origin';
61 |
62 | # # Disable embedding as a frame
63 | # add_header X-Frame-Options SAMEORIGIN always;
64 |
65 | # # Prevent injection of code in other mime types (XSS Attacks)
66 | # add_header X-Content-Type-Options nosniff;
67 |
68 | # '';
69 | # # TODO add cloudflre IP's when/if I ingest internally.
70 | # commonHttpConfig = ''
71 | # add_header X-Clacks-Overhead "GNU Terry Pratchett";
72 | # '';
73 | # provide default host with returning error
74 | # else nginx returns the first server
75 | # in the config file... >:S
76 | virtualHosts = {
77 | "_" = {
78 | default = true;
79 | forceSSL = true;
80 | useACMEHost = config.networking.domain;
81 | extraConfig = "return 444;";
82 | };
83 | };
84 |
85 | };
86 |
87 | networking.firewall = {
88 |
89 | allowedTCPPorts = [
90 | 80
91 | 443
92 | ];
93 | allowedUDPPorts = [
94 | 80
95 | 443
96 | ];
97 | };
98 |
99 | # required for using acme certs
100 | users.users.nginx.extraGroups = [ "acme" ];
101 |
102 | };
103 | }
104 |
--------------------------------------------------------------------------------
/nixos/modules/applications/monitoring/hs110-exporter/default.nix:
--------------------------------------------------------------------------------
1 | {
2 |   lib,
3 |   config,
4 |   ...
5 | }:
6 | with lib;
7 | let
8 |   cfg = config.mySystem.${category}.${app};
9 |   app = "hs110-exporter";
10 |   category = "services";
11 |   description = "Prometheus exporter for hs110 smartplugs";
12 |   image = "docker.io/sdelrio/hs110-exporter:v1.0.0";
13 |   user = "kah"; # string
14 |   group = "kah"; # string
15 |   port = 8110; # int
16 |   appFolder = "/var/lib/${app}";
17 |   # persistentFolder = "${config.mySystem.persistentFolder}/var/lib/${appFolder}";
18 | in
19 | {
20 |   options.mySystem.${category}.${app} = {
21 |     enable = mkEnableOption "${app}";
22 |     addToHomepage = mkEnableOption "Add ${app} to homepage" // {
23 |       default = true;
24 |     };
25 |     monitor = mkOption {
26 |       type = lib.types.bool;
27 |       description = "Enable gatus monitoring";
28 |       default = true;
29 |     };
30 |     prometheus = mkOption {
31 |       type = lib.types.bool;
32 |       description = "Enable prometheus scraping";
33 |       default = true;
34 |     };
35 |     addToDNS = mkOption {
36 |       type = lib.types.bool;
37 |       description = "Add to DNS list";
38 |       default = true;
39 |     };
40 |     dev = mkOption {
41 |       type = lib.types.bool;
42 |       description = "Development instance";
43 |       default = false;
44 |     };
45 |     backup = mkOption {
46 |       type = lib.types.bool;
47 |       description = "Enable backups";
48 |       default = true;
49 |     };
50 |
51 |   };
52 |
53 |   config = mkIf cfg.enable {
54 |
55 |     ## Secrets
56 |     # sops.secrets."${category}/${app}/env" = {
57 |     #   sopsFile = ./secrets.sops.yaml;
58 |     #   owner = user;
59 |     #   group = group;
60 |     #   restartUnits = [ "${app}.service" ];
61 |     # };
62 |
63 |     users.users.truxnell.extraGroups = [ group ];
64 |
65 |     # Folder perms - only for containers
66 |     # systemd.tmpfiles.rules = [
67 |     #   "d ${appFolder}/ 0750 ${user} ${group} -"
68 |     # ];
69 |
70 |     environment.persistence."${config.mySystem.system.impermanence.persistPath}" =
71 |       lib.mkIf config.mySystem.system.impermanence.enable
72 |         {
73 |           directories = [
74 |             {
75 |               directory = appFolder;
76 |               inherit user;
77 |               inherit group;
78 |               mode = "750";
79 |             }
80 |           ];
81 |         };
82 |
83 |     virtualisation.oci-containers.containers."${app}-rack" = {
84 |       inherit image;
85 |       ports = [ "${builtins.toString port}:${builtins.toString port}" ];
86 |       environment = {
87 |         HS110IP = "10.8.30.131";
88 |         FREQUENCY = "60";
89 |         LISTENPORT = "8110";
90 |         LABEL = "location=rack";
91 |       };
92 |     };
93 |
94 |     services.vmagent = {
95 |       prometheusConfig = {
96 |         scrape_configs = [
97 |           {
98 |             job_name = "${app}-rack";
99 |             # scrape_timeout = "40s";
100 |             static_configs = [
101 |               {
102 |                 targets = [ "127.0.0.1:${builtins.toString port}" ]; # Prometheus targets are host:port — no scheme (was "http://…")
103 |               }
104 |             ];
105 |           }
106 |         ];
107 |       };
108 |     };
109 |
110 |   };
111 | }
112 |
--------------------------------------------------------------------------------
/nixos/modules/applications/search/searxng/lower_domains:
--------------------------------------------------------------------------------
1 | huffingtonpost.gr
2 | hk.appledaily.com
3 | parliament.uk
4 | islamqa.info
5 | buzzfeed.com
6 | lifewire.com
7 | thespruce.com
8 | hopenothate.org.uk
9 | timesofindia.com
10 | ballotpedia.org
11 | geonames.nga.mil
12 | giantbomb.com
13 | cato.org
14 | pride.com
15 | destructoid.com
16 | alexa.com
17 | profightdb.com
18 | boingboing.net
19 | scienceblogs.com
20 | offworld.com
21 | mediaite.com
22 | xbox.com
23 | britannica.com
24 | dotdash.com
25 | totalwrestlingmagazine.co.uk
26 | mirror.co.uk
27 | uproxx.com
28 | memri.org
29 | huffpostbrasil.com
30 | allmusic.com
31 | newsweek.com
32 | refinery29.com
33 | huffingtonpost.kr
34 | thegreenpapers.com
35 | thedailybeast.com
36 | usmagazine.com
37 | bloomberg.com
38 | huffingtonpost.in
39 | cripsygamer.com
40 | theguardian.com
41 | quackwatch.org
42 | foxnews.com
43 | insidepulse.com
44 | huffingtonpost.de
45 | caribbean-hotels.org
46 | wwenetworknews.com
47 | tripsavvy.com
48 | morningstaronline.co.uk
49 | xbiz.com
50 | entrepreneur.com
51 | maps.google.com
52 | allsides.com
53 | aninews.in
54 | about.com
55 | cagematch.net
56 | skynews.com.au
57 | verywellmind.com
58 | washingtonexaminer.com
59 | hansard.millbanksystems.com
60 | bleacherreport.com
61 | huffingtonpost.fr
62 | thinkprogress.org
63 | bustle.com
64 | hansard.parliament.uk
65 | huffingtonpost.jp
66 | cosmopolitan.com
67 | thejimquisition.com
68 | xinhuanet.com
69 | huffingtonpost.com
70 | humanevents.com
71 | thoughtco.com
72 | escapistmagazine.com
73 | mlg.com
74 | techcrunch.com
75 | themarysue.com
76 | vgmonline.net
77 | vicetv.com
78 | democracynow.org
79 | skepdic.com
80 | insider.com
81 | wrestling-titles.com
82 | solie.org
83 | arabnews.com
84 | heavy.com
85 | thebalance.com
86 | wrestlingworldnews.com
87 | sixthtone.com
88 | mtv.com
89 | huffingtonpost.ca
90 | vice.com
91 | wrestlingperspective.com
92 | aspi.org.au
93 | askmen.com
94 | theamericanconservative.com
95 | jezebel.com
96 | sbs.com.au
97 | realclearpolitics.com
98 | spectator.co.uk
99 | huffingtonpost.es
100 | townhall.com
101 | sega-16.com
102 | prowrestlinghistory.com
103 | verywell.com
104 | thenextweb.com
105 | verywellhealth.com
106 | genius.com
107 | bitmob.com
108 | sherdog.com
109 | salon.com
110 | huffingtonpost.com.mx
111 | metalsucks.net
112 | mdpi.com
113 | trtworld.com
114 | twingalaxies.com
115 | ijr.com
116 | wsws.org
117 | genickbruch.com
118 | dailydot.com
119 | rian.ru
120 | screenrant.com
121 | straitstimes.com
122 | cliffsnotes.com
123 | api.parliament.uk
124 | dailynk.com
125 | easyallies.com
126 | standard.co.uk
127 | forbes.com
128 | socaluncensored.com
129 | aa.com.tr
130 | chinadaily.com.cn
131 | verywellfamily.com
132 | huffingtonpost.co.uk
133 | washingtontimes.com
134 | guinnessworldrecords.com
135 | investopedia.com
136 | nationalreview.com
137 | fair.org
138 | mediamatters.org
139 | wrestlingdata.com
140 | oowrestling.com
141 | tmz.com
142 | huffpost.com
143 | mashable.com
144 | sparknotes.com
145 | huffpostmaghreb.com
146 | huffingtonpost.it
147 | cepr.net
148 | 411mania.com
149 | 1wrestling.com
150 | huffingtonpost.com.au
151 | theneedledrop.com
152 | bodyslam.net
153 | jayisgames.com
154 | steamspy.com
155 | nzpwi.co.nz
156 | biography.com
157 | mondoweiss.net
158 | puretuber.com
159 | aptoide.com
160 | youtubevanced.com
161 |
--------------------------------------------------------------------------------
/nixos/modules/applications/productivity/rss-bridge/default.nix:
--------------------------------------------------------------------------------
1 | {
2 |   lib,
3 |   config,
4 |   ...
5 | }:
6 | with lib;
7 | let
8 |   cfg = config.mySystem.${category}.${app};
9 |   app = "rss-bridge";
10 |   category = "services";
11 |   description = "rss feed for sites without";
12 |   # image = "%{image}"; # template placeholder — the native NixOS service is used instead of a container
13 |   inherit (config.services.rss-bridge) user; # string
14 |   inherit (config.services.rss-bridge) group; # string
15 |   appFolder = "/var/lib/${app}";
16 |   # persistentFolder = "${config.mySystem.persistentFolder}/var/lib/${appFolder}";
17 |   host = "${app}" + (if cfg.dev then "-dev" else ""); # dev instances get a "-dev" host suffix
18 |   url = "${host}.${config.networking.domain}";
19 | in
20 | {
21 |   options.mySystem.${category}.${app} = {
22 |     enable = mkEnableOption "${app}";
23 |     addToHomepage = mkEnableOption "Add ${app} to homepage" // {
24 |       default = true;
25 |     };
26 |     monitor = mkOption {
27 |       type = lib.types.bool;
28 |       description = "Enable gatus monitoring";
29 |       default = true;
30 |     };
31 |     prometheus = mkOption {
32 |       type = lib.types.bool;
33 |       description = "Enable prometheus scraping";
34 |       default = true;
35 |     };
36 |     addToDNS = mkOption {
37 |       type = lib.types.bool;
38 |       description = "Add to DNS list";
39 |       default = true;
40 |     };
41 |     dev = mkOption {
42 |       type = lib.types.bool;
43 |       description = "Development instance";
44 |       default = false;
45 |     };
46 |     backup = mkOption {
47 |       type = lib.types.bool;
48 |       description = "Enable backups";
49 |       default = true;
50 |     };
51 |
52 |   };
53 |
54 |   config = mkIf cfg.enable {
55 |
56 |     ## Secrets
57 |     # sops.secrets."${category}/${app}/env" = {
58 |     #   sopsFile = ./secrets.sops.yaml;
59 |     #   owner = user;
60 |     #   group = group;
61 |     #   restartUnits = [ "${app}.service" ];
62 |     # };
63 |
64 |     users.users.truxnell.extraGroups = [ group ];
65 |
66 |     ## service
67 |     services.rss-bridge = {
68 |       enable = true;
69 |       config.system.enabled_bridges = [ "*" ];
70 |       virtualHost = "${url}";
71 |     };
72 |
73 |     ### gatus integration
74 |     mySystem.services.gatus.monitors = mkIf cfg.monitor [
75 |       {
76 |         name = app;
77 |         group = "${category}";
78 |         url = "https://${url}/static/logo.svg";
79 |         interval = "1m";
80 |         conditions = [
81 |           "[CONNECTED] == true"
82 |           "[STATUS] == 200"
83 |           "[RESPONSE_TIME] < 100" # NOTE(review): far stricter than the 1500ms used by sibling modules — confirm intentional
84 |         ];
85 |       }
86 |     ];
87 |
88 |     ### Ingress
89 |     services.nginx.virtualHosts.${url} = {
90 |       forceSSL = true;
91 |       useACMEHost = config.networking.domain;
92 |     };
93 |
94 |     ### firewall config
95 |
96 |     # networking.firewall = mkIf cfg.openFirewall {
97 |     #   allowedTCPPorts = [ port ];
98 |     #   allowedUDPPorts = [ port ];
99 |     # };
100 |
101 |     ### backups
102 |     warnings = [
103 |       (mkIf (
104 |         !cfg.backup && config.mySystem.purpose != "Development"
105 |       ) "WARNING: Backups for ${app} are disabled!")
106 |     ];
107 |
108 |     services.restic.backups = mkIf cfg.backup (
109 |       config.lib.mySystem.mkRestic {
110 |         inherit app user;
111 |         paths = [ appFolder ];
112 |         inherit appFolder;
113 |       }
114 |     );
115 |
116 |     # services.postgresqlBackup = {
117 |     #   databases = [ app ];
118 |     # };
119 |
120 |   };
121 | }
122 |
--------------------------------------------------------------------------------
/nixos/modules/applications/monitoring/unpoller/default.nix:
--------------------------------------------------------------------------------
1 | {
2 |   lib,
3 |   config,
4 |   ...
5 | }:
6 | with lib;
7 | let
8 |   cfg = config.mySystem.${category}.${app};
9 |   app = "unpoller";
10 |   category = "services";
11 |   description = "";
12 |   user = "unpoller-exporter"; # string
13 |   group = "unpoller-exporter"; # string
14 |   port = 9130; # int
15 |   appFolder = "/var/lib/${app}";
16 |   # persistentFolder = "${config.mySystem.persistentFolder}/var/lib/${appFolder}";
17 |   host = "${app}" + (if cfg.dev then "-dev" else ""); # dev instances get a "-dev" host suffix
18 |   url = "${host}.${config.networking.domain}";
19 | in
20 | {
21 |   options.mySystem.${category}.${app} = {
22 |     enable = mkEnableOption "${app}";
23 |     addToHomepage = mkEnableOption "Add ${app} to homepage" // {
24 |       default = true;
25 |     };
26 |     monitor = mkOption {
27 |       type = lib.types.bool;
28 |       description = "Enable gatus monitoring";
29 |       default = true;
30 |     };
31 |     prometheus = mkOption {
32 |       type = lib.types.bool;
33 |       description = "Enable prometheus scraping";
34 |       default = true;
35 |     };
36 |     addToDNS = mkOption {
37 |       type = lib.types.bool;
38 |       description = "Add to DNS list";
39 |       default = true;
40 |     };
41 |     dev = mkOption {
42 |       type = lib.types.bool;
43 |       description = "Development instance";
44 |       default = false;
45 |     };
46 |     backup = mkOption {
47 |       type = lib.types.bool;
48 |       description = "Enable backups";
49 |       default = true;
50 |     };
51 |
52 |   };
53 |
54 |   config = mkIf cfg.enable {
55 |
56 |     ## Secrets
57 |     sops.secrets."${category}/${app}/pass" = {
58 |       sopsFile = ./secrets.sops.yaml;
59 |       owner = user;
60 |       inherit group;
61 |       restartUnits = [ "${app}.service" ];
62 |     };
63 |
64 |     users.users.truxnell.extraGroups = [ group ];
65 |
66 |     # Folder perms - only for containers
67 |     # systemd.tmpfiles.rules = [
68 |     #   "d ${appFolder}/ 0750 ${user} ${group} -"
69 |     # ];
70 |
71 |     environment.persistence."${config.mySystem.system.impermanence.persistPath}" =
72 |       lib.mkIf config.mySystem.system.impermanence.enable
73 |         {
74 |           directories = [
75 |             {
76 |               directory = appFolder;
77 |               inherit user;
78 |               inherit group;
79 |               mode = "750";
80 |             }
81 |           ];
82 |         };
83 |
84 |     ## service
85 |     services.prometheus.exporters.unpoller = {
86 |       enable = true;
87 |       controllers = [
88 |         {
89 |           url = "https://10.8.10.1"; # UniFi controller address
90 |           verify_ssl = false;
91 |           user = "unifi_read_only";
92 |           pass = config.sops.secrets."${category}/${app}/pass".path; # NOTE(review): this passes the secret *file path*, not the password value — unpoller must support reading the password from a file path here; confirm
93 |           save_ids = true;
94 |           save_events = true;
95 |           save_alarms = true;
96 |           save_anomalies = true;
97 |           # save_dpi = true;
98 |           # save_sites=true;
99 |
100 |         }
101 |       ];
102 |     };
103 |
104 |     services.vmagent = {
105 |       prometheusConfig = {
106 |         scrape_configs = [
107 |           {
108 |             job_name = "unpoller";
109 |             # scrape_timeout = "40s";
110 |             static_configs = [
111 |               {
112 |                 targets = [ "http://127.0.0.1:${builtins.toString port}" ];
113 |               }
114 |             ];
115 |           }
116 |         ];
117 |       };
118 |     };
119 |
120 |   };
121 | }
122 |
--------------------------------------------------------------------------------
/nixos/pkgs/snapraid-btrfs-runner.nix:
--------------------------------------------------------------------------------
1 | {
2 |   symlinkJoin,
3 |   fetchFromGitHub,
4 |   writeScriptBin,
5 |   writeTextFile,
6 |   makeWrapper,
7 |   python311,
8 |   snapraid,
9 |   snapraid-btrfs,
10 |   snapper,
11 | }:
12 | let
13 |   name = "snapraid-btrfs-runner";
14 |   deps = [
15 |     python311
16 |     config # generated runner config; NOTE(review): also referenced directly as ${config} in postBuild — confirm it needs to be in the symlink join
17 |     snapraid
18 |     snapraid-btrfs
19 |     snapper
20 |   ];
21 |   src = fetchFromGitHub {
22 |     owner = "fmoledina";
23 |     repo = "snapraid-btrfs-runner";
24 |     rev = "afb83c67c61fdf3769aab95dba6385184066e119";
25 |     sha256 = "M8LXxsc7jEn5GsiXAKykmFUgsij2aOIenw1Dx+/5Rww=";
26 |   };
27 |   config = writeTextFile {
28 |     name = "snapraid-btrfs-runner.conf";
29 |     text = ''
30 |       [snapraid-btrfs]
31 |       ; path to the snapraid-btrfs executable (e.g. /usr/bin/snapraid-btrfs)
32 |       executable = ${snapraid-btrfs}/bin/snapraid-btrfs
33 |       ; optional: specify snapper-configs and/or snapper-configs-file as specified in snapraid-btrfs
34 |       ; only one instance of each can be specified in this config
35 |       snapper-configs =
36 |       snapper-configs-file =
37 |       ; specify whether snapraid-btrfs should run the pool command after the sync, and optionally specify pool-dir
38 |       pool = false
39 |       pool-dir =
40 |       ; specify whether snapraid-btrfs-runner should automatically clean up all but the last snapraid-btrfs sync snapshot after a successful sync
41 |       cleanup = true
42 |
43 |       [snapper]
44 |       ; path to snapper executable (e.g. /usr/bin/snapper)
45 |       executable = ${snapper}/bin/snapper
46 |
47 |       [snapraid]
48 |       ; path to the snapraid executable (e.g. /usr/bin/snapraid)
49 |       executable = ${snapraid}/bin/snapraid
50 |       ; path to the snapraid config to be used
51 |       config = /etc/snapraid.conf
52 |       ; abort operation if there are more deletes than this, set to -1 to disable
53 |       deletethreshold = -1
54 |       ; if you want touch to be ran each time
55 |       touch = false
56 |
57 |       [logging]
58 |       ; logfile to write to, leave empty to disable
59 |       file =
60 |       ; maximum logfile size in KiB, leave empty for infinite
61 |       maxsize = 5000
62 |
63 |       [email]
64 |       ; when to send an email, comma-separated list of [success, error]
65 |       sendon = success,error
66 |       ; set to false to get full programm output via email
67 |       short = true
68 |       subject = [SnapRAID] Status Report:
69 |       from =
70 |       to =
71 |       ; maximum email size in KiB
72 |       maxsize = 500
73 |
74 |       [smtp]
75 |       host =
76 |       ; leave empty for default port
77 |       port =
78 |       ; set to "true" to activate
79 |       ssl = false
80 |       tls = false
81 |       user =
82 |       password =
83 |
84 |       [scrub]
85 |       ; set to true to run scrub after sync
86 |       enabled = false
87 |       ; plan can be 0-100 percent, new, bad, or full
88 |       plan = 12
89 |       ; only used for percent scrub plan
90 |       older-than = 10
91 |     '';
92 |     destination = "/etc/${name}";
93 |   };
94 |   script =
95 |     (writeScriptBin name (builtins.readFile (src + "/snapraid-btrfs-runner.py"))).overrideAttrs
96 |       (old: {
97 |         buildCommand = "${old.buildCommand}\n patchShebangs $out"; # rewrite the script shebang to a store path
98 |       });
99 | in
100 | symlinkJoin {
101 |   inherit name;
102 |   paths = [ script ] ++ deps;
103 |   buildInputs = [
104 |     makeWrapper
105 |     python311
106 |   ];
107 |   postBuild = "wrapProgram $out/bin/${name} --add-flags '-c ${config}/etc/snapraid-btrfs-runner' --set PATH $out/bin"; # always load the generated config; restrict PATH to the joined deps
108 | }
109 |
--------------------------------------------------------------------------------
/nixos/modules/applications/media/readarr/default.nix:
--------------------------------------------------------------------------------
1 | {
2 |   lib,
3 |   config,
4 |   ...
5 | }:
6 | with lib;
7 | let
8 |   cfg = config.mySystem.${category}.${app};
9 |   app = "readarr";
10 |   category = "services";
11 |   description = "Book management";
12 |   # image = "";
13 |   user = "kah"; # string
14 |   group = "kah"; # string
15 |   port = 8787; # int
16 |   appFolder = "/var/lib/${app}";
17 |   # persistentFolder = "${config.mySystem.persistentFolder}/var/lib/${appFolder}";
18 |   host = "${app}" + (if cfg.dev then "-dev" else ""); # dev instances get a "-dev" host suffix
19 |   url = "${host}.${config.networking.domain}";
20 | in
21 | {
22 |   options.mySystem.${category}.${app} = {
23 |     enable = mkEnableOption "${app}";
24 |     addToHomepage = mkEnableOption "Add ${app} to homepage" // {
25 |       default = true;
26 |     };
27 |     monitor = mkOption {
28 |       type = lib.types.bool;
29 |       description = "Enable gatus monitoring";
30 |       default = true;
31 |     };
32 |     prometheus = mkOption {
33 |       type = lib.types.bool;
34 |       description = "Enable prometheus scraping";
35 |       default = true;
36 |     };
37 |     addToDNS = mkOption {
38 |       type = lib.types.bool;
39 |       description = "Add to DNS list";
40 |       default = true;
41 |     };
42 |     dev = mkOption {
43 |       type = lib.types.bool;
44 |       description = "Development instance";
45 |       default = false;
46 |     };
47 |     backup = mkOption {
48 |       type = lib.types.bool;
49 |       description = "Enable backups";
50 |       default = true;
51 |     };
52 |
53 |   };
54 |
55 |   config = mkIf cfg.enable {
56 |
57 |     ## Secrets
58 |     sops.secrets."${category}/${app}/env" = {
59 |       sopsFile = ./secrets.sops.yaml;
60 |       owner = user;
61 |       inherit group; # same "kah" group as the service — kept in sync via the let binding
62 |       restartUnits = [ "${app}.service" ];
63 |     };
64 |
65 |     users.users.truxnell.extraGroups = [ group ];
66 |
67 |     environment.persistence."${config.mySystem.system.impermanence.persistPath}" =
68 |       lib.mkIf config.mySystem.system.impermanence.enable
69 |         {
70 |           directories = [
71 |             {
72 |               directory = appFolder;
73 |               inherit user;
74 |               inherit group;
75 |               mode = "750";
76 |             }
77 |           ];
78 |         };
79 |
80 |     ## service
81 |     services.readarr = {
82 |       enable = true;
83 |       dataDir = appFolder;
84 |       inherit user group;
85 |     };
86 |
87 |     ### gatus integration
88 |     mySystem.services.gatus.monitors = mkIf cfg.monitor [
89 |       {
90 |         name = app;
91 |         group = "${category}";
92 |         url = "https://${url}";
93 |         interval = "1m";
94 |         conditions = [
95 |           "[CONNECTED] == true"
96 |           "[STATUS] == 200"
97 |           "[RESPONSE_TIME] < 1500"
98 |         ];
99 |       }
100 |     ];
101 |
102 |     ### Ingress
103 |     services.nginx.virtualHosts.${url} = {
104 |       forceSSL = true;
105 |       useACMEHost = config.networking.domain;
106 |       locations."^~ /" = {
107 |         proxyPass = "http://127.0.0.1:${builtins.toString port}";
108 |         proxyWebsockets = true;
109 |       };
110 |     };
111 |
112 |     ### backups
113 |     warnings = [
114 |       (mkIf (
115 |         !cfg.backup && config.mySystem.purpose != "Development"
116 |       ) "WARNING: Backups for ${app} are disabled!")
117 |     ];
118 |
119 |     services.restic.backups = mkIf cfg.backup (
120 |       config.lib.mySystem.mkRestic {
121 |         inherit app user;
122 |         paths = [ appFolder ];
123 |         inherit appFolder;
124 |       }
125 |     );
126 |
127 |   };
128 | }
129 |
--------------------------------------------------------------------------------
/nixos/modules/applications/monitoring/mcp-grafana/default.nix:
--------------------------------------------------------------------------------
1 | {
2 |   lib,
3 |   config,
4 |   ...
5 | }:
6 | with lib;
7 | let
8 |   cfg = config.mySystem.services.mcp-grafana;
9 |   app = "mcp-grafana";
10 |   category = "services";
11 |   description = "MCP server for Grafana";
12 |   image = "docker.io/grafana/mcp-grafana";
13 |   user = "kah";
14 |   group = "kah";
15 |   port = 9092; # Default port for SSE/HTTP transport
16 |   appFolder = "/var/lib/${app}";
17 |
18 |   # Get Loki URL from Loki configuration
19 |   lokiUrl = "https://loki.${config.networking.domain}";
20 |
21 |   # Get Grafana URL
22 |   grafanaUrl = "https://grafana.${config.networking.domain}";
23 | in
24 | {
25 |   options.mySystem.services.${app} = {
26 |     enable = mkEnableOption "${app}";
27 |     addToHomepage = mkEnableOption "Add ${app} to homepage" // {
28 |       default = false; # MCP server doesn't have a web UI
29 |     };
30 |     monitor = mkOption {
31 |       type = lib.types.bool;
32 |       description = "Enable gatus monitoring";
33 |       default = false; # MCP server doesn't expose HTTP endpoints
34 |     };
35 |     prometheus = mkOption {
36 |       type = lib.types.bool;
37 |       description = "Enable prometheus scraping";
38 |       default = false;
39 |     };
40 |     backup = mkOption {
41 |       type = lib.types.bool;
42 |       description = "Enable backups";
43 |       default = false; # MCP server is stateless
44 |     };
45 |   };
46 |
47 |   config = mkIf cfg.enable {
48 |     ## Secrets
49 |     sops.secrets."${category}/${app}/env" = {
50 |       sopsFile = ./secrets.sops.yaml;
51 |       owner = config.users.users.kah.name;
52 |       inherit (config.users.users.kah) group;
53 |       restartUnits = [ "podman-${app}.service" ]; # container unit, hence the podman- prefix
54 |     };
55 |
56 |     users.users.truxnell.extraGroups = [ group ];
57 |
58 |     # Folder perms - only for containers
59 |     # systemd.tmpfiles.rules = [
60 |     #   "d ${appFolder}/ 0750 ${user} ${group} -"
61 |     # ];
62 |
63 |     environment.persistence."${config.mySystem.system.impermanence.persistPath}" =
64 |       lib.mkIf config.mySystem.system.impermanence.enable
65 |         {
66 |           directories = [
67 |             {
68 |               directory = appFolder;
69 |               inherit user;
70 |               inherit group;
71 |               mode = "750";
72 |             }
73 |           ];
74 |         };
75 |
76 |     ## Container Configuration
77 |     virtualisation.oci-containers.containers = config.lib.mySystem.mkContainer {
78 |       inherit app image;
79 |       user = "568"; # UID inside the container — NOTE(review): differs from host user "kah" above; confirm ownership expectations
80 |       group = "568";
81 |       env = {
82 |         GRAFANA_URL = grafanaUrl;
83 |         # GRAFANA_SERVICE_ACCOUNT_TOKEN will come from secrets file
84 |         # LOKI_URL is optional but can be used if needed
85 |         LOKI_URL = lokiUrl;
86 |       };
87 |       envFiles = [ config.sops.secrets."${category}/${app}/env".path ];
88 |       # Use SSE transport for systemd-managed service
89 |       # MCP clients can connect via SSE or use stdio by spawning the container directly
90 |       cmd = [ "-t" "sse" "-address" ":${builtins.toString port}" ];
91 |       volumes = [ ];
92 |       ports = [ "${builtins.toString port}:${builtins.toString port}" ];
93 |     };
94 |
95 |     ### backups
96 |     warnings = [
97 |       (mkIf (
98 |         !cfg.backup && config.mySystem.purpose != "Development"
99 |       ) "WARNING: Backups for ${app} are disabled!")
100 |     ];
101 |
102 |     services.restic.backups = mkIf cfg.backup (
103 |       config.lib.mySystem.mkRestic {
104 |         inherit app user;
105 |         paths = [ appFolder ];
106 |         inherit appFolder;
107 |       }
108 |     );
109 |   };
110 | }
111 |
112 |
--------------------------------------------------------------------------------
/nixos/modules/applications/media/prowlarr/default.nix:
--------------------------------------------------------------------------------
1 | {
2 |   lib,
3 |   config,
4 |   pkgs,
5 |   ...
6 | }:
7 | with lib;
8 | let
9 |   cfg = config.mySystem.${category}.${app};
10 |   app = "prowlarr";
11 |   category = "services";
12 |   description = "Content searcher";
13 |   # image = "";
14 |   user = app; # string
15 |   group = app; # string
16 |   port = 9696; # int
17 |   appFolder = "/var/lib/private/${app}"; # NOTE(review): private/ path suggests the service runs with systemd DynamicUser — confirm
18 |   # persistentFolder = "${config.mySystem.persistentFolder}/var/lib/${appFolder}";
19 |   host = "${app}" + (if cfg.dev then "-dev" else ""); # dev instances get a "-dev" host suffix
20 |   url = "${host}.${config.networking.domain}";
21 | in
22 | {
23 |   options.mySystem.${category}.${app} = {
24 |     enable = mkEnableOption "${app}";
25 |     addToHomepage = mkEnableOption "Add ${app} to homepage" // {
26 |       default = true;
27 |     };
28 |     monitor = mkOption {
29 |       type = lib.types.bool;
30 |       description = "Enable gatus monitoring";
31 |       default = true;
32 |     };
33 |     prometheus = mkOption {
34 |       type = lib.types.bool;
35 |       description = "Enable prometheus scraping";
36 |       default = true;
37 |     };
38 |     addToDNS = mkOption {
39 |       type = lib.types.bool;
40 |       description = "Add to DNS list";
41 |       default = true;
42 |     };
43 |     dev = mkOption {
44 |       type = lib.types.bool;
45 |       description = "Development instance";
46 |       default = false;
47 |     };
48 |     backup = mkOption {
49 |       type = lib.types.bool;
50 |       description = "Enable backups";
51 |       default = true;
52 |     };
53 |
54 |   };
55 |
56 |   config = mkIf cfg.enable {
57 |
58 |     ## Secrets
59 |     # sops.secrets."${category}/${app}/env" = {
60 |     #   sopsFile = ./secrets.sops.yaml;
61 |     #   owner = user;
62 |     #   group = group;
63 |     #   restartUnits = [ "${app}.service" ];
64 |     # };
65 |
66 |     users.users.truxnell.extraGroups = [ group ];
67 |
68 |     environment.persistence."${config.mySystem.system.impermanence.persistPath}" =
69 |       lib.mkIf config.mySystem.system.impermanence.enable
70 |         {
71 |           directories = [ { directory = appFolder; } ]; # no owner/mode set — NOTE(review): presumably the service manages ownership itself; confirm
72 |         };
73 |
74 |     ## service
75 |     services.prowlarr = {
76 |       enable = true;
77 |       package = pkgs.prowlarr;
78 |     };
79 |
80 |     ### gatus integration
81 |     mySystem.services.gatus.monitors = mkIf cfg.monitor [
82 |       {
83 |         name = app;
84 |         group = "${category}";
85 |         url = "https://${url}";
86 |         interval = "1m";
87 |         conditions = [
88 |           "[CONNECTED] == true"
89 |           "[STATUS] == 200"
90 |           "[RESPONSE_TIME] < 1500"
91 |         ];
92 |       }
93 |     ];
94 |
95 |     ### Ingress
96 |     services.nginx.virtualHosts.${url} = {
97 |       forceSSL = true;
98 |       useACMEHost = config.networking.domain;
99 |       locations."^~ /" = {
100 |         proxyPass = "http://127.0.0.1:${builtins.toString port}";
101 |       };
102 |     };
103 |
104 |     ### firewall config
105 |
106 |     # networking.firewall = mkIf cfg.openFirewall {
107 |     #   allowedTCPPorts = [ port ];
108 |     #   allowedUDPPorts = [ port ];
109 |     # };
110 |
111 |     ### backups
112 |     warnings = [
113 |       (mkIf (
114 |         !cfg.backup && config.mySystem.purpose != "Development"
115 |       ) "WARNING: Backups for ${app} are disabled!")
116 |     ];
117 |
118 |     services.restic.backups = mkIf cfg.backup (
119 |       config.lib.mySystem.mkRestic {
120 |         inherit app user;
121 |         paths = [ appFolder ];
122 |         inherit appFolder;
123 |       }
124 |     );
125 |
126 |     # services.postgresqlBackup = {
127 |     #   databases = [ app ];
128 |     # };
129 |
130 |   };
131 | }
132 |
--------------------------------------------------------------------------------
/nixos/modules/applications/gaming/factorio/space-age.nix:
--------------------------------------------------------------------------------
1 | {
2 |   lib,
3 |   config,
4 |   ...
5 | }:
6 | with lib;
7 | let
8 |   app = "factorio";
9 |   instance = "space-age";
10 |   image = "docker.io/factoriotools/factorio:latest";
11 |   user = "845"; # string
12 |   group = "845"; # string
13 |   port = 34204; # int — game port (UDP)
14 |   port_rcon = 27020; # int — RCON port
15 |   cfg = config.mySystem.services.${app}.${instance};
16 |   appFolder = "/var/lib/${app}/${instance}";
17 |   # persistentFolder = "${config.mySystem.persistentFolder}/var/lib/${appFolder}";
18 | in
19 | {
20 |   options.mySystem.services.${app}.${instance} = {
21 |     enable = mkEnableOption "${app} - ${instance}";
22 |     addToHomepage = mkEnableOption "Add ${app} - ${instance} to homepage" // {
23 |       default = true;
24 |     };
25 |     openFirewall = mkEnableOption "Open firewall for ${app} - ${instance}" // {
26 |       default = true;
27 |     };
28 |   };
29 |
30 |   config = mkIf cfg.enable {
31 |
32 |     # ensure folder exist and has correct owner/group
33 |     systemd.tmpfiles.rules = [
34 |       "d ${appFolder} 0755 ${user} ${group} -" # The - disables automatic cleanup, so the file won't be removed after a period
35 |     ];
36 |     # make user for container
37 |     users = {
38 |       users.${app} = {
39 |         name = app;
40 |         uid = lib.strings.toInt user;
41 |         group = app;
42 |         isSystemUser = true;
43 |       };
44 |       groups.${app} = {
45 |         gid = lib.strings.toInt group;
46 |       };
47 |     };
48 |     # add user to group to view files/storage
49 |     users.users.truxnell.extraGroups = [ "${app}" ];
50 |
51 |     sops.secrets."services/${app}/env" = {
52 |       sopsFile = ./secrets.sops.yaml;
53 |       owner = app;
54 |       group = app;
55 |       restartUnits = [ "podman-${app}-${instance}.service" ];
56 |     };
57 |
58 |     virtualisation.oci-containers.containers."${app}-${instance}" = {
59 |       image = "${image}";
60 |       user = "${user}:${group}";
61 |       volumes = [
62 |         "${appFolder}:/factorio:rw"
63 |         "/etc/localtime:/etc/localtime:ro"
64 |       ];
65 |       environment = {
66 |         UPDATE_MODS_ON_START = "false";
67 |         PORT = "${builtins.toString port}";
68 |         RCON_PORT = "${builtins.toString port_rcon}";
69 |
70 |       };
71 |       environmentFiles = [ config.sops.secrets."services/${app}/env".path ];
72 |       ports = [
73 |         "${builtins.toString port}:${builtins.toString port}/udp" # protocol must be lowercase for docker/podman port publishing
74 |         "${builtins.toString port_rcon}:${builtins.toString port_rcon}/udp"
75 |       ]; # expose port
76 |     };
77 |     networking.firewall = mkIf cfg.openFirewall {
78 |       allowedUDPPorts = [ port ]; # Factorio game traffic is UDP
79 |       allowedTCPPorts = [ port ]; # I dont use rcon so not opening that too.
80 |     };
81 |
82 |     environment.persistence."${config.mySystem.system.impermanence.persistPath}" =
83 |       lib.mkIf config.mySystem.system.impermanence.enable
84 |         {
85 |           directories = [
86 |             {
87 |               directory = appFolder;
88 |               inherit user;
89 |               inherit group;
90 |               mode = "750";
91 |             }
92 |           ];
93 |         };
94 |
95 |     mySystem.services.gatus.monitors = mkIf config.mySystem.services.gatus.enable [
96 |       {
97 |
98 |         name = app;
99 |         group = "media";
100 |         url = "udp://${config.networking.hostName}:${builtins.toString port}";
101 |         interval = "30s";
102 |         conditions = [
103 |           "[CONNECTED] == true"
104 |           "[RESPONSE_TIME] < 1500"
105 |         ];
106 |       }
107 |     ];
108 |
109 |     services.restic.backups = config.lib.mySystem.mkRestic {
110 |       inherit app user;
111 |       paths = [ appFolder ];
112 |       inherit appFolder;
113 |     };
114 |
115 |   };
116 | }
117 |
--------------------------------------------------------------------------------
/nixos/modules/applications/networking/n8n/default.nix:
--------------------------------------------------------------------------------
1 | {
2 |   lib,
3 |   config,
4 |   pkgs, # NOTE(review): not referenced anywhere in this module
5 |   ...
6 | }:
7 | with lib;
8 | let
9 |   cfg = config.mySystem.${category}.${app};
10 |   app = "n8n";
11 |   category = "services";
12 |   description = "Workflow automation tool";
13 |   appFolder = "/var/lib/${app}";
14 |   user = app;
15 |   group = app;
16 |   port = 5678; # int
17 |   host = "${app}" + (if cfg.dev then "-dev" else ""); # dev instances get a "-dev" host suffix
18 |   url = "${host}.${config.networking.domain}";
19 | in
20 | {
21 |   options.mySystem.${category}.${app} = {
22 |     enable = mkEnableOption "${app}";
23 |     addToHomepage = mkEnableOption "Add ${app} to homepage" // {
24 |       default = true;
25 |     };
26 |     monitor = mkOption {
27 |       type = lib.types.bool;
28 |       description = "Enable gatus monitoring";
29 |       default = true;
30 |     };
31 |     prometheus = mkOption {
32 |       type = lib.types.bool;
33 |       description = "Enable prometheus scraping";
34 |       default = true;
35 |     };
36 |     addToDNS = mkOption {
37 |       type = lib.types.bool;
38 |       description = "Add to DNS list";
39 |       default = true;
40 |     };
41 |     dev = mkOption {
42 |       type = lib.types.bool;
43 |       description = "Development instance";
44 |       default = false;
45 |     };
46 |     backup = mkOption {
47 |       type = lib.types.bool;
48 |       description = "Enable backups";
49 |       default = true;
50 |     };
51 |   };
52 |
53 |   config = mkIf cfg.enable {
54 |
55 |     ## Secrets
56 |     # sops.secrets."${category}/${app}/env" = mkIf (builtins.pathExists ./secrets.sops.yaml) {
57 |     #   sopsFile = ./secrets.sops.yaml;
58 |     #   owner = user;
59 |     #   inherit group;
60 |     #   restartUnits = [ "${app}.service" ];
61 |     # };
62 |
63 |     users.users.truxnell.extraGroups = [ group ];
64 |
65 |     environment.persistence."${config.mySystem.system.impermanence.persistPath}" =
66 |       lib.mkIf config.mySystem.system.impermanence.enable
67 |         {
68 |           directories = [
69 |             {
70 |               directory = appFolder;
71 |               inherit user;
72 |               inherit group;
73 |               mode = "750";
74 |             }
75 |           ];
76 |         };
77 |
78 |     ## service
79 |     services.n8n = {
80 |       enable = true;
81 |       webhookUrl = "https://${url}/"; # external URL n8n advertises for webhooks
82 |     };
83 |
84 |     ### gatus integration
85 |     mySystem.services.gatus.monitors = mkIf cfg.monitor [
86 |       {
87 |         name = app;
88 |         group = "${category}";
89 |         url = "https://${url}";
90 |         interval = "1m";
91 |         conditions = [
92 |           "[CONNECTED] == true"
93 |           "[STATUS] == 200"
94 |           "[RESPONSE_TIME] < 1500"
95 |         ];
96 |       }
97 |     ];
98 |
99 |     ### Ingress
100 |     services.nginx.virtualHosts.${url} = {
101 |       forceSSL = true;
102 |       useACMEHost = config.networking.domain;
103 |       locations."^~ /" = {
104 |         proxyPass = "http://127.0.0.1:${builtins.toString port}";
105 |         proxyWebsockets = true;
106 |       };
107 |     };
108 |
109 |     ### firewall config
110 |
111 |     # networking.firewall = mkIf cfg.openFirewall {
112 |     #   allowedTCPPorts = [ port ];
113 |     #   allowedUDPPorts = [ port ];
114 |     # };
115 |
116 |     ### backups
117 |     warnings = [
118 |       (mkIf (
119 |         !cfg.backup && config.mySystem.purpose != "Development"
120 |       ) "WARNING: Backups for ${app} are disabled!")
121 |     ];
122 |
123 |     services.restic.backups = mkIf cfg.backup (
124 |       config.lib.mySystem.mkRestic {
125 |         inherit app;
126 |         user = builtins.toString user; # NOTE(review): user is already the string "n8n" — toString is a no-op here
127 |         paths = [ appFolder ];
128 |         inherit appFolder;
129 |       }
130 |     );
131 |
132 |   };
133 | }
134 |
135 |
--------------------------------------------------------------------------------
/nixos/modules/applications/media/qbittorrent/lts.nix:
--------------------------------------------------------------------------------
1 | {
2 |   lib,
3 |   config,
4 |   pkgs,
5 |   ...
6 | }:
7 | with lib;
8 | let
9 |   app = "qbittorrent-lts";
10 |   user = "kah"; # string
11 |   group = "kah"; # string
12 |   port = 8080; # int — container web UI port (proxied below)
13 |   qbit_port = 32387; # BitTorrent peer port
14 |   cfg = config.mySystem.services.${app};
15 |   appFolder = "/var/lib/${app}";
16 |   # persistentFolder = "${config.mySystem.persistentFolder}/var/lib/${appFolder}";
17 |   xseedShell =
18 |     pkgs.writeScriptBin "xseed.sh" # script to notify cross-seed when a torrent finishes
19 |       ''
20 |         #!/bin/bash
21 |         # qbit command: /scripts/xseed.sh "%F"
22 |         /usr/bin/curl -X POST --data-urlencode "path=$1" https://cross-seed.trux.dev/api/webhook
23 |       '';
24 |
25 | in
26 | {
27 |
28 |   options.mySystem.services.${app} = {
29 |     enable = mkEnableOption "${app}";
30 |     addToHomepage = mkEnableOption "Add ${app} to homepage" // {
31 |       default = true;
32 |     };
33 |     qbtools = mkEnableOption "qbtools" // {
34 |       default = true;
35 |     };
36 |     openFirewall = mkEnableOption "Open firewall for ${app}" // {
37 |       default = true;
38 |     };
39 |
40 |   };
41 |
42 |   config = mkIf cfg.enable {
43 |     # ensure folder exist and has correct owner/group
44 |     systemd.tmpfiles.rules = [
45 |       "d ${appFolder} 0750 ${user} ${group} -" # The - disables automatic cleanup, so the file won't be removed after a period
46 |     ];
47 |
48 |     virtualisation.oci-containers.containers.${app} =
49 |       let
50 |         image = "ghcr.io/home-operations/qbittorrent:5.1.4";
51 |       in
52 |       {
53 |         image = "${image}";
54 |         user = "568:568";
55 |         environment = {
56 |           QBITTORRENT__BT_PORT = builtins.toString qbit_port;
57 |         };
58 |         ports = [ "${builtins.toString qbit_port}:${builtins.toString qbit_port}" ];
59 |         volumes = [
60 |           "${appFolder}:/config:rw"
61 |           "${xseedShell}/bin/xseed.sh:/scripts/xseed.sh:Z"
62 |           "/tank/natflix/downloads/qbittorrent-lts:/tank/natflix/downloads/qbittorrent:rw"
63 |           "/tank/natflix/i486:/tank/natflix/i486:rw"
64 |           "/mnt/cache:/cache"
65 |           "/etc/localtime:/etc/localtime:ro"
66 |         ];
67 |       };
68 |
69 |     environment.persistence."${config.mySystem.system.impermanence.persistPath}" =
70 |       lib.mkIf config.mySystem.system.impermanence.enable
71 |         {
72 |           directories = [
73 |             {
74 |               directory = appFolder;
75 |               inherit user;
76 |               inherit group;
77 |               mode = "750";
78 |             }
79 |           ];
80 |         };
81 |
82 |     services.nginx.virtualHosts."${app}.${config.networking.domain}" = {
83 |       useACMEHost = config.networking.domain;
84 |       forceSSL = true;
85 |       locations."^~ /" = {
86 |         proxyPass = "http://${app}:${builtins.toString port}";
87 |         extraConfig = "resolver 10.88.0.1;"; # resolve the container name via podman's DNS
88 |
89 |       };
90 |     };
91 |
92 |     # gotta open up that firewall
93 |     networking.firewall = mkIf cfg.openFirewall {
94 |
95 |       allowedTCPPorts = [ qbit_port ];
96 |       allowedUDPPorts = [ qbit_port ];
97 |     };
98 |
99 |     mySystem.services.gatus.monitors = [
100 |       {
101 |
102 |         name = app;
103 |         group = "media";
104 |         url = "https://${app}.${config.networking.domain}"; # matches the nginx vhost above (was config.mySystem.domain)
105 |         interval = "1m";
106 |         conditions = [
107 |           "[CONNECTED] == true"
108 |           "[STATUS] == 200"
109 |           "[RESPONSE_TIME] < 1500"
110 |         ];
111 |       }
112 |     ];
113 |
114 |     services.restic.backups = config.lib.mySystem.mkRestic {
115 |       inherit app user;
116 |       excludePaths = [ "Backups" ];
117 |       paths = [ appFolder ];
118 |       inherit appFolder;
119 |     };
120 |
121 |   };
122 | }
123 |
--------------------------------------------------------------------------------
/nixos/modules/applications/gaming/factorio/freight-forwarding.nix:
--------------------------------------------------------------------------------
1 | {
2 |   lib,
3 |   config,
4 |   ...
5 | }:
6 | with lib;
7 | let
8 |   app = "factorio";
9 |   instance = "freight-forwarding";
10 |   image = "factoriotools/factorio:stable@sha256:feeebedf6754969a43d7c08a48fc9508b2d91b34f5cbf1e120740b3fbdf3a75e";
11 |   user = "845"; # string
12 |   group = "845"; # string
13 |   port = 34203; # int — game port (UDP)
14 |   port_rcon = 27019; # int — RCON port
15 |   cfg = config.mySystem.services.${app}.${instance};
16 |   appFolder = "/var/lib/${app}/${instance}";
17 |   # persistentFolder = "${config.mySystem.persistentFolder}/var/lib/${appFolder}";
18 | in
19 | {
20 |
21 |   options.mySystem.services.${app}.${instance} = {
22 |     enable = mkEnableOption "${app} - ${instance}";
23 |     addToHomepage = mkEnableOption "Add ${app} - ${instance} to homepage" // {
24 |       default = true;
25 |     };
26 |     openFirewall = mkEnableOption "Open firewall for ${app} - ${instance}" // {
27 |       default = true;
28 |     };
29 |   };
30 |
31 |   config = mkIf cfg.enable {
32 |
33 |     # ensure folder exist and has correct owner/group
34 |     systemd.tmpfiles.rules = [
35 |       "d ${appFolder} 0755 ${user} ${group} -" # The - disables automatic cleanup, so the file won't be removed after a period
36 |     ];
37 |     # make user for container
38 |     users = {
39 |       users.${app} = {
40 |         name = app;
41 |         uid = lib.strings.toInt user;
42 |         group = app;
43 |         isSystemUser = true;
44 |       };
45 |       groups.${app} = {
46 |         gid = lib.strings.toInt group;
47 |       };
48 |     };
49 |     # add user to group to view files/storage
50 |     users.users.truxnell.extraGroups = [ "${app}" ];
51 |
52 |     sops.secrets."services/${app}/env" = {
53 |       sopsFile = ./secrets.sops.yaml;
54 |       owner = app;
55 |       group = app;
56 |       restartUnits = [ "podman-${app}-${instance}.service" ];
57 |     };
58 |
59 |     virtualisation.oci-containers.containers."${app}-${instance}" = {
60 |       image = "${image}";
61 |       user = "${user}:${group}";
62 |       volumes = [
63 |         "${appFolder}:/factorio:rw"
64 |         "/etc/localtime:/etc/localtime:ro"
65 |       ];
66 |       environment = {
67 |         UPDATE_MODS_ON_START = "false";
68 |         PORT = "${builtins.toString port}";
69 |         RCON_PORT = "${builtins.toString port_rcon}";
70 |       };
71 |       environmentFiles = [ config.sops.secrets."services/${app}/env".path ];
72 |       ports = [
73 |         "${builtins.toString port}:${builtins.toString port}/udp" # protocol must be lowercase for docker/podman port publishing
74 |         "${builtins.toString port_rcon}:${builtins.toString port_rcon}/udp"
75 |       ]; # expose port
76 |     };
77 |     networking.firewall = mkIf cfg.openFirewall {
78 |       allowedUDPPorts = [ port ]; # Factorio game traffic is UDP
79 |       allowedTCPPorts = [ port ]; # I dont use rcon so not opening that too.
80 |     };
81 |
82 |     environment.persistence."${config.mySystem.system.impermanence.persistPath}" =
83 |       lib.mkIf config.mySystem.system.impermanence.enable
84 |         {
85 |           directories = [
86 |             {
87 |               directory = appFolder;
88 |               inherit user;
89 |               inherit group;
90 |               mode = "750";
91 |             }
92 |           ];
93 |         };
94 |
95 |     mySystem.services.gatus.monitors = mkIf config.mySystem.services.gatus.enable [
96 |       {
97 |
98 |         name = app;
99 |         group = "media";
100 |         url = "udp://${config.networking.hostName}:${builtins.toString port}";
101 |         interval = "30s";
102 |         conditions = [
103 |           "[CONNECTED] == true"
104 |           "[RESPONSE_TIME] < 1500"
105 |         ];
106 |       }
107 |     ];
108 |
109 |     services.restic.backups = config.lib.mySystem.mkRestic {
110 |       inherit app user;
111 |       paths = [ appFolder ];
112 |       inherit appFolder;
113 |     };
114 |
115 |   };
116 | }
117 |
--------------------------------------------------------------------------------
/nixos/modules/nixos/system/motd/default.nix:
--------------------------------------------------------------------------------
{
  config,
  lib,
  pkgs,
  ...
}:
let
  cfg = config.mySystem.system.motd;

  # Message-of-the-day script shown on interactive login.
  # Prints a hostname banner, host role, per-interface IPv4 addresses,
  # OS/kernel info, load/memory/uptime, zpool health (when ZFS tooling is
  # present) and the state of podman container units.
  motd = pkgs.writeShellScriptBin "motd" ''
    # NOTE: writeShellScriptBin supplies the bash shebang; the previous
    # inline "#! /usr/bin/env bash" line was a no-op comment and is removed.
    source /etc/os-release
    service_status=$(systemctl list-units | grep podman-)
    RED="\e[31m"
    GREEN="\e[32m"
    BOLD="\e[1m"
    ENDCOLOR="\e[0m"
    LOAD1=`cat /proc/loadavg | awk {'print $1'}`
    LOAD5=`cat /proc/loadavg | awk {'print $2'}`
    LOAD15=`cat /proc/loadavg | awk {'print $3'}`

    MEMORY=`free -m | awk 'NR==2{printf "%s/%sMB (%.2f%%)\n", $3,$2,$3*100 / $2 }'`

    # time of day
    # NOTE(review): TIME is computed but never printed anywhere below —
    # kept for a future greeting line; confirm before deleting.
    HOUR=$(date +"%H")
    if [ $HOUR -lt 12 -a $HOUR -ge 0 ]
    then TIME="morning"
    elif [ $HOUR -lt 17 -a $HOUR -ge 12 ]
    then TIME="afternoon"
    else
      TIME="evening"
    fi

    # Uptime split into days/hours/minutes/seconds.
    uptime=`cat /proc/uptime | cut -f1 -d.`
    upDays=$((uptime/60/60/24))
    upHours=$((uptime/60/60%24))
    upMins=$((uptime/60%60))
    upSecs=$((uptime%60))

    figlet "$(hostname)" | lolcat -f
    printf "$BOLD %-20s$ENDCOLOR %s\n" "Role:" "${config.mySystem.purpose}"
    printf "\n"
    ${lib.strings.concatStrings (
      lib.lists.forEach cfg.networkInterfaces (
        x:
        "printf \"$BOLD * %-20s$ENDCOLOR %s\\n\" \"IPv4 ${x}\" \"$(ip -4 addr show ${x} | grep -oP '(?<=inet\\s)\\d+(\\.\\d+){3}')\"\n"
      )
    )}
    printf "$BOLD * %-20s$ENDCOLOR %s\n" "Release" "$PRETTY_NAME"
    printf "$BOLD * %-20s$ENDCOLOR %s\n" "Kernel" "$(uname -rs)"
    # Fixed: format string had a trailing %s with no matching argument.
    [ -f /var/run/reboot-required ] && printf "$RED * %-20s$ENDCOLOR\n" "A reboot is required"
    printf "\n"
    printf "$BOLD * %-20s$ENDCOLOR %s\n" "CPU usage" "$LOAD1, $LOAD5, $LOAD15 (1, 5, 15 min)"
    printf "$BOLD * %-20s$ENDCOLOR %s\n" "Memory" "$MEMORY"
    printf "$BOLD * %-20s$ENDCOLOR %s\n" "System uptime" "$upDays days $upHours hours $upMins minutes $upSecs seconds"
    printf "\n"
    # Only query ZFS when the zpool tool exists on this host.
    # Fixed: the old check was `! type "$zpool"`, which tested an unset shell
    # variable and therefore ALWAYS ran the zpool commands (erroring on
    # non-ZFS machines).
    if type zpool &> /dev/null; then
      printf "$BOLD Zpool status: $ENDCOLOR\n"
      zpool status -x | sed -e 's/^/ /'
    fi
    if type zpool &> /dev/null; then
      printf "$BOLD Zpool usage: $ENDCOLOR\n"
      zpool list -Ho name,cap,size | awk '{ printf("%-10s%+3s used out of %+5s\n", $1, $2, $3); }' | sed -e 's/^/ /'
    fi
    printf "\n"
    # Fixed: `$BOLDService` referenced an undefined variable (bash reads the
    # longest valid name), so the heading lost both the bold attribute and
    # the word "Service".
    printf "$BOLD%s$ENDCOLOR\n" "Service status"

    while IFS= read -r line; do
      # Transient container .scope units are noise; skip them.
      if [[ $line =~ ".scope" ]]; then
        continue
      fi
      if echo "$line" | grep -q 'failed'; then
        service_name=$(echo $line | awk '{print $2;}' | sed 's/podman-//g')
        printf "$RED• $ENDCOLOR%-50s $RED[failed]$ENDCOLOR\n" "$service_name"
      elif echo "$line" | grep -q 'running'; then
        service_name=$(echo $line | awk '{print $1;}' | sed 's/podman-//g')
        printf "$GREEN• $ENDCOLOR%-50s $GREEN[active]$ENDCOLOR\n" "$service_name"
      else
        echo "service status unknown"
      fi
    done <<< "$service_status"
  '';
in
{
  options.mySystem.system.motd = {
    enable = lib.mkEnableOption "MOTD";
    networkInterfaces = lib.mkOption {
      description = "Network interfaces to monitor";
      type = lib.types.listOf lib.types.str;
      # default = lib.mapAttrsToList (_: val: val.interface)
      default = [ ];
    };
  };

  config = lib.mkIf cfg.enable {
    # figlet/lolcat are runtime dependencies of the script above.
    environment.systemPackages = [
      motd
      pkgs.lolcat
      pkgs.figlet
    ];
    # Show the MOTD on interactive fish shells only.
    programs.fish.interactiveShellInit = lib.mkIf config.programs.fish.enable ''
      motd
    '';
  };
}
106 |
--------------------------------------------------------------------------------
/nixos/modules/applications/misc/redlib/default.nix:
--------------------------------------------------------------------------------
{
  lib,
  config,
  ...
}:
let
  inherit (lib) mkEnableOption mkOption mkIf types;

  app = "redlib";
  category = "services";
  description = "reddit alternative frontend";
  image = "quay.io/redlib/redlib:latest@sha256:dffb6c5a22f889d47d8e28e33411db0fb6c5694599f72cf740c912c12f5fc1c6";
  user = "redlib";
  group = "redlib";
  port = 8080;

  cfg = config.mySystem.${category}.${app};

  # Development instances get a "-dev" host suffix so prod and dev coexist.
  host = app + (if cfg.dev then "-dev" else "");
  url = "${host}.${config.networking.domain}";
in
{
  options.mySystem.${category}.${app} = {
    enable = mkEnableOption app;
    addToHomepage = mkEnableOption "Add ${app} to homepage" // {
      default = true;
    };
    monitor = mkOption {
      type = types.bool;
      description = "Enable gatus monitoring";
      default = true;
    };
    prometheus = mkOption {
      type = types.bool;
      description = "Enable prometheus scraping";
      default = true;
    };
    addToDNS = mkOption {
      type = types.bool;
      description = "Add to DNS list";
      default = true;
    };
    dev = mkOption {
      type = types.bool;
      description = "Development instance";
      default = false;
    };
    backups = mkOption {
      type = types.bool;
      description = "Enable local backups";
      default = true;
    };
  };

  config = mkIf cfg.enable {
    # Let the admin user read the app's files/storage.
    users.users.truxnell.extraGroups = [ group ];

    ## Container
    virtualisation.oci-containers.containers = config.lib.mySystem.mkContainer {
      inherit app image user group;
    };

    ### Gatus monitoring
    mySystem.services.gatus.monitors = mkIf cfg.monitor [
      {
        name = app;
        group = category;
        # Probe the settings page; pinging the main page is slow and creates
        # upstream requests.
        url = "https://${url}/settings";
        interval = "1m";
        conditions = [
          "[CONNECTED] == true"
          "[STATUS] == 200"
          "[RESPONSE_TIME] < 1500"
        ];
      }
    ];

    ### Ingress
    services.nginx.virtualHosts.${url} = {
      useACMEHost = config.networking.domain;
      forceSSL = true;
      locations."^~ /" = {
        proxyPass = "http://${app}:${builtins.toString port}";
        # Resolve the container name at request time (podman network DNS).
        extraConfig = "resolver 10.88.0.1;";
      };
    };
  };
}
134 |
--------------------------------------------------------------------------------
/nixos/modules/applications/media/cross-seed/default.nix:
--------------------------------------------------------------------------------
{
  lib,
  config,
  pkgs,
  ...
}:
with lib;
let
  cfg = config.mySystem.${category}.${app};
  app = "cross-seed";
  category = "services";
  description = "xseed";
  image = "ghcr.io/cross-seed/cross-seed:6.13.6";
  # cross-seed runs as uid/gid 568 inside the container.
  user = "568"; # string
  group = "568"; # string
  port = 2468; # int
  appFolder = "/var/lib/${app}";
in
{
  options.mySystem.${category}.${app} = {
    enable = mkEnableOption "${app}";
    addToHomepage = mkEnableOption "Add ${app} to homepage" // {
      default = true;
    };
    monitor = mkOption {
      type = lib.types.bool;
      description = "Enable gatus monitoring";
      default = true;
    };
    prometheus = mkOption {
      type = lib.types.bool;
      description = "Enable prometheus scraping";
      default = true;
    };
    addToDNS = mkOption {
      type = lib.types.bool;
      description = "Add to DNS list";
      default = true;
    };
    dev = mkOption {
      type = lib.types.bool;
      description = "Development instance";
      default = false;
    };
    backup = mkOption {
      type = lib.types.bool;
      description = "Enable backups";
      default = true;
    };
  };

  config = mkIf cfg.enable {

    ## Secrets
    # Entire config.js is a secret (it carries tracker/client API keys);
    # it is bind-mounted into the container below.
    sops.secrets."${category}/${app}/config.js" = {
      sopsFile = ./secrets.sops.yaml;
      owner = "kah";
      group = "kah";
      restartUnits = [ "podman-${app}.service" ];
    };

    users.users.truxnell.extraGroups = [ group ];

    # Folder perms - only for containers
    systemd.tmpfiles.rules = [
      "d ${appFolder}/ 0750 ${user} ${group} -"
    ];

    environment.persistence."${config.mySystem.system.impermanence.persistPath}" =
      lib.mkIf config.mySystem.system.impermanence.enable
        {
          directories = [
            {
              directory = appFolder;
              inherit user;
              inherit group;
              mode = "750";
            }
          ];
        };

    ## service
    virtualisation.oci-containers.containers.${app} = {
      image = "${image}";
      user = "568:568";
      cmd = [ "daemon" ];
      volumes = [
        "${appFolder}:/config:rw"
        "/tank/natflix/downloads/:/tank/natflix/downloads/:rw"
        # qbittorrent's .torrent store, read-only, for cross-seed matching.
        "/var/lib/qbittorrent/qBittorrent/BT_backup:/qbit-torrents:ro"
        ''${config.sops.secrets."${category}/${app}/config.js".path}:/config/config.js:ro''
        "/etc/localtime:/etc/localtime:ro"
      ];

    };

    # Fixed: this previously configured `systemd.services.cross-seed`, a unit
    # that does not exist — the oci-containers podman backend generates
    # `podman-cross-seed.service` (see restartUnits above) — and listed
    # `cross-seed.service` in its own requires/after, a self-dependency.
    # Target the real podman unit and order it after qbittorrent's unit.
    systemd.services."podman-${app}" = {
      serviceConfig = {
        # Give qbittorrent time to finish starting before cross-seed connects.
        ExecStartPre = "${pkgs.coreutils}/bin/sleep 30";
      };
      requires = [ "podman-qbittorrent.service" ];
      after = [ "podman-qbittorrent.service" ];
    };

    services.nginx.virtualHosts."${app}.${config.networking.domain}" = {
      useACMEHost = config.networking.domain;
      forceSSL = true;
      locations."^~ /" = {
        proxyPass = "http://${app}:${builtins.toString port}";
        # Resolve the container name at request time (podman network DNS).
        extraConfig = "resolver 10.88.0.1;";
      };
    };

    services.restic.backups = config.lib.mySystem.mkRestic {
      inherit app user;
      excludePaths = [ "Backups" ];
      paths = [ appFolder ];
      inherit appFolder;
    };

  };
}
132 |
--------------------------------------------------------------------------------
/nixos/modules/applications/media/sonarr/default.nix:
--------------------------------------------------------------------------------
{
  lib,
  config,
  ...
}:
with lib;
let
  cfg = config.mySystem.${category}.${app};
  app = "sonarr";
  category = "services";
  description = "TV organisar";
  user = "kah"; # string
  group = "kah"; # string
  port = 8989; # int
  appFolder = "/var/lib/${app}";
  # Development instances get a "-dev" host suffix so prod and dev coexist.
  host = "${app}" + (if cfg.dev then "-dev" else "");
  url = "${host}.${config.networking.domain}";
in
{
  options.mySystem.${category}.${app} = {
    enable = mkEnableOption "${app}";
    addToHomepage = mkEnableOption "Add ${app} to homepage" // {
      default = true;
    };
    monitor = mkOption {
      type = lib.types.bool;
      description = "Enable gatus monitoring";
      default = true;
    };
    prometheus = mkOption {
      type = lib.types.bool;
      description = "Enable prometheus scraping";
      default = true;
    };
    addToDNS = mkOption {
      type = lib.types.bool;
      description = "Add to DNS list";
      default = true;
    };
    dev = mkOption {
      type = lib.types.bool;
      description = "Development instance";
      default = false;
    };
    backup = mkOption {
      type = lib.types.bool;
      description = "Enable backups";
      default = true;
    };
  };

  config = mkIf cfg.enable {

    ## Secrets
    sops.secrets."${category}/${app}/env" = {
      sopsFile = ./secrets.sops.yaml;
      owner = user;
      inherit group;
      restartUnits = [ "${app}.service" ];
    };

    # Let the admin user read the app's files.
    users.users.truxnell.extraGroups = [ group ];

    environment.persistence."${config.mySystem.system.impermanence.persistPath}" =
      lib.mkIf config.mySystem.system.impermanence.enable
        {
          directories = [
            {
              directory = appFolder;
              # Consistency: reuse the shared user/group bindings instead of
              # repeating the literal "kah" (matches the lidarr module).
              inherit user;
              inherit group;
              mode = "750";
            }
          ];
        };

    ## service (native NixOS module)
    services.sonarr = {
      enable = true;
      # Consistency: reuse appFolder instead of re-spelling the path.
      dataDir = appFolder;
      inherit user group;
    };

    ### gatus integration
    mySystem.services.gatus.monitors = mkIf cfg.monitor [
      {
        name = app;
        group = "${category}";
        url = "https://${url}";
        interval = "1m";
        conditions = [
          "[CONNECTED] == true"
          "[STATUS] == 200"
          "[RESPONSE_TIME] < 1500"
        ];
      }
    ];

    ### Ingress
    services.nginx.virtualHosts.${url} = {
      forceSSL = true;
      useACMEHost = config.networking.domain;
      locations."^~ /" = {
        proxyPass = "http://127.0.0.1:${builtins.toString port}";
        proxyWebsockets = true;
      };
    };

    ### backups
    # Warn loudly when backups are off anywhere but a development host.
    warnings = [
      (mkIf (
        !cfg.backup && config.mySystem.purpose != "Development"
      ) "WARNING: Backups for ${app} are disabled!")
    ];

    services.restic.backups = mkIf cfg.backup (
      config.lib.mySystem.mkRestic {
        inherit app user;
        paths = [ appFolder ];
        inherit appFolder;
      }
    );

  };
}
139 |
--------------------------------------------------------------------------------
/nixos/modules/applications/media/lidarr/default.nix:
--------------------------------------------------------------------------------
{
  lib,
  config,
  ...
}:
let
  inherit (lib) mkEnableOption mkOption mkIf types;

  app = "lidarr";
  category = "services";
  description = "Music management";
  user = "kah";
  group = "kah";
  port = 8686;
  appFolder = "/var/lib/${app}";

  cfg = config.mySystem.${category}.${app};

  # Development instances get a "-dev" host suffix so prod and dev coexist.
  host = app + (if cfg.dev then "-dev" else "");
  url = "${host}.${config.networking.domain}";
in
{
  options.mySystem.${category}.${app} = {
    enable = mkEnableOption app;
    addToHomepage = mkEnableOption "Add ${app} to homepage" // {
      default = true;
    };
    monitor = mkOption {
      type = types.bool;
      description = "Enable gatus monitoring";
      default = true;
    };
    prometheus = mkOption {
      type = types.bool;
      description = "Enable prometheus scraping";
      default = true;
    };
    addToDNS = mkOption {
      type = types.bool;
      description = "Add to DNS list";
      default = true;
    };
    dev = mkOption {
      type = types.bool;
      description = "Development instance";
      default = false;
    };
    backup = mkOption {
      type = types.bool;
      description = "Enable backups";
      default = true;
    };
  };

  config = mkIf cfg.enable {
    ## Secrets
    sops.secrets."${category}/${app}/env" = {
      sopsFile = ./secrets.sops.yaml;
      owner = user;
      inherit group;
      restartUnits = [ "${app}.service" ];
    };

    # Let the admin user read the app's files.
    users.users.truxnell.extraGroups = [ group ];

    # Keep app state across reboots on impermanent roots.
    environment.persistence."${config.mySystem.system.impermanence.persistPath}" =
      mkIf config.mySystem.system.impermanence.enable
        {
          directories = [
            {
              directory = appFolder;
              inherit user group;
              mode = "750";
            }
          ];
        };

    ## Service (native NixOS module)
    services.lidarr = {
      enable = true;
      dataDir = appFolder;
      inherit user group;
    };

    ### Gatus monitoring
    mySystem.services.gatus.monitors = mkIf cfg.monitor [
      {
        name = app;
        group = category;
        url = "https://${url}";
        interval = "1m";
        conditions = [
          "[CONNECTED] == true"
          "[STATUS] == 200"
          "[RESPONSE_TIME] < 1500"
        ];
      }
    ];

    ### Ingress
    services.nginx.virtualHosts.${url} = {
      forceSSL = true;
      useACMEHost = config.networking.domain;
      locations."^~ /" = {
        proxyPass = "http://127.0.0.1:${builtins.toString port}";
        proxyWebsockets = true;
      };
    };

    ### Backups
    # Warn loudly when backups are off anywhere but a development host.
    warnings = [
      (mkIf (
        !cfg.backup && config.mySystem.purpose != "Development"
      ) "WARNING: Backups for ${app} are disabled!")
    ];

    services.restic.backups = mkIf cfg.backup (
      config.lib.mySystem.mkRestic {
        inherit app user;
        paths = [ appFolder ];
        inherit appFolder;
      }
    );
  };
}
140 |
--------------------------------------------------------------------------------
/nixos/modules/applications/media/qbittorrent/default.nix:
--------------------------------------------------------------------------------
{
  lib,
  config,
  pkgs,
  ...
}:
with lib;
let
  app = "qbittorrent";
  user = "kah"; # string
  group = "kah"; # string
  port = 8080; # int — web UI port inside the container
  qbit_port = 32189; # BitTorrent listen port, exposed on the host
  cfg = config.mySystem.services.${app};
  appFolder = "/var/lib/${app}";
  # Script qbittorrent runs on torrent completion to notify cross-seed.
  # qbit command: /scripts/xseed.sh "%F"
  # SECURITY(review): the cross-seed API key is hard-coded here and ends up
  # world-readable in the nix store — move it to a sops secret.
  xseedShell =
    pkgs.writeScriptBin "xseed.sh" # script to call cross-seed upon torrent finish
      ''
        #!/bin/bash
        # qbit command: /scripts/xseed.sh "%F"
        /usr/bin/curl -H "X-Api-Key: 39934d99311cdb0695bf13b10dde639d892d0cb13c615ddb" -X POST --data-urlencode "infoHash=$1" https://cross-seed.trux.dev/api/webhook
      '';

in
{

  imports = [
    # ./qbtools.nix
    ./lts.nix
  ];

  options.mySystem.services.${app} = {
    enable = mkEnableOption "${app}";
    addToHomepage = mkEnableOption "Add ${app} to homepage" // {
      default = true;
    };
    qbtools = mkEnableOption "qbtools" // {
      default = true;
    };
    openFirewall = mkEnableOption "Open firewall for ${app}" // {
      default = true;
    };

  };

  config = mkIf cfg.enable {
    # ensure folder exist and has correct owner/group
    systemd.tmpfiles.rules = [
      "d ${appFolder} 0750 ${user} ${group} -" # The - disables automatic cleanup, so the file wont be removed after a period
    ];

    virtualisation.oci-containers.containers.${app} =
      let
        image = "ghcr.io/home-operations/qbittorrent:5.1.4";
      in
      {
        image = "${image}";
        user = "568:568";
        environment = {
          QBITTORRENT__BT_PORT = builtins.toString qbit_port;
        };
        # Map the BitTorrent port straight through so peers can connect.
        ports = [ "${builtins.toString qbit_port}:${builtins.toString qbit_port}" ];
        volumes = [
          "${appFolder}:/config:rw"
          "${xseedShell}/bin/xseed.sh:/scripts/xseed.sh:Z"
          "/tank/natflix/downloads/qbittorrent:/tank/natflix/downloads/qbittorrent:rw"
          "/mnt/cache:/cache"
          "/etc/localtime:/etc/localtime:ro"
        ];
      };

    # Keep app state across reboots on impermanent roots.
    environment.persistence."${config.mySystem.system.impermanence.persistPath}" =
      lib.mkIf config.mySystem.system.impermanence.enable
        {
          directories = [
            {
              directory = appFolder;
              inherit user;
              inherit group;
              mode = "750";
            }
          ];
        };

    services.nginx.virtualHosts."${app}.${config.networking.domain}" = {
      useACMEHost = config.networking.domain;
      forceSSL = true;
      locations."^~ /" = {
        proxyPass = "http://${app}:${builtins.toString port}";
        # Resolve the container name at request time (podman network DNS).
        extraConfig = "resolver 10.88.0.1;";
      };
    };

    # gotta open up that firewall — BitTorrent needs both TCP and UDP.
    networking.firewall = mkIf cfg.openFirewall {
      allowedTCPPorts = [ qbit_port ];
      allowedUDPPorts = [ qbit_port ];
    };

    mySystem.services.gatus.monitors = [
      {
        name = app;
        group = "media";
        # Fixed: was config.mySystem.domain; every other vhost and monitor in
        # these modules builds URLs from config.networking.domain.
        url = "https://${app}.${config.networking.domain}";
        interval = "1m";
        conditions = [
          "[CONNECTED] == true"
          "[STATUS] == 200"
          "[RESPONSE_TIME] < 1500"
        ];
      }
    ];

    services.restic.backups = config.lib.mySystem.mkRestic {
      inherit app user;
      excludePaths = [ "Backups" ];
      paths = [ appFolder ];
      inherit appFolder;
    };

  };
}
127 |
--------------------------------------------------------------------------------
/nixos/modules/applications/media/radarr/default.nix:
--------------------------------------------------------------------------------
{
  lib,
  config,
  pkgs,
  ...
}:
with lib;
let
  cfg = config.mySystem.${category}.${app};
  app = "radarr";
  category = "services";
  description = "Movie organisar";
  user = "kah"; # string
  group = "kah"; # string
  port = 7878; # int
  appFolder = "/var/lib/${app}";
  # Development instances get a "-dev" host suffix so prod and dev coexist.
  host = "${app}" + (if cfg.dev then "-dev" else "");
  url = "${host}.${config.networking.domain}";
in
{
  options.mySystem.${category}.${app} = {
    enable = mkEnableOption "${app}";
    addToHomepage = mkEnableOption "Add ${app} to homepage" // {
      default = true;
    };
    monitor = mkOption {
      type = lib.types.bool;
      description = "Enable gatus monitoring";
      default = true;
    };
    prometheus = mkOption {
      type = lib.types.bool;
      description = "Enable prometheus scraping";
      default = true;
    };
    addToDNS = mkOption {
      type = lib.types.bool;
      description = "Add to DNS list";
      default = true;
    };
    dev = mkOption {
      type = lib.types.bool;
      description = "Development instance";
      default = false;
    };
    backup = mkOption {
      type = lib.types.bool;
      description = "Enable backups";
      default = true;
    };
  };

  config = mkIf cfg.enable {

    ## Secrets
    sops.secrets."${category}/${app}/env" = {
      sopsFile = ./secrets.sops.yaml;
      owner = user;
      inherit group;
      restartUnits = [ "${app}.service" ];
    };

    # Let the admin user read the app's files.
    users.users.truxnell.extraGroups = [ group ];

    environment.persistence."${config.mySystem.system.impermanence.persistPath}" =
      lib.mkIf config.mySystem.system.impermanence.enable
        {
          directories = [
            {
              directory = appFolder;
              # Consistency: reuse the shared user/group bindings instead of
              # repeating the literal "kah" (matches the lidarr module).
              inherit user;
              inherit group;
              mode = "750";
            }
          ];
        };

    ## service (native NixOS module)
    services.radarr = {
      enable = true;
      # Consistency: reuse appFolder instead of re-spelling the path.
      dataDir = appFolder;
      inherit user group;
      package = pkgs.radarr;
    };

    ### gatus integration
    mySystem.services.gatus.monitors = mkIf cfg.monitor [
      {
        name = app;
        group = "${category}";
        url = "https://${url}";
        interval = "1m";
        conditions = [
          "[CONNECTED] == true"
          "[STATUS] == 200"
          "[RESPONSE_TIME] < 1500"
        ];
      }
    ];

    ### Ingress
    services.nginx.virtualHosts.${url} = {
      forceSSL = true;
      useACMEHost = config.networking.domain;
      locations."^~ /" = {
        proxyPass = "http://127.0.0.1:${builtins.toString port}";
        proxyWebsockets = true;
      };
    };

    ### backups
    # Warn loudly when backups are off anywhere but a development host.
    warnings = [
      (mkIf (
        !cfg.backup && config.mySystem.purpose != "Development"
      ) "WARNING: Backups for ${app} are disabled!")
    ];

    services.restic.backups = mkIf cfg.backup (
      config.lib.mySystem.mkRestic {
        inherit app user;
        paths = [ appFolder ];
        inherit appFolder;
      }
    );

  };
}
141 |
--------------------------------------------------------------------------------