├── .gitattributes
├── .gitignore
├── .vscode
│   ├── extensions.json
│   └── settings.json
├── CITATION.cff
├── LICENSE
├── README.md
├── checks
│   ├── container-sizes.nix.md
│   ├── default.nix
│   ├── demo.nix.md
│   ├── install-size.nix.md
│   ├── nix-copy-update.nix.md
│   ├── nix_store_send.nix.md
│   ├── reconfig-time.nix.md
│   ├── stream-rsync.nix.md
│   ├── stream-size.nix.md
│   ├── stream-update.nix.md
│   ├── test.nix.md
│   └── update-glibc.nix.md
├── containers
│   ├── foreign.nix.md
│   └── native.nix.md
├── docs
│   └── relations.drawio.svg
├── flake.lock
├── flake.nix
├── hosts
│   ├── efi
│   │   ├── README.md
│   │   ├── default.nix
│   │   ├── machine.nix
│   │   ├── minify.lsmod
│   │   └── systems
│   ├── imx
│   │   ├── README.md
│   │   ├── default.nix
│   │   ├── machine.nix
│   │   ├── minify.lsmod
│   │   └── systems
│   └── rpi
│       ├── README.md
│       ├── default.nix
│       ├── machine.nix
│       ├── minify.lsmod
│       └── systems
│           └── test1.nix.md
├── lib
│   ├── data
│   │   ├── data.py
│   │   ├── dref.py
│   │   ├── fig-oci_combined.py
│   │   ├── fig-reboot.py
│   │   ├── fig-update-size.py
│   │   ├── parse_logs.py
│   │   └── system-listing.py
│   ├── default.nix
│   ├── misc.nix
│   ├── testing.nix
│   └── util.ts
├── modules
│   ├── README.md
│   ├── default.nix
│   ├── hermetic-bootloader.nix.md
│   ├── hermetic-bootloader.sh
│   ├── minify.nix.md
│   └── target
│       ├── containers.nix.md
│       ├── default.nix
│       ├── defaults.nix.md
│       ├── fs.nix.md
│       ├── specs.nix.md
│       └── watchdog.nix.md
├── out
│   └── .gitignore
├── overlays
│   ├── README.md
│   ├── default.nix
│   ├── fixes.nix.md
│   ├── nar-hash.cc
│   ├── nix-store-recv.sh
│   ├── nix-store-send.nix.md
│   └── nix-store-send.sh
├── patches
│   ├── README.md
│   ├── default.nix
│   ├── nix-store-send.patch
│   └── nixpkgs
│       ├── default.nix
│       ├── make-bootable-optional.patch
│       ├── make-required-packages-optional.patch
│       ├── make-switchable-optional-22.11.patch
│       └── make-switchable-optional-23.05.patch
└── utils
    ├── container-nix
    ├── lctes23-artifact
    │   ├── README.md
    │   ├── TODO.md
    │   └── artifact
    ├── res
    │   ├── README.md
    │   ├── dropbear_ecdsa_host_key
    │   ├── dropbear_ecdsa_host_key.pub
    │   ├── niklas-gollenstede.pub
    │   ├── root.sha256-pass
    │   ├── root.yescrypt-pass
    │   ├── ssh_dummy_1
    │   ├── ssh_dummy_1.pub
    │   ├── ssh_testkey_1
    │   ├── ssh_testkey_1.pub
    │   ├── ssh_testkey_2
    │   └── ssh_testkey_2.pub
    └── setup.sh
/.gitattributes:
--------------------------------------------------------------------------------
1 | # treat lock file as binary (collapsed diff, no line count, no EOL treatment)
2 | flake.lock binary
3 | modules/lsmod.out binary
4 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 |
2 | /result
3 | /result-*
4 | /.nix/
5 | __pycache__/
6 |
--------------------------------------------------------------------------------
/.vscode/extensions.json:
--------------------------------------------------------------------------------
1 | {
2 | "recommendations": [
3 | "jnoortheen.nix-ide",
4 | "streetsidesoftware.code-spell-checker"
5 | ]
6 | }
7 |
--------------------------------------------------------------------------------
/CITATION.cff:
--------------------------------------------------------------------------------
1 | cff-version: 1.2.0
2 | message: If you use this software, please cite it using these metadata.
3 | title: 'reUpNix: Reconfigurable and Updateable Embedded Systems'
4 | authors:
5 | - given-names: Niklas
6 | family-names: Gollenstede
7 | orcid: https://orcid.org/0009-0004-9395-7318
8 | - given-names: Christian
9 | family-names: Dietrich
10 | orcid: https://orcid.org/0000-0001-9258-0513
11 | version: lctes23-crv
12 | date-released: 2023-05-12
13 | identifiers:
14 | - description: Artifact reference for the LCTES 2023 publication.
15 | type: doi
16 | value: 10.5281/zenodo.7929610
17 | license: MIT
18 | repository-code: https://github.com/tuhhosg/reupnix
19 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2022 - 2023 Niklas Gollenstede
2 | Copyright (c) 2023 Christian Dietrich
3 |
4 | Permission is hereby granted, free of charge, to any person obtaining
5 | a copy of this software and associated documentation files (the
6 | "Software"), to deal in the Software without restriction, including
7 | without limitation the rights to use, copy, modify, merge, publish,
8 | distribute, sublicense, and/or sell copies of the Software, and to
9 | permit persons to whom the Software is furnished to do so, subject to
10 | the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be
13 | included in all copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
19 | LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
20 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
21 | WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 | # reUpNix: Reconfigurable and Updateable Embedded Systems
3 |
4 | This repository contains the practical contributions of (my master's thesis and) the [paper named above](https://doi.org/10.1145/3589610.3596273).
5 | The abstract:
6 |
7 | > Managing the life cycle of an embedded Linux stack is difficult, as we have to integrate in-house and third-party services, prepare firmware images, and update the devices in the field.
8 | > Further, if device deployment is expensive (e.g. in space), our stack should support multi-mission setups to make the best use of our investment.
9 | >
10 | > With reUpNix, we propose a methodology based on NixOS that provides reproducible, updateable, and reconfigurable embedded Linux stacks.
11 | > For this, we identify the shortcomings of NixOS for use on embedded devices, reduce its basic installation size by up to 86 percent, and make system updates failure atomic and significantly smaller.
12 | > We also allow integration of third-party OCI images, which, due to fine-grained file deduplication, require up to 27 percent less on-disk space.
13 |
14 | The differential update transfer mechanism `nix store send` is implemented as part of Nix, and is included here as one big [patch](./patches/nix-store-send.patch) applied to the "`nix`" [flake input](./flake.nix) ([`nix-store-send`](./overlays/nix-store-send.nix.md) implements a previous version).
15 |
16 | [`modules/hermetic-bootloader.nix.md`](./modules/hermetic-bootloader.nix.md) implements the bootloader configuration, and [`modules/minify.nix.md`](./modules/minify.nix.md) realizes the reduction in installation size.
17 |
18 | Container integration is implemented in [`modules/target/containers.nix.md`](./modules/target/containers.nix.md), and the configuration model (Machine Config / System Profile) by the layout of the individual [hosts](./hosts/), [`lib/misc.nix#importMachineConfig`](./lib/misc.nix), and [`modules/target/specs.nix.md`](./modules/target/specs.nix.md).
19 |
20 | The description for the LCTES 2023 Artifact Submission can be found in [`utils/lctes23-artifact`](./utils/lctes23-artifact). [DOI](https://zenodo.org/badge/latestdoi/639381697)
21 |
22 |
23 | ## Repo Layout
24 |
25 | This is a nix flake repository, so [`./flake.nix`](./flake.nix) is the entry point and export mechanism for almost everything.
26 |
27 | [`./lib/`](./lib/) adds some additional library functions as `.th` to the default `nixpkgs.lib`.
28 | These get passed to all other files as `inputs.self.lib.__internal__.th`.
29 |
30 | [`./hosts/`](./hosts/) contains the entry point NixOS config modules for each host(-type).
31 | The `default.nix` specifies the names of multiple `instances` of the host type. The ones with `-minimal` suffix have a standard set of [system minifications](./modules/minify.nix.md) applied, the `-baseline` ones are without minification, and the ones without suffix have some additional debugging bloat enabled.
32 | The `checks` (see below) may further modify the host definitions, but those modifications are not directly exposed as flake outputs.
33 |
34 | [`./modules/`](./modules/) contains NixOS configuration modules. Added options' names start with `th.` (unless they are meant as fixes to the existing options set).
35 | [`./modules/default.nix`](./modules/default.nix) exports an attr set of the modules defined in the individual files, which is also what is exported as `flake#outputs.nixosModules` and merged as `flake#outputs.nixosModules.default`.
36 |
37 | [`./overlays/`](./overlays/) contains nixpkgs overlays. Some modify packages from `nixpkgs`, others add packages not in there (yet).
38 | [`./overlays/default.nix`](./overlays/default.nix) exports an attr set of the overlays defined in the individual files, which is also what is exported as `flake#outputs.overlays` and merged as `flake#outputs.overlays.default`. Additionally, the added or modified packages are exported as `flake#outputs.packages.<system>.*`.
39 |
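For orientation, here is a minimal sketch of how another flake could consume these outputs; the input name `reupnix` and the `./configuration.nix` module are made up for illustration (the hosts in this repository are instead defined via [`./hosts/`](./hosts/) and exported by the flake itself):

```nix
{
    inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-23.05"; # (any recent nixpkgs; purely illustrative)
    inputs.reupnix.url = "github:tuhhosg/reupnix";
    outputs = { self, nixpkgs, reupnix, ... }: {
        nixosConfigurations.example = nixpkgs.lib.nixosSystem {
            system = "x86_64-linux";
            modules = [
                reupnix.nixosModules.default # all modules from ./modules/, merged
                { nixpkgs.overlays = [ reupnix.overlays.default ]; } # all overlays from ./overlays/, merged
                ./configuration.nix # the consumer's own host configuration
            ];
        };
    };
}
```
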
40 | [`./utils/`](./utils/) contains the installation and maintenance scripts/functions. These are wrapped by the flake to have access to variables describing a specific host, and thus (with few exceptions) shouldn't be called directly.
41 | See `apps` and `devShells` exported by the flake, plus the [installation](#host-installation--initial-setup) section below.
42 |
43 | [`./checks/`](./checks/) contains tests and evaluations. These are built as part of `nix flake check` and can individually be built and executed by running `nix run .#check:<name> -- <args...>`.
44 | Some checks produce output files in [`./out/`](./out/). These contain the data for publications and can be copied to the `data/` dir of the papers.
45 |
46 |
47 | ## Host Installation / Initial Setup
48 |
49 | The installation of the configured hosts is completely scripted and should work on any Linux with KVM enabled (or root access) and Nix installed for the current user (or for root).
50 | See [WipLib's `install-system`](https://github.com/NiklasGollenstede/nix-wiplib/blob/master/lib/setup-scripts/README.md#install-system-documentation) for more details.
51 |
52 |
53 | ## Concepts
54 |
55 | ### `.xx.md` files
56 |
57 | Often, the concept expressed by a source code file is at least as important as the concrete implementation of it.
58 | Unfortunately, Nix code isn't particularly readable, and its documentation tooling is nowhere near on par with that of languages like TypeScript.
59 |
60 | Embedding the source code "file" within a MarkDown file emphasizes the importance of textual expressions of the motivation and context of each piece of source code, and should thus incentivize writing sufficient documentation.
61 |
62 | Technically, Nix files (and most other code files) don't need any specific file extension. By embedding the MarkDown header in a block comment, the file can still be a valid source code file, while the MarkDown header ending in a typed code block ensures proper syntax highlighting of the source code in editors and online repos.
63 |
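For illustration, the `.nix.md` files in this repository roughly follow this skeleton (compare e.g. the header of [`./checks/test.nix.md`](./checks/test.nix.md); the function signature varies with how the file is imported):

````nix
/*

# Some Component

MarkDown documentation: motivation, context, usage notes, ...


## Implementation

```nix
#*/# end of MarkDown, beginning of Nix:
dirname: inputs: pkgs: {
    # ... the actual Nix code ...
}
````

The `/* ... #*/` pair keeps the MarkDown header inside a Nix block comment, so the whole file still parses as Nix, while ending the header with a `nix`-typed code fence makes renderers highlight the rest of the file as Nix code (that fence is typically left open until the end of the file, or closed inside another block comment).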
64 |
65 | ## Notepad
66 |
67 | ### Nix store deduplication
68 |
69 | To measure the effectiveness of deduplication on a `/nix/store/`, run:
70 | ```bash
71 | is=0 ; would=0 ; while read perm links user group size rest ; do is=$(( is + size )) ; would=$(( would + (links - 1) * size )) ; done < <( \ls -Al /nix/store/.links | tail -n +2 ) ; echo "Actual size: $is ; without dedup: $would ; gain: $( bc <<< "scale=2 ; $would/$is" )"
72 | ```
73 |
74 |
75 | ## Authors / License
76 |
77 | All files in this repository ([`reupnix`](https://github.com/tuhhosg/reupnix)) (except LICENSE) are authored/created by the authors of this repository, and are copyright 2023 [Christian Dietrich](https://github.com/stettberger) (`lib/data/*`) and copyright 2022 - 2023 [Niklas Gollenstede](https://github.com/NiklasGollenstede) (the rest).
78 |
79 | See [`patches/README.md#license`](./patches/README.md#license) for the licensing of the included [patches](./patches/).
80 | All other parts of this software may be used under the terms of the MIT license, as detailed in [`./LICENSE`](./LICENSE).
81 |
82 | This license applies to the files in this repository only.
83 | Any external packages are built from sources that have their own licenses, which should be the ones indicated in the package's metadata.
84 |
--------------------------------------------------------------------------------
/checks/default.nix:
--------------------------------------------------------------------------------
1 | dirname: inputs: pkgs: let
2 | lib = inputs.self.lib.__internal__; test = lib.th.testing pkgs;
3 | imports = lib.fun.importFilteredFlattened dirname inputs { except = [ "default" ]; };
4 |
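  # Wraps a check script: it runs in a throw-away temporary directory (a tmpfs when running as root), with a restricted PATH, with »$out« pointing at »./out« of the invoking directory, and with the CLI arguments parsed into »args« (passing »--debug« enables »set -x«).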
5 | wrap = script: ''
6 | set -eu
7 | PATH=${lib.makeBinPath (lib.unique (map (p: p.outPath) (lib.filter lib.isDerivation pkgs.stdenv.allowedRequisites)))}
8 | export out=$PWD/out ; cd /"$(mktemp -d)" && [[ $PWD != / ]] || exit 1
9 | if [[ $(id -u) == 0 ]] ; then
10 | ${pkgs.util-linux}/bin/mount -t tmpfs tmpfs $PWD ; cd $PWD ; trap "${pkgs.util-linux}/bin/umount -l $PWD ; rmdir $PWD" exit # requires root
11 | else
12 | trap "find $PWD -type d -print0 | xargs -0 chmod 700 ; rm -rf $PWD" exit # can be slow
13 | fi
14 | source ${lib.fun.bash.generic-arg-parse}
15 | unset SUDO_USER ; generic-arg-parse "$@"
16 | ( trap - exit
17 | if [[ ''${args[debug]:-} ]] ; then set -x ; fi
18 | ${script}
19 | )
20 | '';
21 |
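  # Builds one »check-<name>.sh« per file in this directory; a file can export either a single »script« or an attribute set of named »scripts«. The »all« check runs every other check (except »demo« and »test«) and reports which of them failed.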
22 | checks = let checks = let
23 | mkCheck = name: script: attrs: (pkgs.writeShellScriptBin "check-${name}.sh" (wrap script)).overrideAttrs (old: {
24 | passthru = (old.passthru or { }) // attrs;
25 | });
26 | in lib.fun.mapMerge (name: _import': let _import = _import' pkgs; in if (_import?script) then (
27 | { ${name} = mkCheck name _import.script _import; }
28 | ) else lib.fun.mapMerge (suffix: script: (
29 | { "${name}-${suffix}" = mkCheck "${name}:${suffix}" script _import; }
30 | )) _import.scripts) imports; in checks // {
31 | all = pkgs.writeShellScriptBin "check-all.sh" ''
32 | failed=0 ; warning='"!!!!!!!!!!!!!!!!!!!!!!!'
33 | ${(lib.concatStringsSep "\n" (lib.mapAttrsToList (k: v: ''
34 | echo ; echo ; ${test.frame ''echo "Running check:${k}:"''} ; echo
35 | ${v} || { failed=$? ; echo "$warning check:${k} failed with $failed $warning" 1>&2 ; }
36 | '') (builtins.removeAttrs checks [ "demo" "test" ])))}
37 | exit $failed
38 | '';
39 | };
40 |
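  # Exposes every check as a »check:<name>« flake app, plus »eval:<name>« apps that run the corresponding evaluation/plotting scripts from »../lib/data/« in a pinned Python environment (from »nixpkgs-unstable«, with a few extra packages built from PyPI).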
41 | apps = (lib.fun.mapMerge (k: v: {
42 | "check:${k}" = { type = "app"; program = "${v}"; };
43 | }) checks) // (let inherit (pkgs) system; in let
44 | pkgs = import inputs.nixpkgs-unstable { inherit system; };
45 | mkPipPackage = name: version: sha256: deps: extra: pkgs.python3Packages.buildPythonPackage ({
46 | pname = name; version = version; propagatedBuildInputs = deps;
47 | src = pkgs.python3Packages.fetchPypi { pname = name; version = version; sha256 = sha256; };
48 | } // extra);
49 | python3 = pkgs.buildPackages.python3.withPackages (pip3: (builtins.attrValues rec {
50 | inherit (pip3) ipykernel python-magic numpy pandas patsy plotnine mizani matplotlib setuptools statsmodels;
51 | plydata = mkPipPackage "plydata" "0.4.3" "Lq2LbAzy+fSDtvAXAmeJg5Qlg466hAsWDXRkOVap+xI=" [ pip3.pandas pip3.pytest ] { };
52 | versuchung = mkPipPackage "versuchung" "1.4.1" "iaBuJczQiJHLL6m8yh3RXFMrG9astbwIT+V/sWuUQW4=" [ pip3.papermill ] { doCheck = false; };
53 | osgpy = mkPipPackage "osgpy" "0.1.3" "ogEtmqOYKJ+7U6QE63qVR8Z8fofBApThu66QsbYpLio=" [ pip3.pandas plotnine plydata versuchung ] { };
54 | }));
55 | in lib.fun.mapMerge (name: { "eval:${name}" = rec { type = "app"; derivation = pkgs.writeShellScriptBin "${name}.py.sh" ''
56 | exec ${python3}/bin/python3 ${dirname}/../lib/data/${name}.py ./out/
57 | ''; program = "${derivation}"; }; }) [ "dref" "fig-oci_combined" "fig-reboot" "fig-update-size" ]);
58 |
59 | in { inherit checks apps; packages = checks; }
60 |
--------------------------------------------------------------------------------
/checks/demo.nix.md:
--------------------------------------------------------------------------------
1 | /*
2 |
3 | # Update via `nix-store-send`
4 |
5 | An example of the send stream, substantially updating `nixpkgs`.
6 |
7 |
8 | ## Notes
9 |
10 | * The closure installed ("installing system") is different from (and bigger than) the "old system", because the former also includes the test scripts and some dependencies they pull in.
11 | * "installing system" does completely contain "old system", though, so this should not matter.
12 | * Actually, it's odd that applying the stream decreases the disk utilization. Anything in "installing system" and not in "old system" should remain.
13 |
14 |
15 | ## Implementation
16 |
17 | ```nix
18 | #*/# end of MarkDown, beginning of Nix test:
19 | dirname: inputs: pkgs: let
20 | lib = inputs.self.lib.__internal__;
21 | inherit (lib.th.testing pkgs) toplevel override unpinInputs resize dropRefs time disk-usage nix-store-send run-in-vm;
22 |
23 | new = override (resize "512M" (unpinInputs inputs.self.nixosConfigurations."new:x64-minimal")) {
24 | wip.services.dropbear.rootKeys = lib.readFile "${inputs.self}/utils/res/ssh_testkey_1.pub";
25 | environment.etc.version.text = "new";
26 | };
27 | old = override (resize "512M" (unpinInputs inputs.self.nixosConfigurations."old:x64-minimal")) {
28 | wip.services.dropbear.rootKeys = lib.readFile "${inputs.self}/utils/res/ssh_testkey_1.pub";
29 | environment.etc.version.text = "old";
30 | };
31 |
32 | # * read-input: Receive instructions and new files
33 | # * restore-links: Restore hardlinks to existing files
34 | # * install: Install new components to the store
35 | # * save-links: Save hardlinks to new files
36 | # * delete: Remove old components from the store
37 | # * prune-links: Prune hardlinks to files no longer needed
38 | # * cleanup: Remove instructions and temporary hardlinks
39 | update-cmds = [
40 | { pre = ''
41 | echo -n continue? ; read
42 | ( set -x ; cat ${dropRefs (nix-store-send old new "")}/stream ) | $ssh -- '${time "nix-store-recv --only-read-input --status"}'
43 |
44 | ''; test = ''
45 | echo "total traffic:" $( ${pkgs.inetutils}/bin/ifconfig --interface=ens3 | ${pkgs.gnugrep}/bin/grep -Pe 'RX bytes:' )
46 |
47 | echo "This is version $(cat /etc/version)" ; if [[ $(cat /etc/version) != old ]] ; then echo "dang ..." ; false ; fi
48 |
49 | echo -n continue? ; read
50 | ${time "nix-store-recv --only-restore-links"}
51 | echo -n continue? ; read
52 | ${time "nix-store-recv --only-install"}
53 | echo -n continue? ; read
54 | ${time "nix-store-recv --only-save-links"}
55 |
56 | echo -n continue? ; read
57 | ${time "${dropRefs (toplevel new)}/install-bootloader 1"}
58 |
59 | echo -n continue? ; read
60 | ''; }
61 | { test = ''
62 | echo "This is version $(cat /etc/version)" ; if [[ $(cat /etc/version) != new ]] ; then echo "dang ..." ; false ; fi
63 |
64 | echo -n continue? ; read
65 | ${time "nix-store-recv --only-delete"}
66 | ${time "nix-store-recv --only-prune-links"}
67 | ${time "nix-store-recv --only-cleanup"}
68 |
69 | echo -n continue? ; read
70 | ''; }
71 | ];
72 |
73 | in { script = ''
74 | echo "old system: ${toplevel old}"
75 | echo "new system: ${toplevel new}"
76 | echo "Update stream stats (old -> new)"
77 | cat ${nix-store-send old new ""}/stats
78 | echo "stream size: $(du --apparent-size --block-size=1 ${nix-store-send old new ""}/stream | cut -f1)"
79 | echo "stream path: ${nix-store-send old new ""}"
80 | echo
81 | echo -n continue? ; read
82 | ${run-in-vm old { } update-cmds}
83 | ''; }
84 |
--------------------------------------------------------------------------------
/checks/install-size.nix.md:
--------------------------------------------------------------------------------
1 | /*
2 |
3 | # Installation Size
4 |
5 | Measurement of the size of a minimal, bare-bones system.
6 |
7 |
8 | ## Implementation
9 |
10 | ```nix
11 | #*/# end of MarkDown, beginning of Nix test:
12 | dirname: inputs: pkgs: let mkExport = target: let
13 | lib = inputs.self.lib.__internal__;
14 | inherit (lib.th.testing pkgs) toplevel override overrideBase unpinInputs measure-installation collect-deps merge-deps;
15 | cross-compile = localSystem: targetSystem: system: if (localSystem == targetSystem) then system else /* override */ (overrideBase system ({
16 | nixpkgs = lib.mkForce { localSystem.system = localSystem; crossSystem.system = targetSystem; };
17 | imports = [ { nixpkgs.overlays = [ (final: prev: { # When cross-compiling (from »nixpkgs-old«), some package pulled in by the default perl env fails to build. This manually applies #182457. Unfortunately, it does not seem to affect the specializations (probably related to the order of module content and argument evaluation), so building still fails. The 2nd import also does not help. This is not a problem for the »minimal« systems, since they don't include perl.
18 | perl534 = prev.perl534.overrideAttrs (old: {
19 | passthru = old.passthru // { pkgs = old.passthru.pkgs.override {
20 | overrides = (_: with (lib.recurseIntoAttrs prev.perl534.pkgs); {
21 | LWP = LWP.overrideAttrs (old: {
22 | propagatedBuildInputs = [ FileListing HTMLParser HTTPCookies HTTPNegotiate NetHTTP TryTiny WWWRobotRules ];
23 | buildInputs = [ ];
24 | checkInputs = [ HTTPDaemon TestFatal TestNeeds TestRequiresInternet ];
25 | });
26 | });
27 | }; };
28 | });
29 | }) ]; } ({ config, pkgs, ... }: /* lib.mkIf (config.specialisation == { }) */ {
30 | specialisation.default.configuration.nixpkgs.pkgs = lib.mkForce pkgs;
31 | specialisation.test1.configuration.nixpkgs.pkgs = lib.mkForce pkgs;
32 | }) ];
33 | }));
34 |
35 | targetSystem = { rpi = "aarch64-linux"; x64 = "x86_64-linux"; }.${target};
36 | baseline = cross-compile pkgs.system targetSystem (unpinInputs inputs.self.nixosConfigurations."new:${target}-baseline");
37 | minimal = cross-compile pkgs.system targetSystem (unpinInputs inputs.self.nixosConfigurations."new:${target}-minimal");
38 | #x64 = unpinInputs inputs.self.nixosConfigurations."new:x64-minimal";
39 | #rpi = unpinInputs inputs.self.nixosConfigurations."new:rpi-minimal";
40 |
41 | in { inherit baseline minimal; script = ''
42 | ${lib.concatStringsSep "\n" (lib.mapAttrsToList (size: system: ''
43 | echo
44 | echo "Installation measurements (${target}-${size}):"
45 | ${measure-installation system ''
46 | echo "du --apparent-size /nix/store: $( du --apparent-size --block-size=1 --summarize $mnt/system/nix/store | cut -f1 )"
47 | rm -rf $mnt/system/nix/store/.links
48 | storeSize=$( du --apparent-size --block-size=1 --summarize $mnt/system/nix/store | cut -f1 )
49 | echo "(without /nix/store/.links): $storeSize"
50 | ${pkgs.util-linux}/bin/mount --fstab ${system.config.system.build.toplevel}/etc/fstab --target-prefix $mnt/ /boot
51 | df --block-size=1 $mnt/boot $mnt/system
52 | bootSize=$( df --block-size=1 --output=used -- $mnt/boot | tail -n+2 )
53 | systemSize=$( df --block-size=1 --output=used -- $mnt/system | tail -n+2 )
54 | mkdir -p $out/dref-ext
55 | echo "\drefset{/${target}/${size}/store/used}{$storeSize}" >>$out/dref-ext/install-${target}-${size}.tex
56 | echo "\drefset{/${target}/${size}/boot/used}{$bootSize}" >>$out/dref-ext/install-${target}-${size}.tex
57 | echo "\drefset{/${target}/${size}/system/used}{$systemSize}" >>$out/dref-ext/install-${target}-${size}.tex
58 |
59 | mkdir -p $out/systems
60 | ${pkgs.python3.withPackages (pip3: (builtins.attrValues rec { inherit (pip3) python-magic pandas; }))}/bin/python3 ${dirname}/../lib/data/system-listing.py ${target}/${size} $mnt/system/nix/store $out/systems/${target}_${size}.csv || { echo python script failed with $? ; false ; }
61 | ''}
62 | echo
63 | echo "normal installation: ${collect-deps (toplevel system)}"
64 | echo "number of files: $( find ${collect-deps (toplevel system)} -type f | wc -l )"
65 | echo "number of dirs: $( find ${collect-deps (toplevel system)} -type d | wc -l )"
66 | echo "number of symlinks: $( find ${collect-deps (toplevel system)} -type l | wc -l )"
67 | echo "overall size: $( du --apparent-size --block-size=1 --summarize ${collect-deps (toplevel system)} | cut -f1 )"
68 | echo "thereof symlinks: $(( $( du --apparent-size --block-size=1 --summarize ${collect-deps (toplevel system)} | cut -f1 ) - $( du --apparent-size --block-size=1 --summarize ${(collect-deps (toplevel system)).clean} | cut -f1 ) ))"
69 | echo
70 | echo "merged components: ${merge-deps (toplevel system)}"
71 | echo "number of files: $( find ${merge-deps (toplevel system)} -type f | wc -l )"
72 | echo "number of dirs: $( find ${merge-deps (toplevel system)} -type d | wc -l )"
73 | echo
74 | '') ({ inherit minimal; } // (
75 | if pkgs.system == targetSystem then { inherit baseline; } else { } # Since cross-compiling of the baseline system fails (see above), only evaluate it when doing native compilation.
76 | )))}
77 | ''; }; in {
78 | scripts = { x64 = (mkExport "x64").script; rpi = (mkExport "rpi").script; };
79 | systems = { x64 = { inherit (mkExport "x64") baseline minimal; }; rpi = { inherit (mkExport "rpi") baseline minimal ; }; };
80 | }
81 | /*
82 | # (not sure that these work:)
83 | echo "Transfer list (old -> new): ${pkgs.runCommandLocal "transfer-list-old-new-${target}-${size}" { requiredSystemFeatures = [ "recursive-nix" ]; } ''
84 | before=$( ${pkgs.nix}/bin/nix --extra-experimental-features nix-command --offline path-info -r ${toplevel old} )
85 | after=$( ${pkgs.nix}/bin/nix --extra-experimental-features nix-command --offline path-info -r ${toplevel new} )
86 | <<< "$after"$'\n'"before"$'\n'"before" LC_ALL=C sort | uniq -u >$out
87 | ''}"
88 | echo "Transfer list (clb -> new): ${pkgs.runCommandLocal "transfer-list-clb-new-${target}-${size}" { requiredSystemFeatures = [ "recursive-nix" ]; } ''
89 | before=$( ${pkgs.nix}/bin/nix --extra-experimental-features nix-command --offline path-info -r ${toplevel clb} )
90 | after=$( ${pkgs.nix}/bin/nix --extra-experimental-features nix-command --offline path-info -r ${toplevel new} )
91 | <<< "$after"$'\n'"before"$'\n'"before" LC_ALL=C sort | uniq -u >$out
92 | ''}"
93 | */
94 |
--------------------------------------------------------------------------------
/checks/nix-copy-update.nix.md:
--------------------------------------------------------------------------------
1 | /*
2 |
3 | # Update via `nix-copy-closure`
4 |
5 | An example of updating via `nix-copy-closure`, substantially updating `nixpkgs`, for comparison with the `nix-store-send` stream.
6 |
7 |
8 | ## Implementation
9 |
10 | ```nix
11 | #*/# end of MarkDown, beginning of Nix test:
12 | dirname: inputs: pkgs: let
13 | lib = inputs.self.lib.__internal__;
14 | inherit (lib.th.testing pkgs) toplevel override unpinInputs resize dropRefs time disk-usage frame nix-store-send run-in-vm;
15 |
16 | keep-nix = { pkgs, config, ... }: { # got to keep Nix (and its DBs) for this
17 | nix.enable = lib.mkForce true;
18 | systemd.services.nix-daemon.path = lib.mkForce ([ config.nix.package.out pkgs.util-linux ] ++ lib.optionals config.nix.distributedBuilds [ pkgs.gzip ]); # remove »config.programs.ssh.package«
19 | fileSystems."/system" = { options = lib.mkForce [ "noatime" ]; }; # remove »ro«
20 | fileSystems."/nix/var" = { options = [ "bind" "rw" "private" ]; device = "/system/nix/var"; };
21 | environment.systemPackages = [ pkgs.nix ];
22 | };
23 |
24 | new = override (resize "4G" (unpinInputs inputs.self.nixosConfigurations."new:x64-minimal")) {
25 | wip.services.dropbear.rootKeys = lib.readFile "${inputs.self}/utils/res/ssh_testkey_1.pub";
26 | environment.etc.version.text = "new";
27 | imports = [ keep-nix ];
28 | };
29 | old = override (resize "4G" (unpinInputs inputs.self.nixosConfigurations."old:x64-minimal")) {
30 | environment.etc.version.text = "old";
31 | imports = [ keep-nix ];
32 | };
33 | clb = override old ({ config, ... }: {
34 | nixpkgs.overlays = lib.mkIf (!config.system.build?isVmExec) [ (final: prev: {
35 | glibc = prev.glibc.overrideAttrs (old: { trivialChange = 42 ; });
36 | libuv = prev.libuv.overrideAttrs (old: { doCheck = false; });
37 | }) ];
38 | system.nixos.tags = [ "glibc" ];
39 | environment.etc.version.text = lib.mkForce "clb";
40 | });
41 |
42 | systems = { inherit new old clb; };
43 |
44 | nix-copy-closure = before: after: ''
45 | ${frame ''echo "Update (${before} -> ${after})"''}
46 | echo "Update stream stats (${before} -> ${after})"
47 | cat ${nix-store-send systems.${before} systems.${after} ""}/stats
48 | echo "stream size: $(du --apparent-size --block-size=1 ${nix-store-send systems.${before} systems.${after} ""}/stream | cut -f1)"
49 | echo "stream path: ${nix-store-send systems.${before} systems.${after} ""}"
50 | ${run-in-vm systems.${before} { } (let
51 | in [
52 | { pre = ''
53 | $ssh -- '${disk-usage}'
54 | ( PATH=$PATH:${pkgs.openssh}/bin ; ${time "bash -c 'NIX_SSHOPTS=$sshOpts ${pkgs.nix}/bin/nix-copy-closure --to root@127.0.0.1 ${toplevel systems.${after}} 2>&1 | head -n1'"} )
55 | ''; test = ''
56 | echo "This is version $(cat /etc/version)" ; if [[ $(cat /etc/version) != ${before} ]] ; then echo "dang ..." ; false ; fi
57 | ${disk-usage}
58 | echo "total traffic of »nix-copy-closure«:" $( ${pkgs.inetutils}/bin/ifconfig --interface=ens3 | ${pkgs.gnugrep}/bin/grep -Pe 'RX bytes:' )
59 | ( set -x ; ${dropRefs (toplevel systems.${after})}/install-bootloader 1 )
60 | #( ${time "nix-store --gc"} ) 2>&1 | tail -n2 # delete the new version, to see how long GC takes (only the old version is registered with Nix and thus won't be GCed)
61 | ''; }
62 | { test = ''
63 | echo "This is version $(cat /etc/version)" ; if [[ $(cat /etc/version) != ${after} ]] ; then echo "dang ..." ; false ; fi
64 | ''; }
65 | { pre = ''
66 | ( set -x ; cat ${dropRefs (nix-store-send systems.${before} systems.${after} "")}/stream ) | $ssh -- '${time "nix-store-recv --only-read-input --status"}'
67 | ''; test = ''
68 | echo "total traffic of »nix-store-recv«:" $( ${pkgs.inetutils}/bin/ifconfig --interface=ens3 | ${pkgs.gnugrep}/bin/grep -Pe 'RX bytes:' )
69 | ''; }
70 | ])}
71 | '';
72 |
73 | in { script = ''
74 | echo "old system: ${toplevel old}"
75 | echo "new system: ${toplevel new}"
76 | echo "clb system: ${toplevel clb}"
77 | echo
78 | ${nix-copy-closure "old" "new"}
79 | echo
80 | ${nix-copy-closure "old" "clb"}
81 |
82 | ''; }
83 | #${frame "echo nix copy"}
84 | #${run-in-vm old { } (let
85 | #in [
86 | # { pre = ''
87 | # ( PATH=$PATH:${pkgs.openssh}/bin ; set -x ; NIX_SSHOPTS=$sshOpts ${pkgs.nix}/bin/nix --extra-experimental-features nix-command copy --no-check-sigs --to ssh://127.0.0.1 ${toplevel new} )
88 | # ''; test = ''
89 | # echo "This is version $(cat /etc/version)" ; if [[ $(cat /etc/version) != old ]] ; then echo "dang ..." ; false ; fi
90 | # ${disk-usage}
91 | # ( set -x ; ${dropRefs (toplevel new)}/install-bootloader 1 )
92 | # ''; }
93 | # { test = ''
94 | # echo "This is version $(cat /etc/version)" ; if [[ $(cat /etc/version) != new ]] ; then echo "dang ..." ; false ; fi
95 | # ''; }
96 | #])}
97 |
--------------------------------------------------------------------------------
/checks/reconfig-time.nix.md:
--------------------------------------------------------------------------------
1 | /*
2 |
3 | # ...
4 |
5 |
6 | ## System Definitions
7 |
8 | ```nix
9 | #*/# end of MarkDown, beginning of Nix test:
10 | dirname: inputs: pkgs: let
11 | lib = inputs.self.lib.__internal__; test = lib.th.testing pkgs;
12 | toplevels = lib.mapAttrs (n: v: test.toplevel v);
13 | flatten = attrs: lib.fun.mapMerge (k1: v1: lib.fun.mapMerge (k2: v2: { "${k1}_${k2}" = v2; }) v1) attrs;
14 |
15 | prep-system = system: test.override (test.unpinInputs system) ({ config, ... }: {
16 | # »override« does not affect containers, stacking »overrideBase« (for some reason) only works on the repl, and targeting the containers explicitly also doesn't work ...
17 | specialisation.test1.configuration.th.target.containers.containers = lib.mkForce { };
18 |
19 | # Can't reliably find references in compressed files:
20 | boot.initrd.compressor = "cat";
21 | # This didn't quite work (still uses gzip):
22 | #th.minify.shrinkKernel.overrideConfig.KERNEL_ZSTD = "n";
23 | #th.minify.shrinkKernel.overrideConfig.HAVE_KERNEL_UNCOMPRESSED = "y";
24 | #th.minify.shrinkKernel.overrideConfig.KERNEL_UNCOMPRESSED = "y";
25 |
26 | boot.loader.timeout = lib.mkForce 0; # boot without waiting for user input
27 |
28 | disableModule."system/boot/kexec.nix" = lib.mkForce false;
29 | });
30 |
31 | mqttNixConfig = updated: {
32 | th.target.containers.enable = true;
33 | th.target.containers.containers.mosquitto = {
34 | modules = [ ({ pkgs, ... }: {
35 |
36 | th.minify.staticUsers = pkgs.system != "aarch64-linux"; # Without this, systemd inside the container fails to start the service (exits with »226/NAMESPACE«).
37 |
38 | services.mosquitto.enable = true;
39 | services.mosquitto.listeners = [ ]; # (bugfix)
40 | services.mosquitto.package = lib.mkIf updated (pkgs.mosquitto.overrideAttrs (old: rec { # from v2.0.14
41 | pname = "mosquitto"; version = "2.0.15"; src = pkgs.fetchFromGitHub { owner = "eclipse"; repo = pname; rev = "v${version}"; sha256 = "sha256-H2oaTphx5wvwXWDDaf9lLSVfHWmb2rMlxQmyRB4k5eg="; };
42 | }));
43 | }) ];
44 | };
45 | th.target.containers.containers.zigbee2mqtt = {
46 | modules = [ ({ pkgs, ... }: {
47 |
48 | th.minify.staticUsers = pkgs.system != "aarch64-linux";
49 |
50 | services.zigbee2mqtt.enable = true;
51 | services.zigbee2mqtt.package = lib.mkIf updated (pkgs.callPackage "${inputs.new-nixpkgs}/pkgs/servers/zigbee2mqtt/default.nix" { }); # 1.25.0 -> 1.25.2 (it's an npm package with old packaging, and thus very verbose to update explicitly)
52 |
53 | systemd.services.zigbee2mqtt.serviceConfig.Restart = lib.mkForce "no";
54 | }) ];
55 | sshKeys.root = [ (lib.readFile "${inputs.self}/utils/res/ssh_testkey_2.pub") ];
56 | };
57 | };
58 |
59 | minimal = {
60 | # »old« on the rPI for manual testing:
61 | rpi = test.override (prep-system inputs.self.nixosConfigurations."old:rpi-minimal") {
62 | th.hermetic-bootloader.uboot.env.bootdelay = lib.mkForce "0";
63 | #nixpkgs.overlays = [ (final: prev: { systemd = (prev.systemd.override { withAnalyze = true; }); }) ];
64 | wip.services.dropbear.rootKeys = lib.readFile "${inputs.self}/utils/res/ssh_testkey_1.pub";
65 | };
66 | };
67 |
68 | systems = {
69 | withUpdate = (lib.mapAttrs (k: system: test.override system ({ config, ... }: {
70 | specialisation.mqtt-old.configuration = mqttNixConfig false;
71 | specialisation.mqtt-new.configuration = mqttNixConfig true;
72 | system.nixos.tags = [ "withUpdate" ];
73 | })) minimal);
74 | };
75 |
76 | installers = lib.mapAttrs (k1: v: lib.mapAttrs (k2: system: pkgs.writeShellScriptBin "scripts-${k2}-${k1}" ''exec ${lib.installer.writeSystemScripts { inherit system pkgs; }} "$@"'') v) systems;
77 |
78 | in { inherit systems installers; script = ''
79 | echo 'no-op' ; exit 0
80 | ''; }
81 | /*# end of nix
82 | ```
83 |
84 |
85 | ## System Testing
86 |
87 | Boot the x64 version in qemu:
88 | ```bash
89 | nix run .'#'checks.x86_64-linux.nix_store_send.passthru.installers.withMqtt.old -- run-qemu --efi --install=always
90 | nix run .'#'checks.x86_64-linux.nix_store_send.passthru.installers.withOci.old -- run-qemu --efi --install=always
91 |
92 | # no completion, history or editing, so here are all the commands I used:
93 | next-boot mqtt && reboot
94 | systemctl status container@mosquitto.service
95 | systemctl cat container@mosquitto.service
96 | systemctl restart container@mosquitto.service
97 | journalctl -b -f -u container@mosquitto.service
98 | journalctl -b -f --lines=80 -u container@mosquitto.service
99 | machinectl shell mosquitto
100 | systemctl status
101 | systemctl list-units --failed
102 | systemctl status mosquitto.service
103 | systemctl cat mosquitto.service
104 | journalctl -b -f -u mosquitto.service
105 |
106 | systemctl status container@zigbee2mqtt.service
107 | systemctl restart container@zigbee2mqtt.service
108 | journalctl -b -f -u container@zigbee2mqtt.service
109 | journalctl -b -f --lines=80 -u container@zigbee2mqtt.service
110 | machinectl shell zigbee2mqtt
111 | systemctl status zigbee2mqtt.service
112 | systemctl restart zigbee2mqtt.service
113 | systemctl cat zigbee2mqtt.service
114 | journalctl -b -f -u zigbee2mqtt.service
115 | journalctl -b -f --lines=80 -u zigbee2mqtt.service
116 | systemctl show --property=StateChangeTimestampMonotonic --value zigbee2mqtt.service
117 | systemctl show --property=StateChangeTimestamp --value zigbee2mqtt.service
118 | ```
119 |
120 | Install e.g. the rPI version of a system to a microSD card (on an x64 system):
121 | ```bash
122 | nix run .'#'checks.x86_64-linux.nix_store_send.passthru.installers.withMqtt.rpi -- install-system /dev/mmcblk0
123 |
124 | ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i utils/res/ssh_testkey_1 root@192.168.8.85
125 | next-boot mqtt && reboot
126 | systemctl show --property=StateChangeTimestampMonotonic --value container@zigbee2mqtt.service
127 | machinectl --quiet shell zigbee2mqtt /run/current-system/sw/bin/systemctl show --property=ActiveEnterTimestampMonotonic --value zigbee2mqtt.service
128 |
129 | systemctl show --property=StateChangeTimestamp --value container@zigbee2mqtt.service
130 | systemctl show --property=StateChangeTimestampMonotonic --value container@mosquitto.service
131 | systemctl show --property=ActiveEnterTimestampMonotonic --value container@zigbee2mqtt.service
132 | systemctl show --property=ActiveEnterTimestampMonotonic --value container@mosquitto.service
133 | machinectl shell zigbee2mqtt
134 | machinectl shell mosquitto
135 | systemctl show --property=StateChangeTimestampMonotonic --value zigbee2mqtt.service
136 | systemctl show --property=ActiveEnterTimestampMonotonic --value multi-user.target
137 | ```
138 |
139 | And here is the semi-automated boot performance test:
140 | ```bash
141 | # Install the system to a microSD card:
142 | nix run .'#'checks.x86_64-linux.nix_store_send.passthru.installers.withUpdate.rpi -- install-system /dev/mmcblk0 # (adjust the /dev/* path as needed)
143 | #nix run .'#'checks.x86_64-linux.nix_store_send.passthru.installers.withUpdate.x64:rpi -- install-system /dev/mmcblk0 # or this for the cross-compiled version (not recommended, also does not compile ...)
144 | # Then boot the system on a rPI4, and make sure that »$ssh« works to log in and that the PI logs to this host's »/dev/ttyUSB0«:
145 | mkdir -p out/logs ; LC_ALL=C nix-shell -p openssh -p tio -p moreutils --run bash # open a shell with the required programs, then in that shell:
146 | ssh='ssh -o LogLevel=QUIET -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i utils/res/ssh_testkey_1 root@192.168.8.85'
147 | function wait4boot () { for i in $(seq 20) ; do sleep 1 ; if $ssh -- true &>/dev/null ; then return 0 ; fi ; printf . ; done ; return 1 ; }
148 | wait4boot
149 | ( set -x ; logPid= ; trap '[[ ! $logPid ]] || kill $logPid' EXIT ; for run in $(seq 20) ; do
150 | target=mqtt-new ; (( run % 2 )) || target=mqtt-old
151 | [[ ! $logPid ]] || kill $logPid ; logPid=
152 | log=out/logs/reboot-$(( run - 1 ))-to-$target.txt ; rm -f $log
153 | sleep infinity | tio --timestamp --timestamp-format 24hour-start /dev/ttyUSB0 --log --log-strip --log-file $log >/dev/null & logPid=$!
154 | $ssh -- "echo 'next-boot $target' >/dev/ttyS1 ; set -x ; next-boot $target && reboot" || true
155 | wait4boot || exit ; echo
156 | done ) || { echo 'oh noo' ; false ; }
157 | ```
158 | `LC_ALL=C nix-shell -p openssh -p tio -p moreutils --run 'tio --timestamp --timestamp-format 24hour-start /dev/ttyUSB0' # exit with ctrl-t q`
159 |
160 | Test to "reboot" with kexec (but that does not work on the iPI):
161 | ```bash
162 | # x64
163 | type=bzImage
164 | linux=/nix/store/s9x83hgh7bgsz0kyfg3jvb39rj678k5n-linux-5.15.36/bzImage
165 | initrd=/nix/store/sdvc543ysdw3720cnmr44hcxhkm9gv4h-initrd-linux-5.15.36/initrd
166 | options='init=/nix/store/4m8v2bjbpdwnbfgwrypr8rnkia919hnk-nixos-system-x64-minimal-test1-withMqtt-22.05.20220505.c777cdf/init boot.shell_on_fail console=ttyS0 panic=10 hung_task_panic=1 hung_task_timeout_secs=30 loglevel=4'
167 |
168 | # rpi
169 | type=Image
170 | linux=/nix/store/1qqjvky6dla1rvr2aw1bvclwzr50byi7-linux-5.15.36/Image
171 | initrd=/nix/store/2xhayk1adfajwcbn1zzdmnxpv1mc1blb-initrd-linux-5.15.36/initrd
172 | options='init=/nix/store/w7w00z062llgzcxai12zwg7psrfn1zzp-nixos-system-rpi-minimal-test1-withOci-22.05.20220505.c777cdf/init boot.shell_on_fail console=tty1 panic=10 hung_task_panic=1 hung_task_timeout_secs=30 loglevel=4'
173 | fdtdir=/nix/store/f7647aw7vpvjiw7bpz7h47wgigjfm592-device-tree-overlays
174 |
175 | kexec --load --type=$type $linux --initrd=$initrd --command-line="$options" && systemctl kexec
176 | ```
177 |
178 | */
179 |
--------------------------------------------------------------------------------
/checks/stream-rsync.nix.md:
--------------------------------------------------------------------------------
1 | /*
2 |
3 | # `nix-store-send` Rsync Transfer
4 |
5 | Testing how much could be gained by transferring the send stream differentially via rsync.
6 |
7 | Notes:
8 | * The client could in principle reconstruct the (null -> old) stream, though one would not want to do that explicitly.
9 | * The files in both streams are sorted alphabetically by their hashes, meaning they are essentially shuffled, since only changed files are included in the (old -> new) stream.
10 |
11 | Rsync works by splitting the old version of the target file into fixed-size chunks and computing a cheap hash sum for each. On the sender's side, it then uses a rolling hash to compute the same sum at every byte offset of the new file. Where a match with an old chunk is found, it is verified with a stronger hash sum, and if that holds, the chunk is reused from the old file instead of being transferred.
12 | The advantage of checking every position in the new file is that chunks are found even if they have moved.
13 |
14 | Other algorithms (like `casync`) divide files into chunks wherever they find certain byte sequences (essentially by computing an efficient rolling hash over a short window of bytes at every offset, and cutting a chunk wherever the hash takes a certain form). They then hash the chunks with a stronger algorithm and only transfer those chunks that don't exist on the target yet.
15 | This also ensures that chunks that move in their entirety, e.g. because a file moved within the stream, will be recognized just the same.
16 |
17 | The problem with shuffling the files, however, is that any chunk that spanned the boundary between two files will no longer occur, even if neither file changed in the regions covered by the chunk, because it is extremely unlikely that the same two files will again end up as neighbors in that order.
18 | How much that matters depends on the number of files and the average chunk size, since every file boundary invalidates one potentially unchanged chunk; to a first approximation, one chunk size worth of otherwise unchanged data has to be re-sent per file in the stream.
19 |
20 |
21 | ## Implementation
22 |
23 | ```nix
24 | #*/# end of MarkDown, beginning of Nix test:
25 | dirname: inputs: pkgs: let
26 | lib = inputs.self.lib.__internal__; test = lib.th.testing pkgs;
27 |
28 | remove-containers = system: test.override system { # »override« (for some reason) does not affect containers, and targeting it explicitly also doesn't work ...
29 | specialisation.test1.configuration.th.target.containers.containers = lib.mkForce { };
30 | };
31 |
32 | old = remove-containers (test.unpinInputs inputs.self.nixosConfigurations."old:x64-minimal");
33 | new = remove-containers (test.unpinInputs inputs.self.nixosConfigurations."new:x64-minimal");
34 | clb = test.override new ({ config, ... }: { nixpkgs.overlays = lib.mkIf (!config.system.build?isVmExec) [ (final: prev: {
35 | glibc = prev.glibc.overrideAttrs (old: { trivialChange = 42 ; });
36 | libuv = prev.libuv.overrideAttrs (old: { doCheck = false; });
37 | }) ]; system.nixos.tags = [ "glibc" ]; });
38 |
39 | in { script = ''
40 | # Using »--dry-run« invalidates the measurement, so the old file needs to be copied.
41 |
42 | ( ${test.frame "echo 'real update'"} ) 1>&2
43 | echo -n "\addplot coordinates {" > plotUp
44 | echo -n "\addplot coordinates {" > plotDt
45 | echo -n "\addplot coordinates {" > plotDw
46 |
47 | for size in 8 16 32 64 128 256 512 1k 2k 4k 8k 16k 32k 64k 128k ; do
48 | echo $'\n'"Differential rsync transfer of update stream onto initial image (with names, block size $size)" 1>&2
49 | rm -rf ./prev ; cp ${test.nix-store-send null old ""}/stream ./prev
50 | data=$( ${pkgs.rsync}/bin/rsync --no-whole-file --stats --block-size=$size ${test.nix-store-send old new ""}/stream ./prev )
51 | <<<"$data" grep -Pe 'Total|data' 1>&2
52 |
53 | data=$( <<<"$data" sed s/,//g )
54 | echo -n " ($size,$( <<<"$data" grep -oPe 'Total bytes sent: \K\d+' ))" >>plotUp
55 | echo -n " ($size,$( <<<"$data" grep -oPe 'Literal data: \K\d+' ))" >>plotDt
56 | echo -n " ($size,$( <<<"$data" grep -oPe 'Total bytes received: \K\d+' ))" >>plotDw
57 | done
58 |
59 | echo " }; % real up" >>plotUp
60 | echo " }; % real new" >>plotDt
61 | echo " }; % real down" >>plotDw
62 | if [[ ,''${args[plot]:-}, == *,1,* ]] ; then cat plot* ; fi
63 |
64 | echo $'\n'"Differential rsync transfer of update stream onto initial image (without names, block size 512)" 1>&2
65 | rm -rf ./prev ; cp ${test.nix-store-send null old "--no-names"}/stream ./prev
66 | ${pkgs.rsync}/bin/rsync --no-whole-file --stats --block-size=700 ${test.nix-store-send old new "--no-names"}/stream ./prev | grep -Pe 'Total|data' 1>&2
67 |
68 | ( echo ; echo ) 1>&2
69 | ( ${test.frame "echo 'invalidate glibc'"} ) 1>&2
70 | echo -n "\addplot coordinates {" > plotUp
71 | echo -n "\addplot coordinates {" > plotDt
72 | echo -n "\addplot coordinates {" > plotDw
73 |
74 | for size in 8 16 32 64 128 256 512 1k 2k 4k 8k 16k 32k 64k 128k ; do
75 | echo $'\n'"Differential rsync transfer of update stream onto initial image (with names, block size $size)" 1>&2
76 | rm -rf ./prev ; cp ${test.nix-store-send null new ""}/stream ./prev
77 | data=$( ${pkgs.rsync}/bin/rsync --no-whole-file --stats --block-size=$size ${test.nix-store-send new clb ""}/stream ./prev )
78 | <<<"$data" grep -Pe 'Total|data' 1>&2
79 |
80 | data=$( <<<"$data" sed s/,//g )
81 | echo -n " ($size,$( <<<"$data" grep -oPe 'Total bytes sent: \K\d+' ))" >>plotUp
82 | echo -n " ($size,$( <<<"$data" grep -oPe 'Literal data: \K\d+' ))" >>plotDt
83 | echo -n " ($size,$( <<<"$data" grep -oPe 'Total bytes received: \K\d+' ))" >>plotDw
84 | done
85 |
86 | echo " }; % glibc up" >>plotUp
87 | echo " }; % glibc new" >>plotDt
88 | echo " }; % glibc down" >>plotDw
89 | if [[ ,''${args[plot]:-}, == *,2,* ]] ; then cat plot* ; fi
90 |
91 | echo $'\n'"Differential rsync transfer of update stream onto initial image (without names, block size 512)" 1>&2
92 | rm -rf ./prev ; cp ${test.nix-store-send null new "--no-names"}/stream ./prev
93 | ${pkgs.rsync}/bin/rsync --no-whole-file --stats --block-size=700 ${test.nix-store-send new clb "--no-names"}/stream ./prev | grep -Pe 'Total|data' 1>&2
94 | ''; }
95 |
--------------------------------------------------------------------------------
/checks/stream-size.nix.md:
--------------------------------------------------------------------------------
1 | /*
2 |
3 | # `nix-store-send` Stream Size
4 |
5 | An example of the send stream, substantially updating `nixpkgs`.
6 |
7 |
8 | ## Implementation
9 |
10 | ```nix
11 | #*/# end of MarkDown, beginning of Nix test:
12 | dirname: inputs: pkgs: let
13 | lib = inputs.self.lib.__internal__;
14 | test = lib.th.testing pkgs;
15 |
16 | new = test.unpinInputs inputs.self.nixosConfigurations."new:x64-minimal";
17 | old = test.unpinInputs inputs.self.nixosConfigurations."old:x64-minimal";
18 |
19 | in { script = ''
20 | echo "Update stream stats (old -> new)"
21 | cat ${test.nix-store-send old new ""}/stats
22 | echo
23 | echo "Initial image (stream null -> old)"
24 | cat ${test.nix-store-send null old ""}/stats
25 | ''; }
26 |
--------------------------------------------------------------------------------
/checks/stream-update.nix.md:
--------------------------------------------------------------------------------
1 | /*
2 |
3 | # Update via `nix-store-send`
4 |
5 | An example of the send stream, substantially updating `nixpkgs`.
6 |
7 |
8 | ## Notes
9 |
10 | * The closure installed ("installing system") is different from (and bigger than) the "old system", because the former also includes the test scripts and some dependencies they pull in.
11 | * "installing system" does completely contain "old system", though, so this should not matter.
12 | * Actually, it's odd that applying the stream decreases the disk utilization. Anything in "installing system" and not in "old system" should remain.
13 |
14 |
15 | ## Implementation
16 |
17 | ```nix
18 | #*/# end of MarkDown, beginning of Nix test:
19 | dirname: inputs: pkgs: let
20 | lib = inputs.self.lib.__internal__;
21 | inherit (lib.th.testing pkgs) toplevel override unpinInputs resize dropRefs time disk-usage nix-store-send run-in-vm;
22 |
23 | new = override (resize "512M" (unpinInputs inputs.self.nixosConfigurations."new:x64-minimal")) {
24 | wip.services.dropbear.rootKeys = lib.readFile "${inputs.self}/utils/res/ssh_testkey_1.pub";
25 | environment.etc.version.text = "new";
26 | };
27 | old = override (resize "512M" (unpinInputs inputs.self.nixosConfigurations."old:x64-minimal")) {
28 | environment.etc.version.text = "old";
29 | };
30 |
31 | # * read-input: Receive instructions and new files
32 | # * restore-links: Restore hardlinks to existing files
33 | # * install: Install new components to the store
34 | # * save-links: Save hardlinks to new files
35 | # * delete: Remove old components from the store
36 | # * prune-links: Prune hardlinks to files no longer needed
37 | # * cleanup: Remove instructions and temporary hardlinks
38 | update-cmds = [
39 | { pre = ''
40 | $ssh -- '${disk-usage}'
41 | ( set -x ; cat ${dropRefs (nix-store-send old new "")}/stream ) | $ssh -- '${time "nix-store-recv --only-read-input --status"}'
42 |
43 | ''; test = ''
44 | echo "total traffic:" $( ${pkgs.inetutils}/bin/ifconfig --interface=ens3 | ${pkgs.gnugrep}/bin/grep -Pe 'RX bytes:' )
45 |
46 | echo "This is version $(cat /etc/version)" ; if [[ $(cat /etc/version) != old ]] ; then echo "dang ..." ; false ; fi
47 | ${disk-usage}
48 |
49 | ${time "nix-store-recv --only-restore-links"}
50 | ${time "nix-store-recv --only-install"}
51 | ${time "nix-store-recv --only-save-links"}
52 |
53 | ( set -x ; ${dropRefs (toplevel new)}/install-bootloader 1 )
54 | ''; }
55 | { test = ''
56 | echo "This is version $(cat /etc/version)" ; if [[ $(cat /etc/version) != new ]] ; then echo "dang ..." ; false ; fi
57 | ${disk-usage}
58 |
59 | ${time "nix-store-recv --only-delete"}
60 | ${time "nix-store-recv --only-prune-links"}
61 | ${time "nix-store-recv --only-cleanup"}
62 |
63 | ${disk-usage}
64 | ''; }
65 | { pre = ''
66 | $ssh -- '${disk-usage}'
67 | ( set -x ; cat ${dropRefs (nix-store-send old new "")}/stream ) | $ssh -- '${time "nix-store-recv --status"}'
68 | $ssh -- '${disk-usage}'
69 |
70 | ''; test = ''
71 | echo "total traffic:" $( ${pkgs.inetutils}/bin/ifconfig --interface=ens3 | ${pkgs.gnugrep}/bin/grep -Pe 'RX bytes:' )
72 | echo "This is version $(cat /etc/version)" ; if [[ $(cat /etc/version) != new ]] ; then echo "dang ..." ; false ; fi
73 | ''; }
74 | ];
75 |
76 | in { script = ''
77 | echo "old system: ${toplevel old}"
78 | echo "new system: ${toplevel new}"
79 | echo "Update stream stats (old -> new)"
80 | cat ${nix-store-send old new ""}/stats
81 | echo "stream size: $(du --apparent-size --block-size=1 ${nix-store-send old new ""}/stream | cut -f1)"
82 | echo "stream path: ${nix-store-send old new ""}"
83 | echo
84 | ${run-in-vm old { } update-cmds}
85 | echo
86 | ${run-in-vm old { override = {
87 | installer.commands.postInstall = ''true || rm -rf $mnt/system/nix/store/.links'';
88 | }; } update-cmds}
89 | ''; }
90 |
--------------------------------------------------------------------------------
/checks/test.nix.md:
--------------------------------------------------------------------------------
1 | /*
2 |
3 | # Testing the Tests
4 |
5 | ## Implementation
6 |
7 | ```nix
8 | #*/# end of MarkDown, beginning of Nix test:
9 | dirname: inputs: pkgs: let
10 | lib = inputs.self.lib.__internal__;
11 | inherit (lib.th.testing pkgs) toplevel override unpinInputs measure-installation nix-store-send run-in-vm;
12 |
13 | new = unpinInputs inputs.self.nixosConfigurations."new:x64-minimal";
14 | old = unpinInputs inputs.self.nixosConfigurations."old:x64-minimal";
15 |
16 | frame = script: ''
17 | echo "================="
18 | ${script}
19 | echo "================="
20 | '';
21 |
22 | in { script = ''
23 |
24 | ${run-in-vm new { } (let
25 | set-the-bar = { pre = "$ssh -- 'echo foo >/tmp/bar'"; };
26 | try-the-bar = { test = frame "if [[ $(cat /tmp/bar) == foo ]] ; then echo yay ; else echo 'oh no!' ; false ; fi "; };
27 | in [
28 | (set-the-bar // try-the-bar)
29 | (set-the-bar // { test = "true"; })
30 | (try-the-bar)
31 | (frame "uname -a")
32 | (frame ''echo "this one fails" ; false'')
33 | (frame ''echo "this shouldn't run"'')
34 | ])}
35 |
36 | ''; }
37 |
--------------------------------------------------------------------------------
/checks/update-glibc.nix.md:
--------------------------------------------------------------------------------
1 | /*
2 |
3 | # Update `glibc`
4 |
5 | ... with a trivial change, to see what update stream this creates.
6 |
7 |
8 | ## Implementation
9 |
10 | ```nix
11 | #*/# end of MarkDown, beginning of Nix test:
12 | dirname: inputs: pkgs: let
13 | lib = inputs.self.lib.__internal__;
14 | inherit (lib.th.testing pkgs) toplevel override unpinInputs measure-installation nix-store-send;
15 |
16 | old = override (unpinInputs inputs.self.nixosConfigurations."old:x64-minimal") {
17 | # »override« (for some reason) does not affect containers, and targeting it explicitly also doesn't work ...
18 | specialisation.test1.configuration.th.target.containers.containers = lib.mkForce { };
19 | };
20 | new = override old ({ config, ... }: { nixpkgs.overlays = lib.mkIf (!config.system.build?isVmExec) [ (final: prev: {
21 | glibc = prev.glibc.overrideAttrs (old: { trivialChange = 42 ; });
22 | libuv = prev.libuv.overrideAttrs (old: { doCheck = false; });
23 | }) ]; system.nixos.tags = [ "glibc" ]; });
24 | #specialisation.test1.configuration.th.target.containers.containers.native.modules = [ (_: config) ]; # this creates an infinite recursion
25 |
26 | in { script = ''
27 | echo "Update stream when trivially changing glibc"
28 | : old ${toplevel old} : new ${toplevel new}
29 | cat ${nix-store-send old new ""}/stats
30 | ''; }
31 |
--------------------------------------------------------------------------------
/containers/foreign.nix.md:
--------------------------------------------------------------------------------
1 | /*
2 |
3 | # Non-NixOS HTTP Container
4 |
5 | TODO: this is super outdated and uses Nix-antipatterns.
6 | See `mqttOciConfig` in [`checks/nix_store_send.nix.md`](../checks/nix_store_send.nix.md) for a working example of including OCI containers.
7 |
8 |
9 | ## Implementation
10 |
11 | ```nix
12 | #*/# end of MarkDown, beginning of NixOS config:
13 | dirname: inputs: let lib = inputs.self.lib.__internal__; in pkgs: let
14 | in {
15 |
16 | rootFS = [
17 | # How to get a rootfs layer:
18 | # First, find or build an appropriate image:
19 | # $ printf 'FROM ubuntu:20.04 \nRUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y ubuntu-server busybox' | docker build --pull -t local/ubuntu-server -
20 | # Then (fetch and) unpack it and add it to the nix store (podman can also do this):
21 | # $ ( image=local/ubuntu-server ; set -eux ; id=$(docker container create ${image/_\//}) ; trap "docker rm --volumes $id" EXIT ; rm -rf ../images/$image ; mkdir -p ../images/$image ; cd ../images/$image ; docker export $id | pv | tar x ; echo "$(nix eval --impure --expr '"${./.}"'):$(nix hash path --type sha256 .)" )
22 | # If »$image« to remains constant or is reproducible, then (and only then) this will reproduce the same (content-addressed) store path.
23 | # If this store path does not exist locally (e.g. because it can't be reproduced), then both evaluation and building will fail, but only if the path is actually being evaluated.
24 | # The layer could also be specified with any of Nix(OS)' fetchers (if it is hosted somewhere nix can reach and authenticate against).
25 | # Adding layers as flake inputs is not a good idea, since those will always be fetched even when they are not being accessed, which would be the case for all layers from older builds when chaining previous builds as flake input.
26 | "/nix/store/3387hzbl34z2plj3cvfghp4jlvgc2jn5-ubuntu-server:sha256-PPVOPyQGbkgoFkERodVcEyTI84/rG4MhjIuPcjHll98="
27 | #"/nix/store/plqajm9ma7by4h0wmz35x6gkqgbwbzp5-android-setup:sha256-+MjVIiL36rQ9ldJa7HyOn3AXgSprZeWOCfKKU4knWa0=" # A path where the hashes match, but that doesn't exist. Creating it as empty dir does not make a difference.
28 |
29 | # This image's systemd starts just fine, but any »machinectl shell« results in »systemd-coredump[...]: [🡕] Process ... ((sh)) of user 0 dumped core.«
30 | # (lib.th.extract-docker-image pkgs (pkgs.dockerTools.pullImage {
31 | # imageName = "busybox"; finalImageTag = "1.34.1-glibc"; imageDigest = "sha256:5b1ae0bda2e3beb70cb3884c05c2c0d3d542db2fa4ce27fc191e84091361d6eb"; sha256 = "1nw8r3yl8bxzafaqi1gb2rf6f2b2hl39cdl7dgs6f467p38sh9dh";
32 | # }))
33 | # (lib.th.extract-docker-image pkgs (pkgs.dockerTools.pullImage {
34 | # imageName = "jrei/systemd-ubuntu"; finalImageTag = "20.04"; imageDigest = "sha256:a54deb990d26b6bc7e3b2ab907a0dbb3e45f506a367794a4b6df545bfe41cfed"; sha256 = "0ywa0yrqgs2w5zk4f8rd42b8d6bdywniah6nnkhqmkpy2w8fdi78";
35 | # }))
36 |
37 | (pkgs.runCommandLocal "layer-prepare-systemd" { } ''
38 | mkdir -p $out
39 | ln -sT /usr/lib/systemd/systemd $out/init
40 |
41 | mkdir -p $out/etc/systemd/system/
42 | printf '[Service]\nExecStart=/bin/busybox httpd -f -v -p 8001 -h /web-root/\n' > $out/etc/systemd/system/http.service
43 | mkdir -p $out/etc/systemd/system/multi-user.target.wants
44 | ln -sT ../http.service $out/etc/systemd/system/multi-user.target.wants/http.service
45 | mkdir -p $out/web-root/ ; printf "\nI'm not a NixOS container, but I pretend to be\n" > $out/web-root/index.html
46 | '')
47 | ];
48 |
49 |
50 | }
51 |
--------------------------------------------------------------------------------
/containers/native.nix.md:
--------------------------------------------------------------------------------
1 | /*
2 |
3 | # NixOS-Native HTTP Container
4 |
5 | ## Implementation
6 |
7 | ```nix
8 | #*/# end of MarkDown, beginning of NixOS config:
9 | dirname: inputs: let lib = inputs.self.lib.__internal__; in pkgs: let
10 | in {
11 |
12 | modules = [ ({ config, pkgs, ... }: {
13 |
14 | systemd.services.http = {
15 | serviceConfig.ExecStart = "${pkgs.busybox}/bin/httpd -f -v -p 8000 -h ${pkgs.writeTextDir "index.html" ''
16 |
17 | I'm running inside a NixOS native container
18 | ''}";
19 | wantedBy = [ "multi-user.target" ];
20 | serviceConfig.Restart = "always"; serviceConfig.RestartSec = 5; unitConfig.StartLimitIntervalSec = 0;
21 | serviceConfig.DynamicUser = "yes";
22 | };
23 |
24 | }) ];
25 |
26 |
27 | }
28 |
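29 | # Usage sketch (an assumption; the actual network wiring is defined by the host config):
30 | # this container is pulled in by »hosts/rpi/systems/test1.nix.md«, which also opens TCP
31 | # port 8000 in the host's firewall. If the container's »httpd« is reachable via the host's
32 | # address (e.g. 192.168.8.85 for the »rpi« host), it can be checked with:
33 | # $ curl http://192.168.8.85:8000/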
--------------------------------------------------------------------------------
/flake.lock:
--------------------------------------------------------------------------------
1 | {
2 | "nodes": {
3 | "config": {
4 | "locked": {
5 | "dir": "example/defaultConfig",
6 | "lastModified": 1701430145,
7 | "narHash": "sha256-JQIFcFZID/uWrdriInXsfuAxJlGaQksQ5bMsYP18wjw=",
8 | "owner": "NiklasGollenstede",
9 | "repo": "nixos-installer",
10 | "rev": "9e02cd59520c5ac90daeb987f2a369d6a5c2c3e3",
11 | "type": "github"
12 | },
13 | "original": {
14 | "dir": "example/defaultConfig",
15 | "owner": "NiklasGollenstede",
16 | "repo": "nixos-installer",
17 | "type": "github"
18 | }
19 | },
20 | "config_2": {
21 | "locked": {
22 | "dir": "example/defaultConfig",
23 | "lastModified": 1701428487,
24 | "narHash": "sha256-d7mXqyoMWC0eG81y0JEWf91ZXnW8YGWnpq72sitTASc=",
25 | "owner": "NiklasGollenstede",
26 | "repo": "nix-wiplib",
27 | "rev": "1149b6853ebaa6155cdfdaededd3c2119b01669a",
28 | "type": "github"
29 | },
30 | "original": {
31 | "dir": "example/defaultConfig",
32 | "owner": "NiklasGollenstede",
33 | "repo": "nix-wiplib",
34 | "type": "github"
35 | }
36 | },
37 | "functions": {
38 | "inputs": {
39 | "nixpkgs": [
40 | "nixpkgs"
41 | ]
42 | },
43 | "locked": {
44 | "lastModified": 1706557688,
45 | "narHash": "sha256-UXylOaV6fvJq5ph0KTf93nk9v37uezMJT8nfKObQLRo=",
46 | "owner": "NiklasGollenstede",
47 | "repo": "nix-functions",
48 | "rev": "c176ccb324740182c9d9d3f032cae5146254f5ce",
49 | "type": "github"
50 | },
51 | "original": {
52 | "owner": "NiklasGollenstede",
53 | "repo": "nix-functions",
54 | "type": "github"
55 | }
56 | },
57 | "installer": {
58 | "inputs": {
59 | "config": "config",
60 | "functions": [
61 | "functions"
62 | ],
63 | "nixpkgs": [
64 | "nixpkgs"
65 | ]
66 | },
67 | "locked": {
68 | "lastModified": 1701714601,
69 | "narHash": "sha256-lc1+qEVTYSfxGG7Q0ZG8E1Y/JT8hQ82TouyYXrTuxuY=",
70 | "owner": "NiklasGollenstede",
71 | "repo": "nixos-installer",
72 | "rev": "6f44532be5e2561f2772d056bdaeba6c39ecca9f",
73 | "type": "github"
74 | },
75 | "original": {
76 | "owner": "NiklasGollenstede",
77 | "repo": "nixos-installer",
78 | "type": "github"
79 | }
80 | },
81 | "lowdown-src": {
82 | "flake": false,
83 | "locked": {
84 | "lastModified": 1633514407,
85 | "narHash": "sha256-Dw32tiMjdK9t3ETl5fzGrutQTzh2rufgZV4A/BbxuD4=",
86 | "owner": "kristapsdz",
87 | "repo": "lowdown",
88 | "rev": "d2c2b44ff6c27b936ec27358a2653caaef8f73b8",
89 | "type": "github"
90 | },
91 | "original": {
92 | "owner": "kristapsdz",
93 | "repo": "lowdown",
94 | "type": "github"
95 | }
96 | },
97 | "new-nixpkgs": {
98 | "locked": {
99 | "lastModified": 1659052185,
100 | "narHash": "sha256-TUbwbzCbprtWB9EtXPM52cWuKETuCV3H+cMXjLRbwTw=",
101 | "owner": "NixOS",
102 | "repo": "nixpkgs",
103 | "rev": "9370544d849be8a07193e7611d02e6f6f1b10768",
104 | "type": "github"
105 | },
106 | "original": {
107 | "owner": "NixOS",
108 | "repo": "nixpkgs",
109 | "rev": "9370544d849be8a07193e7611d02e6f6f1b10768",
110 | "type": "github"
111 | }
112 | },
113 | "nix": {
114 | "inputs": {
115 | "lowdown-src": "lowdown-src",
116 | "nixpkgs": [
117 | "nixpkgs"
118 | ],
119 | "nixpkgs-regression": [
120 | "nixpkgs"
121 | ]
122 | },
123 | "locked": {
124 | "lastModified": 1674123130,
125 | "narHash": "sha256-zVrzfz3wKNUYo/Qv9ts1B/ECWtKTreP/Pl6rb2fkgmc=",
126 | "owner": "NixOS",
127 | "repo": "nix",
128 | "rev": "38b90c618f5ce4334b89c0124c5a54f339a23db6",
129 | "type": "github"
130 | },
131 | "original": {
132 | "owner": "NixOS",
133 | "repo": "nix",
134 | "rev": "38b90c618f5ce4334b89c0124c5a54f339a23db6",
135 | "type": "github"
136 | }
137 | },
138 | "nixos-hardware": {
139 | "locked": {
140 | "lastModified": 1702453208,
141 | "narHash": "sha256-0wRi9SposfE2wHqjuKt8WO2izKB/ASDOV91URunIqgo=",
142 | "owner": "NixOS",
143 | "repo": "nixos-hardware",
144 | "rev": "7763c6fd1f299cb9361ff2abf755ed9619ef01d6",
145 | "type": "github"
146 | },
147 | "original": {
148 | "owner": "NixOS",
149 | "ref": "master",
150 | "repo": "nixos-hardware",
151 | "type": "github"
152 | }
153 | },
154 | "nixos-imx": {
155 | "inputs": {
156 | "functions": [
157 | "functions"
158 | ],
159 | "installer": [
160 | "installer"
161 | ],
162 | "nixpkgs": [
163 | "nixpkgs"
164 | ],
165 | "wiplib": [
166 | "wiplib"
167 | ]
168 | },
169 | "locked": {
170 | "lastModified": 1690155825,
171 | "narHash": "sha256-4Hvo9GdmKGLOxaIAolErhJrx2bc2cH5K9CLS85ahBaw=",
172 | "owner": "NiklasGollenstede",
173 | "repo": "nixos-imx",
174 | "rev": "1d42b7865e4036a3a0a3115ed47e91d8d7049064",
175 | "type": "github"
176 | },
177 | "original": {
178 | "owner": "NiklasGollenstede",
179 | "repo": "nixos-imx",
180 | "type": "github"
181 | }
182 | },
183 | "nixpkgs": {
184 | "locked": {
185 | "lastModified": 1701802827,
186 | "narHash": "sha256-wTn0lpV75Uv6tU6haEypNsmnJJPb0hpaMIy/4uf5AiQ=",
187 | "owner": "NixOS",
188 | "repo": "nixpkgs",
189 | "rev": "a804fc878d7ba1558b960b4c64b0903da426ac41",
190 | "type": "github"
191 | },
192 | "original": {
193 | "owner": "NixOS",
194 | "ref": "nixos-23.11",
195 | "repo": "nixpkgs",
196 | "type": "github"
197 | }
198 | },
199 | "nixpkgs-unstable": {
200 | "locked": {
201 | "lastModified": 1701718080,
202 | "narHash": "sha256-6ovz0pG76dE0P170pmmZex1wWcQoeiomUZGggfH9XPs=",
203 | "owner": "NixOS",
204 | "repo": "nixpkgs",
205 | "rev": "2c7f3c0fb7c08a0814627611d9d7d45ab6d75335",
206 | "type": "github"
207 | },
208 | "original": {
209 | "owner": "NixOS",
210 | "ref": "nixos-unstable",
211 | "repo": "nixpkgs",
212 | "type": "github"
213 | }
214 | },
215 | "old-nixpkgs": {
216 | "locked": {
217 | "lastModified": 1651726670,
218 | "narHash": "sha256-dSGdzB49SEvdOJvrQWfQYkAefewXraHIV08Vz6iDXWQ=",
219 | "owner": "NixOS",
220 | "repo": "nixpkgs",
221 | "rev": "c777cdf5c564015d5f63b09cc93bef4178b19b01",
222 | "type": "github"
223 | },
224 | "original": {
225 | "owner": "NixOS",
226 | "repo": "nixpkgs",
227 | "rev": "c777cdf5c564015d5f63b09cc93bef4178b19b01",
228 | "type": "github"
229 | }
230 | },
231 | "root": {
232 | "inputs": {
233 | "functions": "functions",
234 | "installer": "installer",
235 | "new-nixpkgs": "new-nixpkgs",
236 | "nix": "nix",
237 | "nixos-imx": "nixos-imx",
238 | "nixpkgs": "nixpkgs",
239 | "nixpkgs-unstable": "nixpkgs-unstable",
240 | "old-nixpkgs": "old-nixpkgs",
241 | "systems": "systems",
242 | "wiplib": "wiplib"
243 | }
244 | },
245 | "systems": {
246 | "locked": {
247 | "lastModified": 1689347949,
248 | "narHash": "sha256-12tWmuL2zgBgZkdoB6qXZsgJEH9LR3oUgpaQq2RbI80=",
249 | "owner": "nix-systems",
250 | "repo": "default-linux",
251 | "rev": "31732fcf5e8fea42e59c2488ad31a0e651500f68",
252 | "type": "github"
253 | },
254 | "original": {
255 | "owner": "nix-systems",
256 | "repo": "default-linux",
257 | "type": "github"
258 | }
259 | },
260 | "wiplib": {
261 | "inputs": {
262 | "config": "config_2",
263 | "functions": [
264 | "functions"
265 | ],
266 | "installer": [
267 | "installer"
268 | ],
269 | "nixos-hardware": "nixos-hardware",
270 | "nixpkgs": [
271 | "nixpkgs"
272 | ],
273 | "systems": [
274 | "systems"
275 | ]
276 | },
277 | "locked": {
278 | "lastModified": 1703152501,
279 | "narHash": "sha256-m6Vlqu0wX4Jg1fcejqf257N41QizI49SrXyU8Dhx9Yw=",
280 | "owner": "NiklasGollenstede",
281 | "repo": "nix-wiplib",
282 | "rev": "7614b86dc87531a11e7c25e43a78bb6530b63af1",
283 | "type": "github"
284 | },
285 | "original": {
286 | "owner": "NiklasGollenstede",
287 | "repo": "nix-wiplib",
288 | "type": "github"
289 | }
290 | }
291 | },
292 | "root": "root",
293 | "version": 7
294 | }
295 |
--------------------------------------------------------------------------------
/flake.nix:
--------------------------------------------------------------------------------
1 | { description = (
2 | "NixOS Configuration for lightweight container systems"
3 | /**
4 | * This flake file defines the main inputs (all except for some files/archives fetched by hardcoded hash) and exports almost all usable results.
5 | * It should always pass »nix flake check« and »nix flake show --allow-import-from-derivation«, which means inputs and outputs comply with the flake convention.
6 | */
7 | ); inputs = {
8 |
9 | # To update »./flake.lock«: $ nix flake update
10 | nixpkgs = { url = "github:NixOS/nixpkgs/nixos-23.11"; };
11 | nixpkgs-unstable = { url = "github:NixOS/nixpkgs/nixos-unstable"; };
12 | old-nixpkgs = { url = "github:NixOS/nixpkgs/c777cdf5c564015d5f63b09cc93bef4178b19b01"; }; # 22.05 @ 2022-05-05
13 | new-nixpkgs = { url = "github:NixOS/nixpkgs/9370544d849be8a07193e7611d02e6f6f1b10768"; }; # 22.05 @ 2022-07-29
14 | functions = { url = "github:NiklasGollenstede/nix-functions"; inputs.nixpkgs.follows = "nixpkgs"; };
15 | installer = { url = "github:NiklasGollenstede/nixos-installer"; inputs.nixpkgs.follows = "nixpkgs"; inputs.functions.follows = "functions"; };
16 | wiplib = { url = "github:NiklasGollenstede/nix-wiplib"; inputs.nixpkgs.follows = "nixpkgs"; inputs.installer.follows = "installer"; inputs.functions.follows = "functions"; inputs.systems.follows = "systems"; };
17 | nixos-imx = { url = "github:NiklasGollenstede/nixos-imx"; inputs.nixpkgs.follows = "nixpkgs"; inputs.installer.follows = "installer"; inputs.functions.follows = "functions"; inputs.wiplib.follows = "wiplib"; };
18 | nix = { url = "github:NixOS/nix/38b90c618f5ce4334b89c0124c5a54f339a23db6"; inputs.nixpkgs.follows = "nixpkgs"; inputs.nixpkgs-regression.follows = "nixpkgs"; };
19 | #aziot-nixos = { url = "github:ef4203/ba-aziot-nixos"; inputs.nixpkgs.follows = "nixpkgs"; inputs.installer.follows = "installer"; inputs.functions.follows = "functions"; inputs.systems.follows = "systems"; };
20 | systems.url = "github:nix-systems/default-linux";
21 |
22 | }; outputs = inputs@{ wiplib, ... }: let patches = let
23 | base = [
24 | inputs.wiplib.patches.nixpkgs.test
25 | inputs.wiplib.patches.nixpkgs.fix-systemd-boot-install
26 | ./patches/nixpkgs/make-required-packages-optional.patch
27 | ];
28 | in rec {
29 |
30 | nixpkgs = base ++ [
31 | # make-switchable is implemented in 23.11
32 | ];
33 | new-nixpkgs = base ++ [
34 | ./patches/nixpkgs/make-bootable-optional.patch
35 | ];
36 | old-nixpkgs = new-nixpkgs;
37 |
38 | nix = [ ./patches/nix-store-send.patch ]; # Applying this patch to the »nix« input (above) implements »nix store send«.
39 |
40 | }; in inputs.functions.lib.patchFlakeInputsAndImportRepo inputs patches ./. (all-inputs@{ self, nixpkgs, ... }: repo@{ overlays, ... }: let
41 | lib = repo.lib.__internal__;
42 |
43 | inputs = builtins.removeAttrs all-inputs [ "new-nixpkgs" "old-nixpkgs" ];
44 |
45 | # The normal build of all hosts:
46 | systemsFlake = lib.installer.mkSystemsFlake {
47 | inputs = inputs; overlayInputs = builtins.removeAttrs inputs [ "nix" ];
48 | };
49 |
50 | # All hosts cross compiled from x64 (which is irrelevant for those already x64):
51 | x64-systemsFlake = lib.installer.mkSystemsFlake {
52 | inputs = inputs; overlayInputs = builtins.removeAttrs inputs [ "nix" ];
53 | buildPlatform = "x86_64-linux";
54 | renameOutputs = key: "x64:${key}";
55 | };
56 |
57 | # The "normal" hosts, but built with a "new"(er) and an "old"(er) version of `nixpkgs`, for update tests:
58 | inherit (lib.fun.mapMerge (age: let
59 | legacyFix = { nixosArgs.modules = [ (args: (let inherit (args) config options; in { # (older versions of nixpkgs require this to be passed)
60 | disabledModules = [ "nixos/lib/eval-config.nix" ]; # this uses now-undefined arguments
61 | options.nixpkgs.hostPlatform = lib.mkOption { }; # cross-building (buildPlatform) not supported here
62 | config.nixpkgs.system = config.nixpkgs.hostPlatform; config.nixpkgs.initialSystem = config.nixpkgs.hostPlatform;
63 | })) ]; };
64 | age-inputs = inputs // { nixpkgs = all-inputs."${age}-nixpkgs"; };
65 | # Note: Any »inputs.nixpkgs.follows = "nixpkgs"« above will always point at the "current" version of »nixpkgs«. »wiplib« and »nixos-imx« use »inputs.nixpkgs.lib« (explicitly, but nothing else).
66 | # »repo«, which gets merged into the outputs, which are also »inputs.self«, used the new »nixpkgs« to import its stuff, so that import has to be repeated:
67 | in lib.fun.importRepo age-inputs ./. (repo: { "${age}-systemsFlake" = lib.installer.mkSystemsFlake (rec {
68 | inputs = age-inputs // { self = self // repo; }; overlayInputs = builtins.removeAttrs inputs [ "nix" ];
69 | renameOutputs = key: "${age}:${key}";
70 | } // legacyFix); })) [ "new" "old" ]) new-systemsFlake old-systemsFlake;
71 |
72 | in [ # Run »nix flake show --allow-import-from-derivation« to see what this merges to:
73 | repo systemsFlake new-systemsFlake old-systemsFlake x64-systemsFlake
74 | (lib.fun.forEachSystem (import inputs.systems) (localSystem: let
75 | pkgs = lib.fun.importPkgs (builtins.removeAttrs inputs [ "nix" ]) { system = localSystem; };
76 | checks = (lib.fun.importWrapped all-inputs "${self}/checks").required pkgs;
77 | packages = lib.fun.getModifiedPackages pkgs overlays;
78 | everything = checks.packages // (lib.genAttrs [ "all-systems" "new:all-systems" "old:all-systems" ] (name: self.packages.${localSystem}.${name})); # ("x64:all-systems" doesn't quite build completely yet)
79 | defaultPackage = pkgs.symlinkJoin { name = "everything"; paths = builtins.attrValues everything; };
80 | #defaultPackage = pkgs.linkFarm "everything" everything;
81 | in {
82 | packages = { default = defaultPackage; } // { nix = inputs.nix.packages.${pkgs.system}.nix; };
83 | checks = packages // checks.checks; inherit (checks) apps;
84 | }))
85 | ]); }
86 |
--------------------------------------------------------------------------------
/hosts/efi/README.md:
--------------------------------------------------------------------------------
1 |
2 | # x86_64/aarch64 EFI Target
3 |
4 | Test configuration of (emulated) x64/arm target devices.
5 |
6 |
7 | ## Installation
8 |
9 | To prepare the virtual machine disk, with `nix` installed (with sandbox support), run:
10 | ```bash
11 | nix run '.#x64' -- install-system /tmp/x64.img
12 | ```
13 | When not running as root (or when additionally supplying the `--vm` flag), the installation is performed in a qemu VM.
14 |
15 | Then, as a user that can use KVM (or, very slowly, without KVM), run the VM(s):
16 | ```bash
17 | nix run '.#x64' -- run-qemu --efi /tmp/x64.img
18 | ```
19 |
20 | Replace the `x64` with `arm` or any of the names of variants listed in [`wip.preface.instances`](./default.nix) to install/start those hosts instead.
21 |
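22 | For example, to install and then boot the `arm-minimal` variant, the same commands are used with only the instance name (and image path) substituted:
23 | ```bash
24 | nix run '.#arm-minimal' -- install-system /tmp/arm-minimal.img
25 | nix run '.#arm-minimal' -- run-qemu --efi /tmp/arm-minimal.img
26 | ```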
--------------------------------------------------------------------------------
/hosts/efi/default.nix:
--------------------------------------------------------------------------------
1 | dirname: inputs: { config, pkgs, lib, name, ... }: let lib = inputs.self.lib.__internal__; in { preface = {
2 | instances = [
3 | "x64" "x64-baseline" "x64-minimal"
4 | "arm" "arm-baseline" "arm-minimal"
5 | ]; # "x64-debug-withForeign" "x64-minimal-withForeign"
6 | }; imports = [ (
7 | lib.th.importMachineConfig inputs dirname
8 | ) ]; }
9 |
--------------------------------------------------------------------------------
/hosts/efi/machine.nix:
--------------------------------------------------------------------------------
1 | dirname: inputs: { config, pkgs, lib, name, ... }: let lib = inputs.self.lib.__internal__; in let
2 | flags = lib.tail (lib.splitString "-" name); hasFlag = flag: builtins.elem flag flags;
3 | isArm = (lib.head (lib.splitString "-" name)) == "arm";
4 | in { imports = [ ({ ## Hardware
5 |
6 | nixpkgs.hostPlatform = if isArm then "aarch64-linux" else "x86_64-linux";
7 | system.stateVersion = "22.05";
8 |
9 | #setup.disks.devices.primary.size = 128035676160;
10 |
11 | boot.kernelParams = [ "console=ttyS0" ];
12 | networking.interfaces.${if isArm then "enp0s1" else "ens3"}.ipv4.addresses = [ { # vBox: enp0s3 ; qemu-x64: ens3 ; qemu-aarch64: enp0s3
13 | address = "10.0.2.15"; prefixLength = 24;
14 | } ];
15 | networking.defaultGateway = "10.0.2.2";
16 | networking.nameservers = [ "1.1.1.1" ]; # [ "10.0.2.3" ];
17 |
18 | th.hermetic-bootloader.loader = "systemd-boot";
19 |
20 | th.minify.shrinkKernel.usedModules = ./minify.lsmod;
21 |
22 | boot.initrd.availableKernelModules = [ "virtio_net" "virtio_pci" "virtio_blk" "virtio_scsi" ];
23 |
24 |
25 | }) (lib.mkIf (false && hasFlag "minimal") { ## Super-minification
26 |
27 | # Just to prove that this can be installed very small (with a 256MiB disk, the »/system« partition gets ~170MiB):
28 | setup.disks.devices.primary.size = "256M"; setup.disks.devices.primary.alignment = 8;
29 | th.hermetic-bootloader.slots.size = lib.mkForce "${toString (32 + 1)}M"; # VBox EFI only supports FAT32
30 | th.target.fs.dataSize = "1K"; fileSystems."/data" = lib.mkForce { fsType = "tmpfs"; device = "tmpfs"; }; # don't really need /data
31 | #fileSystems."/system".formatArgs = lib.mkForce [ "-E" "nodiscard" ]; # (remove »-O inline_data«, which does not work for too small inodes used as a consequence of the tiny FS size, but irrelevant now that we use a fixed inode size)
32 |
33 |
34 | }) (lib.mkIf true { ## Temporary Test Stuff
35 |
36 | services.getty.autologinUser = "root"; users.users.root.hashedPassword = "${lib.fun.removeTailingNewline (lib.readFile "${inputs.self}/utils/res/root.${if (builtins.substring 0 5 inputs.nixpkgs.lib.version) == "22.05" then "sha256" else "yescrypt"}-pass")}"; # .password = "root";
37 |
38 | boot.kernelParams = [ "boot.shell_on_fail" ]; wip.base.panic_on_fail = false;
39 |
40 | wip.services.dropbear.rootKeys = lib.readFile "${inputs.self}/utils/res/niklas-gollenstede.pub";
41 | wip.services.dropbear.hostKeys = [ ../../utils/res/dropbear_ecdsa_host_key ];
42 |
43 | boot.initrd.preLVMCommands = lib.mkIf false (let
44 | inherit (config.system.build) extraUtils;
45 | order = 501; # after console initialization, just before opening luks devices
46 | in lib.mkOrder order ''
47 | setsid ${extraUtils}/bin/ash -c "exec ${extraUtils}/bin/ash < /dev/$console >/dev/$console 2>/dev/$console"
48 | '');
49 |
50 |
51 | }) (lib.mkIf (!hasFlag "minimal") { ## Bloat Test Stuff
52 |
53 | th.minify.enable = lib.mkForce false; th.minify.etcAsOverlay = lib.mkForce false;
54 | environment.systemPackages = lib.mkIf ((flags == [ ]) || (hasFlag "debug")) [ pkgs.curl pkgs.nano pkgs.gptfdisk pkgs.tmux pkgs.htop pkgs.libubootenv ];
55 |
56 | th.hermetic-bootloader.slots.size = lib.mkIf isArm "256M"; # The default arm kernel is much bigger.
57 |
58 |
59 | }) ]; }
60 |
--------------------------------------------------------------------------------
/hosts/efi/minify.lsmod:
--------------------------------------------------------------------------------
1 | aesni_intel 380928 0
2 | agpgart 49152 4 intel_agp,intel_gtt,ttm,drm
3 | ata_generic 16384 0
4 | ata_piix 40960 2
5 | atkbd 36864 0
6 | autofs4 53248 0
7 | backlight 24576 2 drm_kms_helper,drm
8 | bochs 16384 0
9 | bridge 294912 0
10 | button 24576 0
11 | cdrom 77824 1 sr_mod
12 | configfs 57344 1
13 | crc_t10dif 20480 1 t10_pi
14 | crc16 16384 1 ext4
15 | crc32_pclmul 16384 0
16 | crc32c_generic 16384 0
17 | crc32c_intel 24576 3
18 | crct10dif_common 16384 3 crct10dif_generic,crc_t10dif,crct10dif_pclmul
19 | crct10dif_generic 16384 0
20 | crct10dif_pclmul 16384 1
21 | cryptd 24576 2 crypto_simd,ghash_clmulni_intel
22 | crypto_simd 16384 1 aesni_intel
23 | deflate 16384 1
24 | dm_mod 151552 0
25 | drm 643072 6 drm_kms_helper,bochs,drm_vram_helper,drm_ttm_helper,ttm
26 | drm_kms_helper 307200 4 bochs,drm_vram_helper
27 | drm_ttm_helper 16384 2 bochs,drm_vram_helper
28 | drm_vram_helper 24576 1 bochs
29 | efi_pstore 16384 0
30 | efivarfs 16384 1
31 | evdev 24576 2
32 | ext4 913408 1
33 | failover 16384 1 net_failover
34 | fat 86016 1 vfat
35 | fb_sys_fops 16384 1 drm_kms_helper
36 | floppy 90112 0
37 | fuse 151552 1
38 | ghash_clmulni_intel 16384 0
39 | i2c_core 102400 4 drm_kms_helper,psmouse,i2c_piix4,drm
40 | i2c_piix4 28672 0
41 | i8042 36864 0
42 | input_leds 16384 0
43 | intel_agp 20480 0
44 | intel_gtt 24576 1 intel_agp
45 | intel_pmc_core 53248 0
46 | intel_rapl_common 28672 1 intel_rapl_msr
47 | intel_rapl_msr 20480 0
48 | ip_tables 32768 1
49 | ip6_tables 36864 2
50 | ip6t_rpfilter 16384 1
51 | ipt_rpfilter 16384 1
52 | irqbypass 16384 1 kvm
53 | jbd2 167936 1 ext4
54 | joydev 28672 0
55 | kvm 1032192 1 kvm_intel
56 | kvm_intel 344064 0
57 | led_class 20480 1 input_leds
58 | libaes 16384 1 aesni_intel
59 | libata 294912 3 ata_piix,pata_acpi,ata_generic
60 | libcrc32c 16384 2 nf_conntrack,nf_tables
61 | libps2 20480 2 atkbd,psmouse
62 | llc 16384 2 bridge,stp
63 | loop 40960 0
64 | mac_hid 16384 0
65 | macvlan 28672 0
66 | mbcache 16384 1 ext4
67 | mousedev 24576 0
68 | net_failover 24576 1 virtio_net
69 | nf_conntrack 176128 1 xt_conntrack
70 | nf_defrag_ipv4 16384 1 nf_conntrack
71 | nf_defrag_ipv6 24576 1 nf_conntrack
72 | nf_log_syslog 20480 2
73 | nf_tables 262144 65 nft_compat,nft_counter
74 | nfnetlink 20480 2 nft_compat,nf_tables
75 | nft_compat 20480 18
76 | nft_counter 16384 33
77 | nls_cp437 20480 1
78 | nls_iso8859_1 16384 1
79 | overlay 151552 1
80 | parport 45056 2 parport_pc,ppdev
81 | parport_pc 32768 0
82 | pata_acpi 16384 0
83 | ppdev 24576 0
84 | psmouse 167936 0
85 | pstore 28672 2 efi_pstore
86 | qemu_fw_cfg 20480 0
87 | rtc_cmos 28672 1
88 | sch_fq_codel 20480 2
89 | scsi_common 16384 3 scsi_mod,libata,sr_mod
90 | scsi_mod 266240 3 sd_mod,libata,sr_mod
91 | sd_mod 53248 2
92 | serio 28672 6 serio_raw,atkbd,psmouse,i8042
93 | serio_raw 20480 0
94 | sr_mod 28672 0
95 | stp 16384 1 bridge
96 | syscopyarea 16384 1 drm_kms_helper
97 | sysfillrect 16384 1 drm_kms_helper
98 | sysimgblt 16384 1 drm_kms_helper
99 | t10_pi 16384 1 sd_mod
100 | tap 28672 0
101 | tiny_power_button 16384 0
102 | ttm 81920 2 drm_vram_helper,drm_ttm_helper
103 | tun 61440 0
104 | vfat 20480 1
105 | virtio 16384 2 virtio_pci,virtio_net
106 | virtio_net 61440 0
107 | virtio_pci 24576 0
108 | virtio_pci_modern_dev 20480 1 virtio_pci
109 | virtio_ring 36864 2 virtio_pci,virtio_net
110 | x_tables 53248 9 xt_conntrack,ip6t_rpfilter,nft_compat,xt_LOG,xt_tcpudp,ip6_tables,ipt_rpfilter,xt_pkttype,ip_tables
111 | xt_conntrack 16384 2
112 | xt_LOG 20480 2
113 | xt_pkttype 16384 2
114 | xt_tcpudp 20480 7
115 |
--------------------------------------------------------------------------------
/hosts/efi/systems:
--------------------------------------------------------------------------------
1 | ../rpi/systems
--------------------------------------------------------------------------------
/hosts/imx/README.md:
--------------------------------------------------------------------------------
1 |
2 | # i.MX 8M Plus Target
3 |
4 | Test configuration of an i.MX 8M Plus (EVK) as target device.
5 |
6 |
7 | ## Installation
8 |
9 | To prepare the microSD card, adjust the `setup.disks.devices.primary.size` in `./machine.nix` to match the card, and, as `root` and with `nix` installed, run:
10 | ```bash
11 | nix run '.#imx' -- install-system /dev/sdX
12 | ```
13 | Then put the card in the board and boot it.
14 |
15 | As an alternative to running directly as `root` (esp. if `nix` is not installed for root), the above commands can also be run with `sudo` as an additional argument before the `--`.
16 |
17 | To see the serial console during boot, connect the "debug" microUSB port to a different host, and before booting run:
18 | ```bash
19 | nix-shell -p tio --run 'tio /dev/ttyUSB2' # (tio uses the correct settings by default)
20 | ```
21 |
--------------------------------------------------------------------------------
/hosts/imx/default.nix:
--------------------------------------------------------------------------------
1 | dirname: inputs: { config, pkgs, lib, name, ... }: let lib = inputs.self.lib.__internal__; in { preface = {
2 | instances = [
3 | "imx" "imx-baseline" "imx-minimal"
4 | ];
5 | }; imports = [ (
6 | lib.th.importMachineConfig inputs dirname
7 | ) ]; }
8 |
--------------------------------------------------------------------------------
/hosts/imx/machine.nix:
--------------------------------------------------------------------------------
1 | dirname: inputs: { config, pkgs, lib, name, ... }: let lib = inputs.self.lib.__internal__; in let
2 | flags = lib.tail (lib.splitString "-" name); hasFlag = flag: builtins.elem flag flags;
3 | hash = builtins.substring 0 8 (builtins.hashString "sha256" name);
4 | in { imports = [ ({ ## Hardware
5 |
6 | nixpkgs.hostPlatform = "aarch64-linux";
7 | system.stateVersion = "22.05";
8 |
9 | setup.disks.devices.primary.size = 31914983424; #31657558016; #31914983424; #64021856256; #63864569856;
10 |
11 | ## Firmware/bootloader:
12 | th.hermetic-bootloader.loader = "uboot-extlinux";
13 | th.hermetic-bootloader.uboot.base = pkgs.uboot-imx.override { platform = lib.toLower config.nxp.imx8-boot.soc; };
14 | th.hermetic-bootloader.uboot.mmcDev = 1;
15 | th.hermetic-bootloader.uboot.env = config.nxp.imx8-boot.uboot.envVars;
16 | th.hermetic-bootloader.uboot.extraConfig = [ "CONFIG_IMX_WATCHDOG=y" ]; # required on i.MX devices (up to apparently including i.MX8) to enable the watchdog hardware
17 | nxp.imx8-boot.uboot.package = config.th.hermetic-bootloader.uboot.result;
18 | nxp.imx8-boot.enable = true; nxp.imx8-boot.soc = "iMX8MP";
19 | nixpkgs.config.allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg) [ "firmware-imx" ];
20 | boot.loader.generic-extlinux-compatible.enable = lib.mkForce false;
21 | setup.bootpart.enable = lib.mkForce false; # (Why) Do we need to force this?
22 | # The i.MX expects the boot image starting at sector 64. The multiple copies of the GPT would usually conflict with that, so move them:
23 | th.hermetic-bootloader.extraGptOffset = 3 * 1024 * 2; # (3M in sectors)
24 | setup.disks.partitions."bootloader-${hash}" = lib.mkForce null;
25 | boot.kernelParams = [ "console=ttymxc1" ];
26 |
27 | th.minify.shrinkKernel.baseKernel = if ((lib.fileContents "${pkgs.path}/.version") <= "23.05") then pkgs.linux-imx_v8 else pkgs.linux-imx_v8.override (old: { src = pkgs.linux-imx_v8.src.override (old: { postPatch = (old.postPatch or "") + ''
28 | printf '%s\n%s\n' 'CONFIG_AUTOFS_FS=m' "$( cat arch/arm64/configs/imx_v8_defconfig )" > arch/arm64/configs/imx_v8_defconfig
29 | ''; }); }); # The above does not actually propagate into the build b/c the merging order of the kernel args is wrong, so: (EDIT: Well, this option is also disabled. And applying config file changes the way »minify.shrinkKernel« does it should work.)
30 | system.requiredKernelConfig = lib.mkForce [ ];
31 | #th.minify.shrinkKernel.usedModules = ./minify.lsmod; # (There are build errors in at least »drivers/usb/typec/mux/gpio-switch« (undefined symbols).)
32 |
33 | ## Networking:
34 | networking.interfaces.eth0.ipv4.addresses = [ {
35 | address = "192.168.8.86"; prefixLength = 24;
36 | } ];
37 | networking.defaultGateway = "192.168.8.1";
38 | networking.nameservers = [ "1.1.1.1" ];
39 |
40 | #boot.kernelPackages = lib.mkForce pkgs.linuxPackages; # building the i.MX kernel on x64 is quite time consuming
41 | disableModule."tasks/swraid.nix" = true; # The kernel is missing modules required by this.
42 |
43 | system.build.vmExec = lib.mkForce null; # (NixOS thinks that) the »pkgs.linux-imx_v8« kernel is not compatible with the installer VM.
44 |
45 |
46 | }) (lib.mkIf true { ## Test Stuff
47 |
48 | services.getty.autologinUser = "root"; users.users.root.hashedPassword = "${lib.fun.removeTailingNewline (lib.readFile "${inputs.self}/utils/res/root.${if (builtins.substring 0 5 inputs.nixpkgs.lib.version) == "22.05" then "sha256" else "yescrypt"}-pass")}"; # .password = "root";
49 |
50 | boot.kernelParams = [ "boot.shell_on_fail" ]; wip.base.panic_on_fail = false;
51 |
52 | wip.services.dropbear.rootKeys = lib.readFile "${inputs.self}/utils/res/niklas-gollenstede.pub";
53 | wip.services.dropbear.hostKeys = [ ../../utils/res/dropbear_ecdsa_host_key ];
54 |
55 | boot.initrd.preLVMCommands = lib.mkIf false (let inherit (config.system.build) extraUtils; in ''
56 | setsid ${extraUtils}/bin/ash -c "exec ${extraUtils}/bin/ash < /dev/$console >/dev/$console 2>/dev/$console"
57 | '');
58 |
59 |
60 | }) (lib.mkIf (!hasFlag "minimal") { ## Bloat Test Stuff
61 |
62 | th.minify.enable = lib.mkForce false; th.minify.etcAsOverlay = lib.mkForce false;
63 | environment.systemPackages = lib.mkIf ((flags == [ ]) || (hasFlag "debug")) [ pkgs.curl pkgs.nano pkgs.gptfdisk pkgs.tmux pkgs.htop pkgs.libubootenv ];
64 |
65 |
66 | }) ]; }
67 |
--------------------------------------------------------------------------------
/hosts/imx/minify.lsmod:
--------------------------------------------------------------------------------
1 | authenc 16384 1 caam_jr
2 | caam 28672 1 caam_jr
3 | caam_jr 180224 0
4 | caamalg_desc 40960 1 caam_jr
5 | caamhash_desc 16384 1 caam_jr
6 | caamkeyblob_desc 16384 1 caam_jr
7 | can_dev 36864 1 flexcan
8 | crct10dif_ce 20480 1
9 | crypto_engine 20480 1 caam_jr
10 | dw_hdmi_cec 16384 0
11 | error 24576 7 caamalg_desc,secvio,caamkeyblob_desc,caamhash_desc,caam,caam_jr,fsl_jr_uio
12 | flexcan 32768 0
13 | fsl_jr_uio 20480 0
14 | fuse 131072 1
15 | imx_dsp_rproc 20480 0
16 | imx8_media_dev 20480 0
17 | libdes 24576 1 caam_jr
18 | macvlan 32768 0
19 | overlay 122880 1
20 | rng_core 24576 1 caam_jr
21 | secvio 20480 0
22 | snd_soc_fsl_asoc_card 28672 0
23 | snd_soc_fsl_asrc 40960 1 snd_soc_fsl_easrc
24 | snd_soc_fsl_aud2htx 16384 2
25 | snd_soc_fsl_easrc 45056 2
26 | snd_soc_fsl_micfil 40960 2
27 | snd_soc_fsl_sai 40960 4
28 | snd_soc_fsl_xcvr 32768 2
29 | snd_soc_imx_audmux 16384 1 snd_soc_fsl_asoc_card
30 | snd_soc_imx_card 20480 0
31 | snd_soc_imx_hdmi 16384 0
32 | snd_soc_wm8960 49152 1
33 | tap 32768 0
34 |
--------------------------------------------------------------------------------
/hosts/imx/systems:
--------------------------------------------------------------------------------
1 | ../rpi/systems
--------------------------------------------------------------------------------
/hosts/rpi/README.md:
--------------------------------------------------------------------------------
1 |
2 | # Raspberry Pi Target
3 |
4 | Test configuration of a Raspberry PI 4 as target device.
5 |
6 |
7 | ## Installation
8 |
9 | To prepare the **microSD card** (for other boot media, see below), adjust the `setup.disks.devices.primary.size` in `./machine.nix` to match the card, and, as `root` and with `nix` installed, run:
10 | ```bash
11 | nix run '.#rpi' -- install-system /dev/sdX
12 | ```
13 | Then put the card in a PI and boot it.
14 |
15 | As an alternative to running directly as `root` (esp. if `nix` is not installed for root), the above commands can also be run with `sudo` as an additional argument before the `--`.
16 |
17 | To see the serial console during boot, connect the RXD pin of a 3.3V UART adapter to pin 08 (GPIO14 -- TXD) of the PI, TXD to pin 10 (GPIO15 -- RXD), and ground to ground. Then, before booting the PI, run this on the host where the other (USB) end of the adapter is plugged in:
18 | ```bash
19 | nix-shell -p tio --run 'tio /dev/ttyUSBx' # (tio uses the correct settings by default)
20 | ```
21 |
22 |
23 | ### Other Boot Media
24 |
25 | To boot from something other than a microSD (or eMMC on a CM), some things would need to be adjusted:
26 | * The eeprom has to have the medium in its boot order. Newer rPI4 have this by default.
27 | * u-boot has to load its "env" from the boot medium; this needs to work in order to switch to other system configurations. For microSD/eMMC, this is configured via the build-time defines `CONFIG_ENV_IS_IN_MMC=y` and `CONFIG_SYS_MMC_ENV_DEV=X`.
28 | * The `bootcmd` in u-boot's env has to use the correct device. For a USB SSD, this would be: `sysboot usb 0:1 fat ${scriptaddr} /extlinux/extlinux.conf` (see the sketch after this list).
29 | * The kernel may need additional features/modules (in the initrd) to open the device.
30 |
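31 | As a rough sketch (untested, pieced together from the `mmc` example in `./machine.nix` and the `bootcmd` above), booting from a USB SSD once, manually from the u-boot prompt, might look like this; making it permanent still requires the eeprom/env/`bootcmd` adjustments listed above:
32 | ```
33 | usb start
34 | setenv kernel_addr_r 0x00200000
35 | setenv scriptaddr 0x03200000
36 | setenv pxefile_addr_r 0x03300000
37 | setenv fdt_addr_r 0x03400000
38 | setenv ramdisk_addr_r 0x03800000
39 | sysboot usb 0:1 fat ${scriptaddr} /extlinux/extlinux.conf
40 | ```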
--------------------------------------------------------------------------------
/hosts/rpi/default.nix:
--------------------------------------------------------------------------------
1 | dirname: inputs: { config, pkgs, lib, name, ... }: let lib = inputs.self.lib.__internal__; in { preface = {
2 | instances = [
3 | "rpi" "rpi-baseline" "rpi-minimal"
4 | ];
5 | }; imports = [ (
6 | lib.th.importMachineConfig inputs dirname
7 | ) ]; }
8 |
--------------------------------------------------------------------------------
/hosts/rpi/machine.nix:
--------------------------------------------------------------------------------
1 | dirname: inputs: { config, pkgs, lib, name, ... }: let lib = inputs.self.lib.__internal__; in let
2 | flags = lib.tail (lib.splitString "-" name); hasFlag = flag: builtins.elem flag flags;
3 | in { imports = [ ({ ## Hardware
4 |
5 | nixpkgs.hostPlatform = "aarch64-linux";
6 | system.stateVersion = "22.05";
7 |
8 | setup.disks.devices.primary.size = 31914983424; #63864569856; #31657558016; #64021856256; #31914983424;
9 |
10 | networking.interfaces.eth0.ipv4.addresses = [ {
11 | address = "192.168.8.85"; prefixLength = 24;
12 | } ];
13 | networking.defaultGateway = "192.168.8.1";
14 | networking.nameservers = [ "1.1.1.1" ];
15 |
16 | th.hermetic-bootloader.loader = "uboot-extlinux";
17 | th.hermetic-bootloader.uboot.base = pkgs.ubootRaspberryPi4_64bit;
18 | th.hermetic-bootloader.uboot.mmcDev = 1;
19 | th.hermetic-bootloader.uboot.env = {
20 | # From u-boot/u-boot/include/configs/rpi.h:
21 | /* kernel_addr_r = "0x00080000";
22 | scriptaddr = "0x02400000";
23 | pxefile_addr_r = "0x02500000";
24 | fdt_addr_r = "0x02600000";
25 | ramdisk_addr_r = "0x02700000"; */
26 | # 1MB = 0x100000
27 | kernel_addr_r = "0x00200000"; # (u-boot/u-boot/include/configs/rpi.h) suggests 0x00080000, but then uboot moves it here
28 | scriptaddr = "0x03200000"; # +48MB
29 | pxefile_addr_r = "0x03300000";
30 | fdt_addr_r = "0x03400000";
31 | ramdisk_addr_r = "0x03800000";
32 | fdtfile = "-"; # "broadcom/bcm2711-rpi-4-b.dtb"; # If »fdtfile« is not set here, then it seems to default to »broadcom/bcm2711-rpi-4-b.dtb«. If it is not set at all, uboot tries to guess it (and guesses wrong, also the path structure in »config.hardware.deviceTree.package« is not what u-boot expects). If the file fails to load (e.g. because it is set to »-«), u-boot assumes that there is a device tree at »fdt_addr=2eff8e00« already, where indeed the GPU firmware has put the device tree it created from the ».dtb« file and »config.txt«.
33 | /*
34 | setenv kernel_addr_r 0x00200000
35 | setenv scriptaddr 0x03200000
36 | setenv pxefile_addr_r 0x03300000
37 | setenv fdt_addr_r 0x03400000
38 | setenv ramdisk_addr_r 0x03800000
39 | sysboot mmc 0:1 fat ${scriptaddr} /extlinux/extlinux.conf
40 | */
41 | };
42 | hardware.deviceTree.filter = "bcm2711-rpi-4-b.dtb"; # bcm2711-rpi-cm4.dtb
43 | #hardware.deviceTree.overlays = [ { name = "i2c-rtc"; dtboFile = "${pkgs.raspberrypifw}/share/raspberrypi/boot/overlays/i2c-rtc.dtbo"; } ]; # Doesn't do anything (with the »hardware.deviceTree.package« DTB).
44 | th.hermetic-bootloader.slots.size = "128M";
45 | th.hermetic-bootloader.extraFiles = (lib.th.copy-files pkgs "rpi-firmware-slice" ({
46 | "config.txt" = pkgs.writeText "config.txt" (''
47 | avoid_warnings=1
48 | arm_64bit=1
49 | kernel=u-boot.bin
50 | enable_uart=1
51 | disable_splash=1
52 | boot_delay=0
53 | '');
54 | # force_turbo=1 # could try this
55 | # gpu_mem=16 # these three options in combination reduce the GPUs capabilities (or something like that), but may also reduce boot time a bit
56 | # start_file=start4cd.elf
57 | # fixup_file=fixup4cd.dat
58 | #dtparam=i2c_vc=on # Doesn't do anything (with the »hardware.deviceTree.package« DTB).
59 | #dtoverlay=i2c-rtc,pcf85063a,i2c_csi_dsi,addr=0x51
60 | # (»gpu_mem=16« would enable the use of "cut down" (»_cd«-suffixed) GPU firmware (minimal GPU support))
61 | # »enable_uart=1« is also required for u-boot to use the UART (and work at all?).
62 | # TODO: Something is wrong with bluetooth, could disable it (»dtoverlay=disable-bt«, if that overlay exists), but that may affect serial console output as well.
63 | "u-boot.bin" = "${config.th.hermetic-bootloader.uboot.result}/u-boot.bin";
64 | "bcm2711-rpi-4-b.dtb" = "${config.hardware.deviceTree.package}/broadcom/bcm2711-rpi-4-b.dtb"; # This is from the kernel build (and also works for the CM4).
65 |
66 | # With these, the PI (CM4) does not boot:
67 | #"bcm2711-rpi-4-b.dtb" = "${pkgs.raspberrypifw}/share/raspberrypi/boot/bcm2711-rpi-4-b.dtb";
68 | #"bcm2711-rpi-cm4.dtb" = "${pkgs.raspberrypifw}/share/raspberrypi/boot/bcm2711-rpi-cm4.dtb";
69 | #"overlays/i2c-rtc.dtbo" = "${pkgs.raspberrypifw}/share/raspberrypi/boot/overlays/i2c-rtc.dtbo";
70 |
71 | # The rPI4 does not need a »bootcode.bin« since it has the code in its eeprom.
72 | # https://www.raspberrypi.com/documentation/computers/configuration.html#start-elf-start_x-elf-start_db-elf-start_cd-elf-start4-elf-start4x-elf-start4cd-elf-start4db-elf
73 | } // (lib.fun.mapMerge (name: {
74 | ${name} = "${pkgs.raspberrypifw}/share/raspberrypi/boot/${name}";
75 | }) [ # Only one pair (with 4) of these is necessary:
76 | /* "start.elf" "start_cd.elf" "start_x.elf" "start_db.elf" */ "start4.elf" /* "start4cd.elf" "start4x.elf" "start4db.elf" */
77 | /* "fixup.dat" "fixup_cd.dat" "fixup_x.dat" "fixup_db.dat" */ "fixup4.dat" /* "fixup4cd.dat" "fixup4x.dat" "fixup4db.dat" */
78 | ])));
79 | boot.kernelParams = [ "console=tty1" /* "console=ttyS0,115200" */ "console=ttyS1,115200" /* "console=ttyAMA0,115200" */ ]; # (With bluetooth present) »ttyS0« connects to the (mini) uart1 at pins 08+10, which needs the »enable_uart=1« (which may limit system performance) in »config.txt« to work. Without any »console=...«, initrd and the kernel log to uart1.
80 |
81 | th.minify.shrinkKernel.usedModules = ./minify.lsmod; # (this works fine when compiling natively / through qemu, but when cross-compiling, the ./dtbs/ dir is missing)
82 |
83 | #th.target.watchdog.enable = lib.mkForce false;
84 |
85 |
86 | }) /* (lib.mkIf (hasFlag "minimal") { ## Super-minification
87 |
88 | # Just to prove that this can be installed on a very small disk (with a 640MiB disk, the »/system« partition gets ~360MiB):
89 | setup.disks.devices.primary.size = lib.mkForce "640M"; setup.disks.devices.primary.alignment = 8;
90 | th.target.fs.dataSize = "1K"; fileSystems."/data" = lib.mkForce { fsType = "tmpfs"; device = "tmpfs"; }; # don't really need /data
91 | #fileSystems."/system".formatArgs = lib.mkForce [ "-E" "nodiscard" ]; # (remove »-O inline_data«, which does not work for too small inodes used as a consequence of the tiny FS size, but irrelevant now that we use a fixed inode size)
92 |
93 |
94 | }) */ (lib.mkIf true { ## Temporary Test Stuff
95 |
96 | services.getty.autologinUser = "root"; users.users.root.hashedPassword = "${lib.fun.removeTailingNewline (lib.readFile "${inputs.self}/utils/res/root.${if (builtins.substring 0 5 inputs.nixpkgs.lib.version) == "22.05" then "sha256" else "yescrypt"}-pass")}"; # .password = "root";
97 |
98 | boot.kernelParams = [ "boot.shell_on_fail" ]; wip.base.panic_on_fail = false;
99 |
100 | wip.services.dropbear.rootKeys = lib.readFile "${inputs.self}/utils/res/niklas-gollenstede.pub";
101 | wip.services.dropbear.hostKeys = [ ../../utils/res/dropbear_ecdsa_host_key ];
102 |
103 |
104 | }) (lib.mkIf (!hasFlag "minimal") { ## Bloat Test Stuff
105 |
106 | th.minify.enable = lib.mkForce false; th.minify.etcAsOverlay = lib.mkForce false;
107 | environment.systemPackages = lib.mkIf ((flags == [ ]) || (hasFlag "debug")) [ pkgs.curl pkgs.nano pkgs.gptfdisk pkgs.tmux pkgs.htop pkgs.libubootenv ];
108 |
109 |
110 | }) ]; }
111 |
--------------------------------------------------------------------------------
/hosts/rpi/minify.lsmod:
--------------------------------------------------------------------------------
1 | bcm_phy_lib 28672 1 broadcom
2 | bcm2711_thermal 16384 0
3 | bcm2835_mmal_vchiq 36864 1 bcm2835_v4l2
4 | bcm2835_v4l2 45056 0
5 | bluetooth 647168 5 btqca,btsdio,hci_uart,btbcm
6 | brcmfmac 323584 0
7 | brcmutil 20480 1 brcmfmac
8 | bridge 278528 0
9 | broadcom 24576 1
10 | btbcm 28672 1 hci_uart
11 | btqca 24576 1 hci_uart
12 | btsdio 20480 0
13 | cec 57344 1 vc4
14 | cfg80211 892928 1 brcmfmac
15 | clk_raspberrypi 16384 7
16 | crct10dif_ce 20480 1
17 | dm_mod 143360 0
18 | drm 634880 3 drm_kms_helper,vc4
19 | drm_kms_helper 294912 2 vc4
20 | ecc 36864 1 ecdh_generic
21 | ecdh_generic 16384 1 bluetooth
22 | fuse 147456 1
23 | genet 69632 0
24 | hci_uart 118784 0
25 | i2c_bcm2835 20480 0
26 | ip_tables 32768 1
27 | ip6_tables 32768 2
28 | ip6t_rpfilter 16384 1
29 | iproc_rng200 16384 0
30 | ipt_rpfilter 16384 1
31 | libcrc32c 16384 2 nf_conntrack,nf_tables
32 | llc 20480 2 bridge,stp
33 | macvlan 32768 0
34 | mc 65536 3 videodev,videobuf2_v4l2,videobuf2_common
35 | mdio_bcm_unimac 20480 0
36 | nf_conntrack 180224 1 xt_conntrack
37 | nf_defrag_ipv4 16384 1 nf_conntrack
38 | nf_defrag_ipv6 24576 1 nf_conntrack
39 | nf_log_syslog 28672 2
40 | nf_tables 217088 65 nft_compat,nft_counter
41 | nfnetlink 20480 2 nft_compat,nf_tables
42 | nft_compat 20480 18
43 | nft_counter 16384 33
44 | nls_cp437 20480 1
45 | nls_iso8859_1 16384 1
46 | nvmem_rmem 16384 0
47 | overlay 139264 1
48 | pcie_brcmstb 24576 0
49 | pwm_bcm2835 16384 0
50 | raspberrypi_cpufreq 16384 0
51 | raspberrypi_hwmon 16384 0
52 | reset_raspberrypi 16384 1
53 | rfkill 40960 3 bluetooth,cfg80211
54 | rng_core 24576 1 iproc_rng200
55 | sch_fq_codel 28672 6
56 | snd_bcm2835 32768 0
57 | snd_soc_hdmi_codec 24576 2
58 | stp 20480 1 bridge
59 | tap 32768 0
60 | uio 24576 1 uio_pdrv_genirq
61 | uio_pdrv_genirq 20480 0
62 | vc4 249856 4
63 | vchiq 344064 2 snd_bcm2835,bcm2835_mmal_vchiq
64 | videobuf2_common 65536 4 videobuf2_vmalloc,videobuf2_v4l2,bcm2835_v4l2,videobuf2_memops
65 | videobuf2_memops 20480 1 videobuf2_vmalloc
66 | videobuf2_v4l2 32768 1 bcm2835_v4l2
67 | videobuf2_vmalloc 20480 1 bcm2835_v4l2
68 | videodev 270336 3 videobuf2_v4l2,bcm2835_v4l2,videobuf2_common
69 | x_tables 49152 9 xt_conntrack,ip6t_rpfilter,nft_compat,xt_LOG,xt_tcpudp,ip6_tables,ipt_rpfilter,xt_pkttype,ip_tables
70 | xhci_pci 24576 0
71 | xhci_pci_renesas 20480 1 xhci_pci
72 | xt_conntrack 16384 2
73 | xt_LOG 20480 2
74 | xt_pkttype 16384 2
75 | xt_tcpudp 20480 7
76 |
--------------------------------------------------------------------------------
/hosts/rpi/systems/test1.nix.md:
--------------------------------------------------------------------------------
1 | /*
2 |
3 | # Test System `test1`
4 |
5 | ## Implementation
6 |
7 | ```nix
8 | #*/# end of MarkDown, beginning of NixOS config:
9 | dirname: inputs: { config, pkgs, lib, name, ... }: let lib = inputs.self.lib.__internal__; in let
10 | suffix = builtins.elemAt (builtins.match ''[^-]+(-(.*))?'' name) 1;
11 | flags = if suffix == null then [ ] else lib.splitString "-" suffix; hasFlag = flag: builtins.elem flag flags;
12 | in { imports = [ ({ ## Hardware
13 |
14 | th.target.containers.enable = true;
15 | th.target.containers.containers.native = (
16 | (lib.fun.importWrapped inputs "${inputs.self}/containers/native.nix.md").required pkgs
17 | ) // {
18 | sshKeys.root = [ (lib.readFile "${inputs.self}/utils/res/ssh_dummy_1.pub") ];
19 | # ssh -o "IdentitiesOnly=yes" -i res/ssh_dummy_1 target -> root@native
20 |
21 | # ... system integration ...
22 | };
23 |
24 | th.target.containers.containers.foreign = lib.mkIf (hasFlag "withForeign") (( # (remove the dependency on this while working on other stuff)
25 | (lib.fun.importWrapped inputs "${inputs.self}/containers/foreign.nix.md").required pkgs
26 | ) // {
27 | # ... system integration ...
28 | });
29 |
30 | networking.firewall.allowedTCPPorts = [ 8000 8001 ];
31 |
32 | }) ]; }
33 |
--------------------------------------------------------------------------------
/lib/data/data.py:
--------------------------------------------------------------------------------
1 |
2 | import sys
3 | import pandas as pd
4 | from pathlib import Path
5 | from plydata import *
6 | from osg.pandas import *
7 | from parse_logs import read_log
8 |
9 | _dir = Path(__file__).parent if len(sys.argv) < 2 else Path(sys.argv[1]) # Well. This is ugly. Should have used an env var instead ...
10 |
11 | update_nix_cat = """
12 | none|FD|#0570b0
13 | 64|FD+64|#74a9cf
14 | 4k|FD+4K|#bdc9e1
15 | refs|FD+R|#d7301f
16 | refs+64|FD+R+64|#fc8d59
17 | refs+4k|FD+R+4K|#fdcc8a
18 | bsd-nar+none|BSD(Comp)|#238b45
19 | bsd+none|FD+BSD(File)|#66c2a4
20 | bsd+refs+4k|FD+R+4K+BSD(Chunk)|#b2e2e2
21 | """
22 | update_nix_cat = [line.strip().split("|") for line in update_nix_cat.strip().split("\n")]
23 |
24 |
25 | update_nix: pd.DataFrame = pd.read_csv(_dir / 'nix_store_send.csv')
26 | update_nix = (
27 | update_nix
28 | >> define(transferWeight='comp_p')
29 | >> define(
30 | originalChunkingType='chunkingType',
31 | chunkingType=mapvalues('chunkingType', [x[0] for x in update_nix_cat], [x[1] or x[0] for x in update_nix_cat])
32 | )
33 | >> query('~chunkingType.isna()')
34 | )
35 |
36 | systems: pd.DataFrame = read_directory(_dir / 'systems')
37 | systems = (systems >> define(
38 | is_elf='file_desc.str.contains("ELF") | file_desc.str.contains("Linux kernel")',
39 | is_file='~mime_type.isin(["os/directory", "os/symlink"])',
40 | is_dir='mime_type.isin(["os/directory"])',
41 | is_symlink='mime_type.isin(["os/symlink"])',
42 | ))
43 | systems['files'] = 1
44 | systems['component_slug'] = systems.component.transform(lambda x: x.split("-", 1)[1])
45 | systems['component_slug'] = systems.component_slug.str.replace("raspberry", "r")
46 | #for pat in "-bin -2.34-210 -1.20220331 -78.15.0 -2.37.4".split():
47 | for pat in "-bin -210".split():
48 | systems['component_slug'] = systems.component_slug.str.replace(pat, "", regex=False)
49 |
50 | #
51 | #y = [6390883977.900553, 4727900552.486188, 3606353591.1602216, 6332872928.176796, 4631215469.61326, 3944751381.2154694, 4805248618.784531, 4147790055.248619, 3364640883.9779005, 3683701657.458564, 2301104972.3756914, 1682320441.9889507, 1053867403.3149176, 986187845.3038673, 879834254.1436462, 3325966850.8287296, 3287292817.679558, 3055248618.784531]
52 | #distributions = ['default', 'alt', 'slim', 'bullseye', 'alpine', 'alpine+']
53 | #data = [[distributions[i // 3]] + y[i:i+3] for i in range(0, len(y), 3)]
54 | #methods = ['Uncompressed', "Shared Layers", "Shared Files"]
55 |
56 | #df = pd.DataFrame(data=data,columns=['Base Image'] + methods)
57 | # df.to_csv('/tmp/test.csv',index=False)
58 | oci_combined = pd.read_csv(_dir/'oci-combined.csv')
59 | oci_individual = pd.read_csv(_dir/'oci-individual.csv')
60 |
61 | reboot_logs : pd.DataFrame = read_directory(_dir / 'logs', read=read_log)
62 |
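63 | # Inputs expected next to this file (or in the directory passed as the first CLI argument):
64 | # 'nix_store_send.csv', 'oci-combined.csv', 'oci-individual.csv', a 'systems/' directory
65 | # (read via osg.pandas.read_directory) and a 'logs/' directory (parsed with parse_logs.read_log).
66 | # 'dref.py' and the 'fig-*.py' scripts import this module to get the prepared data frames.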
--------------------------------------------------------------------------------
/lib/data/dref.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | from pathlib import Path
4 | from plydata import *
5 | from plydata.tidy import *
6 | from plydata.cat_tools import *
7 | import data
8 | import sys
9 | from scipy.stats.mstats import gmean  # from scipy.stats import geom
10 | from versuchung.tex import DatarefDict
11 | import numpy as np
12 | import osg
13 |
14 | outDir = data._dir if len(sys.argv) < 2 else Path(sys.argv[1])
15 |
16 | dref = DatarefDict(outDir / "dref.tex")
17 | dref.clear()
18 |
19 | base_systems = (
20 | data.systems
21 | >> group_by('variant')
22 | >> summarize(
23 | total_size='sum(st_size)',
24 | elf_size='sum(st_size * is_elf)',
25 | components='len(set(component))',
26 | files='sum(is_file)',
27 | directories='sum(is_dir)',
28 | symlinks='sum(mime_type == "os/symlink")',
29 | )
30 | >> rename_all(lambda x: 'store/' + x)
31 | )
32 |
33 | dref.pandas(base_systems.set_index(['store/variant']))
34 |
35 |
36 | # TOP X Components
37 | data.systems['files'] = 1
38 | df = data.systems.groupby(['variant', 'component_slug']).agg(dict(st_size=sum, files=sum)).reset_index()
39 |
40 | df = (df
41 | >> group_by('variant')
42 | >> arrange('-st_size')
43 | >> head(10)
44 | >> define(rank='range(0,10)')
45 | )
46 |
47 | dref.pandas(df.set_index(['rank','variant'])[['component_slug', 'st_size']],
48 | prefix='top-n')
49 |
50 | top_files = (
51 | data.systems
52 | >> group_by('variant', 'component_slug')
53 | >> summarize(files="sum(files)")
54 | >> pivot_wider(names_from="variant", values_from="files")
55 | >> arrange('-Q("x64/baseline")')
56 | >> head(5)
57 | >> rename_all(lambda x: x + "/files")
58 | )
59 | dref.pandas(top_files.set_index('component_slug/files').T)
60 |
61 | dref.pandas(data.oci_combined.set_index('Base Image'), 'oci-combined')
62 |
63 | df = (data.oci_individual
64 | >> pivot_wider(names_from="service", values_from="size"))
65 |
66 | dref.pandas(df.set_index("variant"), 'oci-individual')
67 |
68 |
69 | dref.pandas(data.update_nix.set_index(['systemType', 'after', 'originalChunkingType'])[['comp_p']],
70 | 'update')
71 |
72 | for g, df in data.update_nix.groupby(['originalChunkingType']):
73 | x = osg.pandas.select_quantiles(df, q=[0,1], columns=['comp_p'],
74 | q_labels=True, value_cols=['systemType', 'after', 'comp_p'], value_col='value')
75 | dref.pandas(x, f'update/{g}')
76 |
77 | base_update=(data.update_nix
78 | >> group_by('systemType', 'after')
79 | >> summarize(file_s='file_s.values[0]/2**20'))
80 |
81 | dref.pandas(base_update.set_index(['systemType', 'after']),
82 | 'update/baseline')
83 |
84 | dref.pandas(data.reboot_logs.describe().T,
85 | 'reconf')
86 |
87 | dref.flush()
88 |
89 |
--------------------------------------------------------------------------------
/lib/data/fig-oci_combined.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | from pathlib import Path
4 | import sys
5 | #import numpy as np
6 | import pandas as pd
7 | from plotnine import *
8 | from plydata import *
9 | from plydata.tidy import *
10 | from osg.pandas import reorder_by, mapvalues
11 |
12 | import data
13 |
14 | outDir = data._dir / '..' / 'fig' if len(sys.argv) < 2 else Path(sys.argv[1])
15 |
16 | distributions = ['default', 'alt', 'slim', 'bullseye', 'alpine']#s, 'alpine+']
17 | methods = ['Uncompressed', "Shared Layers", "Shared Files"]
18 |
19 | df = (data.oci_combined
20 | >> pivot_longer(cols=select(*methods), names_to='Method', values_to='Size')
21 | >> do(reorder_by('Method', methods))
22 | >> do(reorder_by('Base Image', distributions))
23 | >> group_by('Base Image')
24 | >> define(Size_Rel='Size/max(Size)'))
25 |
26 | oci_combined = (ggplot(df, aes(x='Base Image', y='Size_Rel* 100', fill='Method'))
27 | + geom_col(position='dodge2')
28 | + scale_fill_manual(values = ['tab:blue', 'tab:red', 'tab:green'])
29 | + geom_text(aes(y=5, label='Size/2**30'), angle=90, position=position_dodge(width=0.89), format_string='{:.1f} GiB', va='bottom')
30 | + labs(y='Relative Size to Uncomb. [%]',x="")# fill='')
31 | + annotate("path", x=['default', 'default'], y=[75, 85], color="tab:red", arrow=arrow(length=0.05, type="closed", ends="first", angle=15))
32 | + annotate("text", x='default', y=85, label='Docker',size=9, ha='left', va='bottom', color='tab:red', nudge_x=-0.1)
33 | + annotate("path", x=['alt', 'alt'], y=[65, 85], position=position_nudge([0.29,0.29]), color="tab:green", arrow=arrow(length=0.05, type="closed", ends="first", angle=15))
34 | + annotate("text", x='alt', y=85, label='reUpNix',size=9, ha='left', va='bottom', color='tab:green', nudge_x=-0.1)
35 | + theme(
36 | figure_size=(6, 3),
37 | legend_position=(0.5,0.98),
38 | legend_title=element_blank(),
39 | legend_background=element_rect(fill='#fff0', color='#000', size=1),
40 | legend_box_margin=2,
41 | axis_text_x=element_text(rotation=45),
42 | panel_grid_major_x=element_blank(),
43 | )
44 | )
45 |
46 | save_as_pdf_pages([oci_combined], outDir / 'oci_combined.pdf')
47 |
--------------------------------------------------------------------------------
/lib/data/fig-reboot.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | from pathlib import Path
4 | import sys
5 | #import numpy as np
6 | import pandas as pd
7 | from plotnine import *
8 | from plydata import *
9 | from plydata.tidy import *
10 | from osg.pandas import reorder_by, mapvalues
11 | import numpy as np
12 |
13 | import data
14 |
15 | outDir = data._dir / '..' / 'fig' if len(sys.argv) < 2 else Path(sys.argv[1])
16 |
17 | phases = ['poweroff', 'firmware', 'uboot', 'nixos']
18 | phase_names = ['PowerOff', "RPi Firmw.", 'U-Boot', 'reUpNix']
19 |
20 | means = (data.reboot_logs
21 | >> select(*phases)
22 | >> summarize_all('mean'))
23 | print(means.iloc[0].to_list())
24 |
25 | pdf = (means
26 | >> pivot_longer(cols=select(*phases),
27 | names_to='component', values_to='duration')
28 | >> do(reorder_by('component', reversed(phases)))
29 | >> define(component_label=mapvalues('component', phases, phase_names))
30 | >> (ggplot(aes(y='duration', x=1))
31 | + geom_col(aes(y='duration', fill='component'), show_legend=False, width=0.5)
32 | + geom_text(aes(label='duration', x=1), format_string='{:.1f}',
33 | position=position_stack(vjust=0.5))
34 | + geom_text(aes(label='component_label', x=1.5),
35 | position=position_stack(vjust=0.5))
36 | + coord_flip() + lims(x=[0.7,1.6])
37 | + labs(y='Boot Time [s]',x=None)
38 | + scale_fill_manual(['#e41a1c','#377eb8','#4daf4a','#984ea3'])
39 | + scale_y_continuous(breaks=[0]+means.iloc[0].cumsum().to_list())
40 | + theme_minimal()
41 | + theme(
42 | figure_size=(6,1),
43 | #legend_position=(0.,1),
44 | panel_grid_major_y=element_blank(),
45 | panel_grid_minor_y=element_blank(),
46 | axis_text_y=element_blank(),
47 | axis_text_x=element_text(margin=dict(t=-8)),
48 | axis_title_y=element_blank(),
49 | axis_ticks_major_y=element_blank(),
50 | # axis_ticks_minor_y=element_blank(),
51 | legend_title=element_blank(),
52 | legend_background=element_rect(fill='#fff0', color='#0000', size=1),
53 | #legend_box_margin=2,
54 | )
55 | )
56 | )
57 |
58 | save_as_pdf_pages([pdf], outDir / 'reboot.pdf')
59 |
--------------------------------------------------------------------------------
/lib/data/fig-update-size.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | from pathlib import Path
4 | import sys
5 | #import numpy as np
6 | import pandas as pd
7 | from plotnine import *
8 | from plydata import *
9 | from osg.pandas import reorder_by, mapvalues
10 |
11 | import data
12 |
13 | outDir = data._dir / '..' / 'fig' if len(sys.argv) < 2 else Path(sys.argv[1])
14 |
15 | L = labeller(cols=lambda x: data.facet_labels[x])
16 |
17 | print(set(data.update_nix.chunkingType.astype('category')))
18 |
19 | cat = data.update_nix_cat
20 |
21 | df = data.update_nix
22 | df = df[~((df.systemType == 'withOci') & (df.before == 'old') & (df.after == 'app') & (df.originalChunkingType == 'bsd-nar+none'))] # exclude this, as it fails sporadically
23 |
24 | df: pd.DataFrame = (df
25 | >> define(
26 | systemType=mapvalues('systemType',
27 | 'minimal noKernel withMqtt withOci'.split(),
28 | "Base System|Base w/o Kernel|MQTT/Nix|MQTT/OCI".split('|')),
29 | changeType=mapvalues('after',
30 | 'clb new app'.split(),
31 | 'Update Libc|75 Days|Version Update'.split("|"))
32 | )
33 | #>> define(usesBSdiff=lambda df: list(map(lambda t: t.startswith('bsd+'), df['chunkingType'])))
34 | #>> define(chunkingType=lambda df: df['chunkingType'].map(lambda t: t.replace('bsd+ ')))
35 | #>> define(transferWeight='comp_p*(time_U+time_S)')
36 | #>> define(transferWeight='time_M')
37 | >> define(
38 | label_va=if_else('transferWeight < 15','"bottom"', '"top"'),
39 | label=if_else('transferWeight <1','transferWeight.round(2)', 'transferWeight.round(1)'),
40 | )
41 | )
42 |
43 | base_update=(df
44 | >> group_by('systemType', 'changeType')
45 | >> summarize(file_s='file_s.values[0]/2**20'))
46 |
47 | transfer = (ggplot(df, aes(x='chunkingType', y='transferWeight', fill='chunkingType'))
48 | + geom_col(position='dodge2')
49 | + scale_fill_manual(values={(x[1] or x[0]): x[2] for x in cat})
50 | + facet_grid('changeType ~ systemType')#, scales='free')
51 | + labs(x='', y='Remaining Transfer Size [%]', fill='Chunking')
52 | + geom_text(aes(va='label_va', label='label'), angle=90, position=position_dodge(width=0.9), size=7,
53 | format_string=" {} ")
54 | + guides(fill=guide_legend(nrow=2))
55 | + geom_label(aes(x='"FD+R+4K+BSD(Block)"',y=34,label='file_s'), data=base_update,
56 | format_string="{:.1f} MiB",size=7,inherit_aes=False,va='top',ha='right',
57 | )
58 | + theme(
59 | legend_position=(0.5,1),
60 | axis_text_x=element_blank(),
61 | axis_ticks_major_x=element_blank(),
62 | legend_title=element_blank(),
63 | legend_background=element_rect(fill='#fff0', color='#0000', size=1),
64 | legend_box_margin=2,
65 | )
66 | )
67 |
68 | #save_as_pdf_pages([transfer], data._dir / __import__('os').path.basename(__file__).replace('.py', '.pdf'))
69 | save_as_pdf_pages([transfer], outDir / 'update-size.pdf')
70 |
--------------------------------------------------------------------------------
/lib/data/parse_logs.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import pandas as pd
4 | import re
5 | import sys
6 | import numpy as np
7 | from osg.pandas import read_directory
8 |
9 | def parse(fn):
10 | data = []
11 | with open(fn) as fd:
12 | for line in fd.readlines():
13 | if not line.strip(): continue
14 |             m = re.match(r'\[(\d\d):(\d\d):(\d\d).(\d\d\d)\] (.*)', line)
15 |             if not m:
16 |                 print(line) ; continue # skip lines without a timestamp prefix
17 | h,m,s,ms,log = m.groups()
18 | ts = int(h) *3600 + int(m)*60+int(s) + int(ms)/1000.0
19 | data.append([ts, log])
20 | return pd.DataFrame(data=data,columns=['timestamp', 'msg'])
21 |
22 | def categorize(msg):
23 | if 'Stopping session-2.scope' in msg:
24 | return 'poweroff_start'
25 | if 'reboot: Restarting system' in msg:
26 | return 'poweroff_stop'
27 | if 'U-Boot 2022.01' in msg:
28 | return 'uboot_start'
29 | if re.match('Retrieving file:.*initrd', msg):
30 | return 'load_initrd'
31 | if re.match('Retrieving file:.*Image', msg):
32 | return 'load_kernel'
33 | if 'Starting kernel ...' in msg:
34 | return 'uboot_stop'
35 | if 'NixOS Stage 1' in msg:
36 | return 'stage1_start'
37 | if 'NixOS Stage 2' in msg:
38 | return 'stage2_start'
39 | if 'Reached target multi-user.target' in msg:
40 | return 'reboot_stop'
41 | return np.nan
42 |
43 | def condense(fn,df):
44 | data = {}
45 | for _,row in df[~df.event.isna()].iterrows():
46 | data[row.event] = row
47 | return dict(
48 | fn=fn,
49 | total=data['reboot_stop'].timestamp - data['poweroff_start'].timestamp,
50 | poweroff=data['poweroff_stop'].timestamp - data['poweroff_start'].timestamp,
51 | firmware=data['uboot_start'].timestamp - data['poweroff_stop'].timestamp,
52 | uboot=data['uboot_stop'].timestamp - data['uboot_start'].timestamp,
53 | nixos=data['reboot_stop'].timestamp - data['uboot_stop'].timestamp,
54 | kernel=data['load_kernel'].timestamp_next - data['load_kernel'].timestamp,
55 | initrd=data['load_initrd'].timestamp_next - data['load_initrd'].timestamp,
56 | )
57 |
58 | def read_log(fn):
59 | df = parse(fn)
60 | df['event'] = df.msg.apply(categorize)
61 | df['timestamp_next'] = df.timestamp.shift(-1)
62 | return pd.DataFrame(data=[condense(fn,df)])
63 |
64 | if __name__ == "__main__":
65 | df = read_directory(sys.argv[1], read=read_log)
66 | print(df)
67 |
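To make the parsing and phase arithmetic above concrete, here is a minimal, self-contained sketch; the serial-log lines and timestamps below are invented for illustration, whereas the real script reads whole log files via `read_directory`:

```python
import re
import pandas as pd

# Hypothetical serial-log excerpt (one line per recognizable boot event).
lines = [
    "[00:00:01.000] Stopping session-2.scope ...",       # poweroff_start
    "[00:00:03.500] reboot: Restarting system",          # poweroff_stop
    "[00:00:07.250] U-Boot 2022.01 (Jan 01 2022)",       # uboot_start
    "[00:00:09.000] Starting kernel ...",                # uboot_stop
    "[00:00:21.750] Reached target multi-user.target",   # reboot_stop
]

rows = []
for line in lines:
    h, m, s, ms, msg = re.match(r'\[(\d\d):(\d\d):(\d\d).(\d\d\d)\] (.*)', line).groups()
    rows.append((int(h)*3600 + int(m)*60 + int(s) + int(ms)/1000.0, msg))
df = pd.DataFrame(rows, columns=['timestamp', 'msg'])

# The same phase arithmetic as condense(): durations between categorized events.
ts = dict(zip(['poweroff_start', 'poweroff_stop', 'uboot_start', 'uboot_stop', 'reboot_stop'], df.timestamp))
print('poweroff:', ts['poweroff_stop'] - ts['poweroff_start'])  # 2.5
print('firmware:', ts['uboot_start'] - ts['poweroff_stop'])     # 3.75
print('uboot:   ', ts['uboot_stop'] - ts['uboot_start'])        # 1.75
print('nixos:   ', ts['reboot_stop'] - ts['uboot_stop'])        # 12.75
```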
--------------------------------------------------------------------------------
/lib/data/system-listing.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import sys
4 | from pathlib import Path
5 | import os
6 | import pandas as pd
7 | import magic
8 |
9 |
10 | if len(sys.argv) != 4: print(f"Usage: {sys.argv[0]} SYSTEM_ID NIX_STORE_DIR OUTPUT_PATH") ; sys.exit(-1)
11 | _, systemId, nixStoreDir, outputPath = sys.argv ; nixStoreDir = Path(nixStoreDir) ; outputPath = Path(outputPath)
12 |
13 |
14 | inodes = set()
15 | def stat_file(path: Path):
16 | # Nix Store Deduplication
17 | stat = path.lstat()
18 | if stat.st_ino in inodes:
19 | return None
20 | inodes.add(stat.st_ino)
21 |
22 | d = dict(name=path,
23 | st_size = stat.st_size,
24 | )
25 |
26 | if path.is_symlink():
27 | d['mime_type'] = 'os/symlink'
28 | d['file_desc'] = 'Symlink'
29 | elif path.is_file():
30 | try:
31 | with open(path, 'rb') as fd:
32 | data = fd.read(2048)
33 | d['mime_type'] = magic.from_buffer(data, mime=True)
34 | d['file_desc'] = magic.from_buffer(data)
35 | except Exception as err:
36 | if path.suffix == '.lock': return None
37 | raise err
38 | elif path.is_dir():
39 | d['mime_type'] = 'os/directory'
40 | d['file_desc'] = 'Directory'
41 | else:
42 | assert False, path
43 |
44 | return d
45 |
46 |
47 | def stat_component(component: Path):
48 |     dfs: 'list[dict]' = []
49 |     if d := stat_file(component): dfs.append(d)
50 | if component.is_dir():
51 | for (root, dirs, files) in os.walk(component):
52 | for x in dirs + files:
53 | if d := stat_file(Path(root) / x):
54 | dfs.append(d)
55 | df = pd.DataFrame(data=dfs)
56 | df['component'] = component.name
57 | return df
58 |
59 |
60 | dfs: 'list[pd.DataFrame]' = []
61 | if nixStoreDir.is_file():
62 | with open(nixStoreDir) as fd:
63 | for comp in fd.readlines():
64 | df = stat_component(nixStoreDir.parent/Path(comp.strip()).name)
65 | dfs.append(df)
66 | else:
67 | for comp in nixStoreDir.iterdir():
68 | df = stat_component(comp)
69 | dfs.append(df)
70 |
71 | df = pd.concat(dfs)
72 | df['variant'] = systemId
73 |
74 | df.to_csv(outputPath)
75 |
76 | #df['files'] = 1
77 | #print(df.groupby(['variant', 'mime_type']).agg(dict(st_size=sum, files=len)))
78 |
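The commented-out `groupby` at the end hints at how the emitted CSV is meant to be consumed; here is a minimal post-processing sketch, where the file name `listing.csv` is only a placeholder for whatever `OUTPUT_PATH` was passed to the script:

```python
import pandas as pd

# Summarize a listing produced by system-listing.py by MIME type per system variant.
df = pd.read_csv('listing.csv')
summary = (df.groupby(['variant', 'mime_type'])
             .agg(total_size=('st_size', 'sum'), files=('st_size', 'count'))
             .sort_values('total_size', ascending=False))
print(summary.head(10))
```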
--------------------------------------------------------------------------------
/lib/default.nix:
--------------------------------------------------------------------------------
1 | dirname: inputs: inputs.functions.lib.importLib inputs dirname { rename = {
2 | functions = "fun";
3 | wiplib = "wip";
4 | self = "th";
5 | }; }
6 |
--------------------------------------------------------------------------------
/lib/misc.nix:
--------------------------------------------------------------------------------
1 | dirname: inputs: let
2 | lib = inputs.self.lib.__internal__;
3 | in rec {
4 |
5 | ## From a host's »default.nix«, import the »machine.nix« configuration, any »systems/« configurations, and enable the defaults for target devices.
6 | # The reason why this is a library function and not part of the »../target/defaults« module is that importing based on a config value is not possible, and config values are the only way to pass variables (»dirname«) to a module (other than the global module function arguments, but given that they are global, they are a pretty bad way to pass variables).
7 | importMachineConfig = inputs: dirname: {
8 | th.target.defaults.enable = true;
9 | imports = [ (lib.fun.importWrapped inputs "${dirname}/machine.nix").module ];
10 | specialisation = lib.mapAttrs (name: path: { configuration = { imports = [
11 | { th.target.specs.name = name; }
12 | (lib.fun.importWrapped inputs path).module
13 | ]; _file = "${dirname}/misc.nix#specialisation"; }; }) (lib.fun.getNixFiles "${dirname}/systems");
14 | installer.scripts.extra-setup = { path = ../utils/setup.sh; order = 1500; }; # (for reasons of weirdness, this only works when placed here)
15 | };
16 |
17 | copy-files = pkgs: name: attrs: let
18 | copy = pkgs.runCommandLocal name { } ''
19 | mkdir $out
20 | ${lib.concatStringsSep "\n" (lib.mapAttrsToList (name: path: let dir = builtins.dirOf name; in (if dir != "." then "mkdir -p $out/${lib.escapeShellArg dir} ; " else "") + ''cp -aT ${lib.escapeShellArg path} $out/${lib.escapeShellArg name}'') attrs)}
21 | '';
22 | in lib.mapAttrs (name: _: "${copy}/${name}") attrs;
23 |
24 | ## Extracts the result of »pkgs.dockerTools.pullImage«:
25 | extract-docker-image = pkgs: image: pkgs.runCommandLocal (if lib.isString image then "container-image" else "docker-image-${image.imageName}-${image.imageTag}") { inherit image; outputs = [ "out" "info" ]; } ''
26 | set -x
27 | tar -xf $image
28 | ls -al .
29 | layers=( $( ${pkgs.jq}/bin/jq -r '.[0].Layers|.[]' manifest.json ) )
30 | mkdir -p $out
31 | for layer in "''${layers[@]}" ; do
32 | tar --anchored --exclude='dev/*' -tf $layer | ( grep -Pe '(^|/)[.]wh[.]' || true ) | while IFS= read -r path ; do
33 | if [[ $path == */.wh..wh..opq ]] ; then
34 | ( shopt -s dotglob ; rm -rf $out/"''${path%%.wh..wh..opq}"/* )
35 | else
36 | name=$( basename "$path" ) ; rm -rf $out/"$( dirname "$path" )"/''${name##.wh.}
37 | fi
38 | done
39 | tar --anchored --exclude='dev/*' -C $out -xf $layer -v |
40 | ( grep -Pe '(^|/)[.]wh[.]' || true ) | while IFS= read -r path ; do
41 | name=$( basename "$path" ) ; rm -rf $out/"$path"
42 | done
43 | chmod -R +w $out
44 | done
45 | mkdir -p $info
46 | cp ./manifest.json $info/
47 | config=$( ${pkgs.jq}/bin/jq -r '.[0].Config' manifest.json || true )
48 | [[ ! $config ]] || cp ./"$config" $info/config.json
49 | stat --printf='%s\t%n\n' "''${layers[@]}" | LC_ALL=C sort -k2 > $info/layers
50 | '';
51 |
52 | }
53 |
--------------------------------------------------------------------------------
/lib/util.ts:
--------------------------------------------------------------------------------
1 |
2 | export const TXT = {
3 | /**
4 | * Formats an array of uniform objects as a human readable text table.
5 | *
6 | * @example
7 | * const preContent = encodeHtml(TXT.stringify({
8 | * columns: { date: '^9', method: '^meth', host: '8^', path: '40^', ua: '^50' },
9 | * entries: [ ... ],
10 | * });
11 | *
12 | * @return Plain text table as multi line string.
13 | */
14 |     stringify<EntryT extends Record<string, any>>({
15 | columns, entries, stringify = null, ellipsis = '…', separator = ' ', delimiter = '\r\n', finalize = null,
16 | }: {
17 | /** Object whose properties define the columns of the table.
18 | * Each column's values will be the entries' properties of the same name,
19 | * formatted according to the values properties as follows:
20 | * .limit: maximum width of the column, longer values will be trimmed.
21 | * .align: true to align right, left otherwise,
22 | * .trim: true to trim left, right otherwise. Defaults to `.align`.
23 | * .name: Header name of the column. Defaults to the property name.
24 | * As shortcuts, the value can be a number specifying only the `.limit`
25 | * or a string `[align]?[limit[trim]?]? ?[name]?`, where `align` and `trim`
26 | * may be `^` or `$` for left and right, respectively, `limit` is a decimal
27 | * number, and `name` is any string (prefix it with a/another space if it
28 | * should start with `^`, `$` or a digit or space).
29 | * A `null`ish value will ignore that column. */
30 | columns: { [key in keyof EntryT]: number | string | { limit?: number, align?: boolean, trim?: boolean, name?: string, } | null; },
31 | /** Array of objects to fill the table. */
32 | entries: EntryT[],
33 | /** Stringification function invoked as `string = *(value, key, entry)`
34 | * to cast each used value into strings before taking their lengths.
35 |      * The default prints primitives as `+''`,
36 |      * `Date`s as ISO dates, and other objects via `JSON.stringify`. */
37 | stringify?: ((value: EntryT[keyof EntryT], key: keyof EntryT, entry: EntryT) => string) | null,
38 | /** Function called on each stringified and padded/trimmed value, before
39 | * placing it in its cell. E.g. useful for syntax highlighting in HTML. */
40 | finalize?: ((value: string, key: keyof EntryT, entry: EntryT) => string) | null,
41 | /** String placed at the cropped end of values.
42 | * Must be shorter than the smallest column width. Defaults to `…`. */
43 | ellipsis?: string,
44 | /** Column separator. Defaults to ` ` (space). */
45 | separator?: string,
46 | /** Line break. Defaults to `\r\n`. */
47 | delimiter?: string,
48 | }): string {
49 | !finalize && (finalize = x => x);
50 | !stringify && (stringify = value => {
51 | if (value == null) { return value +'' /*value === null ? '\u2400' : '\u2205'*/; }
52 | if (typeof value !== 'object') { return value +''; }
53 | if (value instanceof Date) { return isFinite(+value) ? value.toISOString().replace('T', '').replace(/(?:[.]000)?Z$/, '') : 'Invalid Date'; }
54 | return JSON.stringify(value);
55 | });
56 | const width = Object.keys(columns).length, height = entries.length;
57 | const header = new Array(width);
58 | const body = new Array(height); for (let i = 0; i < height; ++i) { body[i] = new Array(width); }
59 |
60 | Object.entries(columns).forEach(([ key, props, ], col) => {
61 | if (props == null) { return; }
62 | if (typeof props === 'number') { props = { limit: props, }; }
63 | if (typeof props === 'string') {
64 | const [ , align, limit, trim, name, ] = (/^([$^])?(?:(\d+)([$^])?)? ?(.*?)?$/).exec(props) || [ ];
65 | props = {
66 | name: name || key, limit: limit == null ? -1 : +limit,
67 | align: align === '$', trim: trim ? trim === '^' : align === '$',
68 | };
69 | }
70 | const { align = false, limit = -1, } = props, { trim = align, name = key, } = props;
71 | if (limit === 0) { header[col] = separator; body.forEach(_=>(_[col] = separator)); return; }
72 |
73 | let max = name.length; entries.forEach((entry, row) => {
74 | const value = body[row][col] = stringify!(entry[key as keyof EntryT], key, entry);
75 | max = Math.max(max, value.length);
76 | });
77 | max = limit < 0 ? max : Math.min(limit, max);
78 | const pre = col === 0 ? '' : separator, elp = ellipsis.length < max ? ellipsis : '';
79 | entries.forEach((entry, row) => {
80 | body[row][col] = pre + finalize!(trimAlign(body[row][col]), key, entry);
81 | }); header[col] = pre + trimAlign(name);
82 |
83 | function trimAlign(value: string) {
84 | return value.length > max
85 | ? trim ? elp + value.slice(-max + elp.length) : value.slice(0, max - elp.length) + elp
86 | : align ? value.padStart(max) : value.padEnd(max);
87 | }
88 | });
89 |
90 | return header.join('') + delimiter + body.map(row => row.join('')).join(delimiter);
91 | },
92 | };
93 |
94 | export const CSV = {
95 | /**
96 | * Serializes an optional header and a number of records as a RFC 4180-compliant CSV-file string.
97 | *
98 | * @example
99 | * const csvBuffer = Buffer.from('\ufeff'+ CSV.stringify({
100 | * header: { name: 'Name', age: 'Age', },
101 | * records: [ { name: 'Dad', age: 42, }, { name: 'Baby', age: '1', }, ],
102 | * separator: ';',
103 | * }) +'\r\n', 'utf-8');
104 | *
105 | * @return (Depending on the options) RFC 4180-compliant CSV-file string, without final delimiter.
106 | */
107 |     stringify<RecordT extends Record<string, any>>({
108 | header = null, records, serialize = null, stringify = null, separator = ',', delimiter = '\r\n',
109 | }: {
110 | /** Optional. Header row as `[ ...keys, ]` or `{ [key]: name, ... }`.
111 | * Fields with `name == null` will be ignored. If `.serialize` is unset,
112 | * non-Array records are mapped to their property values of the provided keys. */
113 | header?: (keyof RecordT)[] | { [key in keyof RecordT]?: string | null; } | null,
114 | /** Iterable of records. Each record passed is mapped through `.serialize`. */
115 | records: (RecordT | RecordT[keyof RecordT][])[],
116 | /** Serialization function `(record) => [ ...values, ]`.
117 | * Must return a constant-length iterable of values for each record.
118 | * Defaults to the id function or what is described under `.header`. */
119 | serialize?: ((record: RecordT | RecordT[keyof RecordT][]) => RecordT[keyof RecordT][]) | null,
120 | /** Optional stringification function that is invoked for every value (returned by `serialize`).
121 | * The default function keeps numbers, omits null/undefined and casts everything else into strings.
122 | * The default behavior may be adjusted by passing an object with the following properties instead:
123 | * `.tabPrefix`: Optional `RegExp`, defaulting to `/\d/` if not set. Values matching
124 | * this will be prefixed with a `'\t'`. This is done to prevent MS Office from
125 | * interpreting string numbers as numerical values, messing up their string formatting.
126 | * This may cause other applications to read the additional '\t'.
127 | * `.quote`: `RegExp`, defaulting to `/[,;\t\r\n"]/` if not set.
128 | * Matching values will be surrounded with double quotes and have internal double quotes duplicated.
129 |      *    (values are tab-prefixed, when applicable, before the quoting test is applied). */
130 | stringify?: ((value: RecordT[keyof RecordT], index: number/* , record: RecordT[keyof RecordT][] */) => string) | { tabPrefix?: RegExp|false, quote?: RegExp, } | null,
131 | /** Value separator within record lines. Defaults to `,`. */
132 | separator?: string,
133 | /** Record/line delimiter. Defaults to `\r\n`. */
134 | delimiter?: string,
135 | }): string {
136 | if (typeof header === 'object' && header !== null) {
137 | const fields = Array.isArray(header) ? header : Object.keys(header).filter(key => header![key] != null);
138 | !Array.isArray(header) && (header = fields.map(key => header![key as any]));
139 | !serialize && (serialize = obj => Array.isArray(obj) ? obj : fields.map(_=>obj[_]));
140 | }
141 | !serialize && (serialize = (x: any) => x);
142 | if (typeof stringify !== 'function') {
143 | const {
144 | tabPrefix = (/\d/),
145 | quote = new RegExp(String.raw`[${ separator + delimiter },;\t\r\n"]`),
146 | } = stringify || { };
147 | stringify = value => {
148 | if (value == null) { return ''; } if (typeof value === 'number') { return value +''; } let _value = value +'';
149 | if (tabPrefix && tabPrefix.test(_value)) { _value = '\t'+ _value; } // see https://superuser.com/a/704291
150 | return quote.test(_value) ? '"'+ _value.split('"').join('""') + '"' : _value;
151 | };
152 | }
153 | function record(values: RecordT | RecordT[keyof RecordT][]) { return Array.from(serialize!(values), stringify as any).join(separator); }
154 | return (header ? record(header as any) + delimiter : '') + Array.from(records, record).join(delimiter);
155 | },
156 | };
157 |
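As an aside, the column shorthand `[align]?[limit[trim]?]? ?[name]?` documented in `TXT.stringify` above can be explored with the very same regular expression. The following Python sketch only illustrates how a few invented spec strings would be interpreted; the `(column key)` fallback stands in for the property name used in the TypeScript code:

```python
import re

# The same pattern TXT.stringify uses for its column shorthand: [align]?[limit[trim]?]? ?[name]?
SPEC = re.compile(r'^([$^])?(?:(\d+)([$^])?)? ?(.*?)?$')

for spec in ('^9', '^meth', '8^', '40^', '^50', '$20$ Amount'):
    align, limit, trim, name = SPEC.match(spec).groups()
    options = {
        'name': name or '(column key)',        # the TS code falls back to the property key
        'limit': int(limit) if limit else -1,  # -1 means "as wide as the widest value"
        'align_right': align == '$',
        'trim_left': (trim == '^') if trim else align == '$',
    }
    print(f'{spec!r:>14} -> {options}')
```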
--------------------------------------------------------------------------------
/modules/README.md:
--------------------------------------------------------------------------------
1 |
2 | # NixOS Modules
3 |
4 | A NixOS module is a collection of any number of NixOS option definitions and value assignments to those or other options.
5 | While the set of imported modules and thereby defined options is static (in this case starting with the modules passed to `mkNixosSystem` in `../flake.nix`), the value assignments can generally be contingent on other option values (as long as there are no logical loops), which makes the construction of the system highly flexible.
6 | Since modules can't be imported (or excluded) dynamically, most modules have an `enable` option, which, if false, effectively disables whatever that module does.
7 |
8 | Ultimately, the goal of a NixOS configuration is to build an operating system, which is basically a structured collection of program and configuration files.
9 | To that end, there are a number of pre-defined options (in `nixpkgs`) that collect programs, create and write configuration files (primarily in `/etc`), compose a boot loader, etc.
10 | Other modules use those options to manipulate how the system is built.
11 |
12 |
13 | ## Template
14 |
15 | Here is a skeleton structure for writing a new `.nix.md`:
16 |
17 | ````md
18 | /*
19 |
20 | # TODO: title
21 |
22 | TODO: documentation
23 |
24 | ## Implementation
25 |
26 | ```nix
27 | #*/# end of MarkDown, beginning of NixOS module:
28 | dirname: inputs: { config, pkgs, lib, ... }: let lib = inputs.self.lib.__internal__; in let
29 | cfg = config.th.${TODO: name};
30 | in {
31 |
32 | options.th = { ${TODO: name} = {
33 | enable = lib.mkEnableOption "TODO: what";
34 | # TODO: more options
35 | }; };
36 |
37 | config = lib.mkIf cfg.enable (lib.mkMerge [ ({
38 | # TODO: implementation
39 | }) ]);
40 |
41 | }
42 | ````
43 |
--------------------------------------------------------------------------------
/modules/default.nix:
--------------------------------------------------------------------------------
1 | dirname: inputs: inputs.functions.lib.importModules inputs dirname { }
2 |
--------------------------------------------------------------------------------
/modules/hermetic-bootloader.sh:
--------------------------------------------------------------------------------
1 |
2 | function include-path {( set -eu ; # 1: path
3 | if [[ -L $tree/$1 ]] ; then
4 | if [[ $( readlink $tree/"$1" ) != "$1" ]] ; then echo "Link $1 exists and does not point at the expected target!" ; exit 1 ; fi
5 | elif [[ ! -e $1 ]] ; then
6 | echo "Path $1 can't be included because it doesn't exist!" ; exit 1
7 | else
8 | if [[ "$1" == */* ]] ; then mkdir -p "$( dirname $tree/"$1" )" ; fi
9 | ln -sT "$1" $tree/"$1"
10 | fi
11 | )}
12 |
13 | function write-file {( set -eu ; # 1: path, 2: content
14 | mkdir -p $tree/"$(dirname "$1")"
15 | printf %s "$2" > $tree/"$1"
16 | )}
17 |
18 | function config-uboot-extlinux {( set -eu ; # 1: default
19 | config=$'\nMENU TITLE ------------------------------------------------------------\n'
20 | if [[ @{config.boot.loader.timeout} == 0 ]] ; then
21 | config+=$'TIMEOUT 1\n' # near-instantaneous
22 | else
23 | config+="TIMEOUT $(( @{config.boot.loader.timeout:-0} * 10 ))"$'\n'
24 | fi
25 | for name in "@{!cfg.entries[@]}" ; do
26 | eval 'declare -A entry='"@{cfg.entries[$name]}"
27 | config+="
28 | LABEL ${entry[id]}
29 | MENU LABEL ${entry[title]} (${entry[id]})
30 | LINUX ${entry[kernel]}
31 | INITRD ${entry[initrd]}
32 | APPEND ${entry[cmdline]}
33 | ${entry[deviceTree]:+"FDTDIR ${entry[deviceTree]}"}
34 | "
35 | done
36 | write-file /extlinux/extlinux.conf "${1:+"DEFAULT $1"}"$'\n'"$config"
37 | for name in "@{!cfg.entries[@]}" ; do
38 | eval 'declare -A entry='"@{cfg.entries[$name]}"
39 | write-file /extlinux/entries/"${entry[id]}".conf "DEFAULT ${entry[id]}"$'\n'"$config"
40 | done
41 | )}
42 |
43 |
44 | function config-systemd-boot {( set -eu ; # 1: default
45 | write-file /loader/loader.conf "
46 | @{config.boot.loader.timeout:+timeout @{config.boot.loader.timeout}}
47 | ${1:+"default $1.conf"}
48 | editor 0
49 | console-mode keep
50 | "
51 | for name in "@{!cfg.entries[@]}" ; do
52 | eval 'declare -A entry='"@{cfg.entries[$name]}"
53 | write-file /loader/entries/"${entry[id]}".conf "
54 | title ${entry[title]}
55 | version ${entry[id]}
56 | linux ${entry[kernel]}
57 | initrd ${entry[initrd]}
58 | options ${entry[cmdline]}
59 | ${entry[machine-id]:+"machine-id ${entry[machine-id]}"}
60 | "
61 | done
62 | )}
63 |
64 | function build-tree {( set -eu ; # (void)
65 | for name in "@{!cfg.entries[@]}" ; do
66 | eval 'declare -A entry='"@{cfg.entries[$name]}"
67 | include-path ${entry[kernel]}
68 | include-path ${entry[initrd]}
69 | [[ ! ${entry[deviceTree]:-} ]] || include-path ${entry[deviceTree]}
70 | done
71 | default=@{cfg.default:-} ; if [[ $default && @{cfg.entries[$default]:-} ]] ; then
72 | eval 'declare -A entry='"@{cfg.entries[$default]}"
73 | default=${entry[id]}
74 | fi
75 | if [[ @{cfg.loader} == uboot-extlinux ]] ; then
76 | config-uboot-extlinux "$default"
77 | fi
78 | if [[ @{cfg.loader} == systemd-boot ]] ; then
79 | config-systemd-boot "$default"
80 | arch=x64 ; if [[ @{pkgs.system} == aarch64-* ]] ; then arch=aa64 ; fi
81 | mkdir -p $tree/EFI/systemd ; ln -sT @{pkgs.systemd}/lib/systemd/boot/efi/systemd-boot${arch}.efi $tree/EFI/systemd/systemd-boot${arch}.efi
82 | mkdir -p $tree/EFI/BOOT ; ln -sT @{pkgs.systemd}/lib/systemd/boot/efi/systemd-boot${arch}.efi $tree/EFI/BOOT/BOOT${arch^^}.EFI
83 | fi
84 | for path in "@{!cfg.extraFiles[@]}" ; do
85 | mkdir -p $tree/"$(dirname "$path")" ; ln -sT "@{cfg.extraFiles[$path]}" $tree/"$path"
86 | done
87 | )}
88 |
89 | function write-to-fs {( set -eu ; # 1: tree, 2: root, 3?: selfRef
90 | tree=$1 ; root=$2 ; selfRef=${3:-} ; existing=( )
91 | while IFS= read -r -d $'\0' path ; do
92 | if [[ -e $root/$path ]] ; then
93 | existing+=( "$path" ) ; continue
94 | fi
95 | @{pkgs.coreutils}/bin/mkdir -p "$root"/"$( @{pkgs.coreutils}/bin/dirname "$path" )"
96 | @{pkgs.coreutils}/bin/cp -T $tree/"$path" "$root"/"$path"
97 | done < <( cd $tree ; @{pkgs.findutils}/bin/find -L . -type f,l -print0 )
98 | for path in "${existing[@]}" ; do
99 | if [[ $( cd $tree ; @{pkgs.coreutils}/bin/shasum "$path" ) != $( cd "$root" ; @{pkgs.coreutils}/bin/shasum "$path" ) ]] ; then
100 | @{pkgs.coreutils}/bin/rm "$root"/"$path" ; @{pkgs.coreutils}/bin/cp -T $tree/"$path" "$root"/"$path"
101 | fi
102 | done
103 | # TODO: delete unneeded old files/dirs
104 | if [[ $selfRef ]] ; then
105 | id=default-${selfRef:11:8}
106 | function replace {
107 | path=$1 ; str=$( @{pkgs.coreutils}/bin/cat "$path" ) ; prev="$str"
108 | str=${str//@default-self@/$id}
109 | str=${str//@toplevel@/$selfRef}
110 | [[ $str == "$prev" ]] || ( <<< "$str" @{pkgs.coreutils}/bin/cat >"$path" )
111 | }
112 | base=loader ; if [[ @{cfg.loader} == uboot-extlinux ]] ; then base=extlinux ; fi
113 | while IFS= read -r -d $'\0' path ; do replace "$path" ; done < <( @{pkgs.findutils}/bin/find -L "$root"/$base/ -type f,l -print0 )
114 | [[ ! -e "$root"/$base/entries/"@default-self@".conf ]] || mv "$root"/$base/entries/{"@default-self@","$id"}.conf
115 | <<< "$selfRef" @{pkgs.coreutils}/bin/cat > "$root"/toplevel
116 | fi
117 | )}
118 |
119 | function write-boot-partition {( set -u # 1: tree, 2: blockDev, 3: label, 4?: selfRef
120 | tree=$1 ; blockDev=$2 ; label=$3 ; selfRef=${4:-}
121 | # TODO: is it possible to just "flash" an empty FAT32? The label can be replaced with dd ...
122 |     @{pkgs.dosfstools}/bin/mkfs.vfat -n "$label" "$blockDev" &>/dev/null || @{pkgs.dosfstools}/bin/mkfs.vfat -n "$label" "$blockDev" || exit
123 | root=$( @{pkgs.coreutils}/bin/mktemp -d ) ; @{pkgs.util-linux}/bin/mount "$blockDev" $root ; trap "@{pkgs.util-linux}/bin/umount $root ; @{pkgs.coreutils}/bin/rmdir $root" EXIT || exit
124 | write-to-fs $tree "$root" "$selfRef" || exit
125 | )}
126 |
127 | function get-parent-disk {( set -u # 1: partition
128 | export PATH=@{pkgs.coreutils}/bin
129 | arg=$( realpath "$1" ) || exit
130 | arg=$( basename "$arg" ) || exit
131 | arg=$( readlink -f /sys/class/block/"$arg"/.. ) || exit
132 | arg=$( basename "$arg" ) || exit
133 | echo /dev/"$arg"
134 | )}
135 |
136 | function activate-as-slot {( set -eu ; # 1: tree, 2: index, 3: label, 4?: selfRef
137 | tree=$1 ; index=$2 ; label=$3 ; selfRef=${4:-}
138 | hash=@{config.networking.hostName!hashString.sha256:0:8}
139 |
140 | write-boot-partition $tree "/dev/disk/by-partlabel/boot-${index}-${hash}" "$label" "$selfRef"
141 |
142 | disk=$( get-parent-disk "/dev/disk/by-partlabel/boot-1-${hash}" ) # (can't reference to disks by partlabel)
143 | for (( i = 2 ; i <= @{cfg.slots.number} ; i++ )) ; do
144 | if [[ $( get-parent-disk "/dev/disk/by-partlabel/boot-${i}-${hash}" ) != "$disk" ]] ; then echo "boot slot $i is on unexpected parent disk" ; exit 1 ; fi
145 | done
146 |
147 | diskSize=$( @{pkgs.util-linux}/bin/blockdev --getsize64 "$disk" ) # TODO: could take this from the disk specification
148 |
149 | # The behavior might be slightly (EFI-)implementation-dependent, but with a working primary header, the secondary should not be used. (The spec (https://uefi.org/sites/default/files/resources/UEFI_Spec_2_8_final.pdf, page 120) says that the last step in checking that "a GPT" is valid is to check that the AlternateLBA "is a valid GPT" (without addressing the recursion there). It does not require that the two headers point at each other (here) or that they otherwise match ...)
150 | # The spec says to update the secondary (backup) header first.
151 | @{pkgs.coreutils}/bin/dd status=none conv=notrunc bs=512 skip=2 seek=$(( diskSize / 512 - 1 )) count=1 if=@{config.setup.disks.partitioning}/"@{cfg.slots.disk}".slot-${index}.backup of="$disk"
152 |
153 | if [[ @{cfg.loader} != uboot-extlinux ]] ; then
154 | @{pkgs.coreutils}/bin/dd status=none conv=notrunc bs=512 skip=1 seek=1 count=1 if=@{config.setup.disks.partitioning}/"@{cfg.slots.disk}".slot-${index}.backup of="$disk"
155 | else
156 | @{pkgs.coreutils}/bin/dd status=none conv=notrunc bs=1024 skip=0 seek=0 count=1 if=@{config.setup.disks.partitioning}/"@{cfg.slots.disk}".slot-${index}.backup of="$disk"
157 |         # For systems that actually use both MBR and GPT (rPi with U-Boot), this assumes/requires that writing two logical sectors is atomic ...
158 | fi
159 | )}
160 |
161 | function build-out {( set -eu ; # (void)
162 | printf %s "#!@{pkgs.bash}/bin/bash -eu
163 | # 1: slot, 2?: selfRef, ...: ignored
164 | $( declare -f write-to-fs write-boot-partition get-parent-disk activate-as-slot )
165 | $( declare -p pkgs_findutils pkgs_util0linux pkgs_coreutils pkgs_dosfstools )
166 | $( declare -p config_setup_disks_partitioning config_networking_hostName1hashString_sha256 cfg_loader cfg_slots_number cfg_slots_disk )
167 | activate-as-slot $tree \"\$1\" '@{cfg.slots.currentLabel}' \"\${2:-}\"
168 | " > $out
169 | chmod +x $out
170 | )}
171 |
172 | function apply-partitionings {( set -eu ; # (void)
173 | hash=@{config.networking.hostName!hashString.sha256:0:8}
174 |
175 | disk=$( get-parent-disk "/dev/disk/by-partlabel/boot-1-${hash}" )
176 | for (( i = 2 ; i <= @{cfg.slots.number} ; i++ )) ; do
177 | if [[ $( get-parent-disk "/dev/disk/by-partlabel/boot-${i}-${hash}" ) != "$disk" ]] ; then echo "boot slot $i is on unexpected parent disk" ; exit 1 ; fi
178 | done
179 | for (( i = 1 ; i <= @{cfg.slots.number} ; i++ )) ; do
180 | @{pkgs.gptfdisk}/bin/sgdisk --load-backup=@{config.setup.disks.partitioning}/"@{cfg.slots.disk}".slot-${i}.backup "$disk" &>/dev/null
181 | done
182 | )}
183 |
184 | function build-init {( set -eu ; # (void)
185 | printf %s "#!@{pkgs.bash}/bin/bash -eu
186 | # ...: outArgs
187 | $( declare -f get-parent-disk apply-partitionings )
188 | $( declare -p pkgs_coreutils pkgs_gptfdisk )
189 | $( declare -p config_setup_disks_partitioning config_networking_hostName1hashString_sha256 cfg_slots_number cfg_slots_disk )
190 | apply-partitionings
191 | source $out
192 | disk=\$( get-parent-disk /dev/disk/by-partlabel/boot-1-@{config.networking.hostName!hashString.sha256:0:8} )
193 | @{pkgs.parted}/bin/partprobe \$disk &>/dev/null && @{config.systemd.package}/bin/udevadm settle -t 15 || true
194 | " > $init
195 | chmod +x $init
196 | )}
197 |
198 | #set -x
199 |
200 | build-tree
201 | build-out
202 | build-init
203 |
--------------------------------------------------------------------------------
/modules/target/default.nix:
--------------------------------------------------------------------------------
1 | dirname: inputs: inputs.functions.lib.importModules inputs dirname { }
2 |
--------------------------------------------------------------------------------
/modules/target/defaults.nix.md:
--------------------------------------------------------------------------------
1 | /*
2 |
3 | # Target Device Config Defaults
4 |
5 | Base configuration for the target devices, pulling in everything that all target devices should have in common.
6 |
7 |
8 | ## Implementation
9 |
10 | ```nix
11 | #*/# end of MarkDown, beginning of NixOS module:
12 | dirname: inputs: moduleArgs@{ config, pkgs, lib, ... }: let lib = inputs.self.lib.__internal__; in let
13 | cfg = config.th.target.defaults;
14 | in {
15 |
16 | options.th = { target.defaults = {
17 |         enable = lib.mkEnableOption "base configuration for the target devices. This would usually be enabled by importing the result of calling »lib.th.importMachineConfig«";
18 | }; };
19 |
20 | config = let
21 | hash = builtins.substring 0 8 (builtins.hashString "sha256" config.networking.hostName);
22 | in lib.mkIf cfg.enable (lib.mkMerge [ ({
23 |
24 | ## Enable modules:
25 | wip.base.enable = true; wip.base.autoUpgrade = false;
26 | th.hermetic-bootloader.enable = true;
27 | th.target.fs.enable = true;
28 | th.target.specs.enable = true;
29 | th.minify.enable = true; th.minify.etcAsOverlay = true;
30 | wip.services.dropbear.enable = true;
31 | th.target.watchdog.enable = true;
32 |
33 | ## Convenience:
34 | documentation.enable = false; # sometimes takes quite long to build
35 | boot.loader.timeout = 1;
36 |
37 | }) ]);
38 |
39 | }
40 |
--------------------------------------------------------------------------------
/modules/target/fs.nix.md:
--------------------------------------------------------------------------------
1 | /*
2 |
3 | # File System Configuration for the Target Device
4 |
5 | For the file system setup, the relevant characteristics of the target devices are that they:
6 | * can boot into multiple (non-generational) configurations
7 | * are robust, that is, there is a fallback (default/`null`) config that the other configurations can't destroy.
8 |
9 | To realize that, the target devices use a `tmpfs` for `/`, meaning that in most locations, all files will be deleted/reset on reboot.
10 |
11 | The only persistent paths are:
12 | * `/boot` for the (single) kernel and initrd, backed by a `vfat` boot/firmware partition (if the hardware's boot architecture requires one and/or can't open `ext4` partitions),
13 | * `/nix/store` which contains all the programs and configurations, backed by an `ext4` system partition,
14 | * `/var/log` and `/volumes` for logs and container volumes, backed by per-config directories on an `ext4` data partition.
15 |
16 | The boot and system partitions are mounted read-only in all but the fallback config, and the data partition is not mounted in the fallback config.
17 | That way, no part that is relevant for the booting of the fallback config can be modified in any other config.
18 |
19 |
20 | ## Implementation
21 |
22 | ```nix
23 | #*/# end of MarkDown, beginning of NixOS module:
24 | dirname: inputs: { config, pkgs, lib, ... }: let lib = inputs.self.lib.__internal__; in let
25 | cfg = config.th.target.fs;
26 | in {
27 |
28 | options.th = { target.fs = {
29 | enable = lib.mkEnableOption "the file systems for a target device";
30 |         dataDir = lib.mkOption { description = "Directory on the /data partition in which logs and volumes are stored. The /data partition won't be mounted automatically if »null«."; type = lib.types.nullOr lib.types.str; default = null; };
31 | dataSize = lib.mkOption { description = "Size of the »/data« partition."; type = lib.types.str; default = "2G"; };
32 | }; };
33 |
34 | config = let
35 | hash = builtins.substring 0 8 (builtins.hashString "sha256" config.networking.hostName);
36 | implied = true; # some mount points are implied (and forced) to be »neededForBoot« in »specialArgs.utils.pathsNeededForBoot« (this marks those here)
37 |
38 | in lib.mkIf cfg.enable (lib.mkMerge [ ({
39 | # Mount a tmpfs as root, and (currently) only the nix-store from a read-only system partition:
40 |
41 | fileSystems."/" = { fsType = "tmpfs"; device = "tmpfs"; neededForBoot = implied; options = [ "mode=755" ]; };
42 |
43 | setup.disks.partitions."system-${hash}" = { type = "8300"; size = null; order = 500; };
44 | fileSystems."/system" = { fsType = "ext4"; device = "/dev/disk/by-partlabel/system-${hash}"; neededForBoot = true; options = [ "noatime" "ro" ]; formatArgs = [ "-O" "inline_data" "-b" "4k" "-E" "nodiscard" "-F" ]; };
45 | fileSystems."/nix/store" = { options = [ "bind" "ro" "private" ]; device = "/system/nix/store"; neededForBoot = implied; };
46 |
47 | systemd.tmpfiles.rules = [
48 | # Make the »/nix/store« non-enumerable:
49 | ''d /system/nix/store 0751 root 30000 - -''
50 | # »nixos-containers«/»config.containers« expect these to exist and fail to start without:
51 | ''d /nix/var/nix/db 0755 root root - -''
52 | ''d /nix/var/nix/daemon-socket 0755 root root - -''
53 | ];
54 |
55 | }) ({
56 | # Declare data partition:
57 |
58 | setup.disks.partitions."data-${hash}" = { type = "8300"; size = cfg.dataSize; order = 1000; };
59 |
60 | }) (lib.mkIf (cfg.dataDir == null) {
61 | # Make /data mountable in default spec:
62 |
63 | fileSystems."/data" = { fsType = "ext4"; device = "/dev/disk/by-partlabel/data-${hash}"; neededForBoot = false; options = [ "noatime" "noauto" "nofail" ]; formatArgs = [ "-O" "inline_data" "-E" "nodiscard" "-F" ]; };
64 |
65 | }) (lib.mkIf (cfg.dataDir != null) {
66 | # On a read/writable data partition, provide persistent logs and container volume storage separately for each spec:
67 |
68 | fileSystems."/data" = { fsType = "ext4"; device = "/dev/disk/by-partlabel/data-${hash}"; neededForBoot = true; options = [ "noatime" ]; };
69 | fileSystems."/var/log" = { options = [ "bind" ]; device = "/data/by-config/${cfg.dataDir}/log"; neededForBoot = implied; };
70 | fileSystems."/volumes" = rec { options = [ "bind" ]; device = "/data/by-config/${cfg.dataDir}/volumes"; neededForBoot = false; preMountCommands = "mkdir -p -- ${lib.escapeShellArg device}"; };
71 |
72 | }) ]);
73 |
74 | }
75 |
--------------------------------------------------------------------------------
/modules/target/specs.nix.md:
--------------------------------------------------------------------------------
1 | /*
2 |
3 | # Config Specialisations
4 |
5 | Base configuration allowing the specified »config.specialisation« entries to be bootable with virtually no overhead and without any bootloader integration.
6 |
7 |
8 | ## Implementation
9 |
10 | ```nix
11 | #*/# end of MarkDown, beginning of NixOS module:
12 | dirname: inputs: specialArgs@{ config, pkgs, lib, nodes, ... }: let lib = inputs.self.lib.__internal__; in let
13 | cfg = config.th.target.specs;
14 | in {
15 |
16 | options.th = { target.specs = {
17 | enable = lib.mkEnableOption "bootable config specialisations";
18 |         name = lib.mkOption { description = "Name of the current specialisation, must be the same as this spec's key in the parent's »config.specialisation.*« attrset."; type = lib.types.nullOr lib.types.str; default = specialArgs.specialisation or null; };
19 | }; };
20 |
21 | config = let
22 | hash = builtins.substring 0 8 (builtins.hashString "sha256" config.networking.hostName);
23 |
24 | in lib.mkIf cfg.enable (lib.mkMerge [ ({
25 | # Default specialization:
26 |
27 | specialisation.default.configuration = {
28 | th.target.specs.name = "default";
29 |
30 | # Enable receiving of updates:
31 | environment.systemPackages = [ pkgs.nix-store-recv ];
32 |
33 | environment.etc.dummy.text = "mega significant change in configuration\n";
34 | #environment.etc.dummy.text = "super significant change in configuration\n";
35 |
36 | # Test the updating:
37 | # * switch the dummy files or make some other detectable change
38 | # * run in the repo: nix run .#nix-store-send -- $(ssh imx -- cat /boot/toplevel) $(nix build .#nixosConfigurations.imx.config.system.build.toplevel --print-out-paths) --stats | ssh imx -- nix-store-recv --no-delete --status --verbose
39 | # * run in the repo: ssh imx -- nix-store-recv --only-delete --status --verbose
40 |
41 | };
42 |
43 | # Replace the default bootloader entry (which would be the machine config) with the default(/fallback) system config:
44 | th.hermetic-bootloader.default = "default";
45 |
46 | # In the "machine config" (only), include the bootloader installer (which then references all the system configs):
47 | system.extraSystemBuilderCmds = lib.mkIf (config.specialisation != { }) ''
48 | printf '#!%s/bin/bash -e\nexec %s $1 %s\n' "${pkgs.bash}" "${config.th.hermetic-bootloader.builder}" "$out" >$out/install-bootloader
49 | chmod +x $out/install-bootloader
50 | '';
51 |
52 |
53 | }) (lib.mkIf (config.specialisation == { } && cfg.name != null) {
54 | # Config within a specialisation only:
55 |
56 | system.nixos.tags = [ cfg.name ];
57 | system.extraSystemBuilderCmds = ''rm -f $out/initrd'';
58 |
59 | }) (lib.mkIf (config.specialisation != { }) {
60 | # Config outside the specialisations only:
61 |
62 | }) ]);
63 |
64 | }
65 |
--------------------------------------------------------------------------------
/modules/target/watchdog.nix.md:
--------------------------------------------------------------------------------
1 | /*
2 |
3 | # System Responsiveness Watchdog
4 |
5 | This module enables the system's hardware watchdog in U-Boot (if applicable), during boot (from stage 1 onwards), and in systemd.
6 |
7 | Additional software watchdog configuration (systemd monitoring additional system state) should also be implemented.
8 |
9 |
10 | ## Implementation
11 |
12 | ```nix
13 | #*/# end of MarkDown, beginning of NixOS module:
14 | dirname: inputs: { config, pkgs, lib, ... }: let lib = inputs.self.lib.__internal__; in let
15 | cfg = config.th.target.watchdog;
16 | in {
17 |
18 | options.th = { target.watchdog = {
19 | enable = lib.mkEnableOption "hardware and software watchdog functionality";
20 | }; };
21 |
22 | config = let
23 | hash = builtins.substring 0 8 (builtins.hashString "sha256" config.networking.hostName);
24 | in lib.mkIf cfg.enable (lib.mkMerge [ ({
25 |
26 | ## Bootloader:
27 | th.hermetic-bootloader.uboot.extraConfig = [
28 | "CONFIG_WDT=y" "CONFIG_WATCHDOG=y" # enable support for and start watchdog system; also start and pacify it by default
29 | "CONFIG_CMD_WDT=y" # enable »wdt« (watchdog timer) command: https://u-boot.readthedocs.io/en/latest/usage/cmd/wdt.html
30 |         "CONFIG_WATCHDOG_TIMEOUT_MSECS=20000" # This should be enough to reach the initramfs. The default probably depends on the hardware and driver (here in u-boot, but also in Linux). The imx8mp's default is 60s in either case.
31 |         # NOTE: There may be some board-specific things that need to be set!
32 | ];
33 |
34 | ## During boot:
35 | # Ideally, the kernel (driver) would just keep the timeout started by the bootloader running, but that seems to be difficult (https://community.toradex.com/t/enable-watchdog-at-boot/5538/5). Instead, start it again as early as possible.
36 | boot.initrd.extraUtilsCommands = ''copy_bin_and_libs ${pkgs.util-linux}/sbin/wdctl'';
37 | boot.initrd.preDeviceCommands = ''
38 | # Re-enable watchdog ASAP with 30s timeout until systemd takes over:
39 | wdctl -s 30 1>/dev/null || echo "Failed to set watchdog timeout!" >&2
40 | if [ -e /dev/watchdog0 ] ; then echo "" > /dev/watchdog0 ; else echo "/dev/watchdog0 does not exist!" >&2 ; fi # (opening the device and not writing 'V' before closing it triggers the timeout)
41 | ''; # »preDeviceCommands« may be too early for some watchdog devices ...
42 | # Could pass »CONFIG_WATCHDOG_NOWAYOUT« to the kernel build.
43 |
44 | ## While running:
45 | systemd.watchdog.runtimeTime = "10s"; systemd.watchdog.rebootTime = "60s"; # Ensure that systemd (and thus the kernel) are responsive. Services may still do whatever.
46 | boot.kernelParams = [ "hung_task_panic=1" "hung_task_timeout_secs=30" ]; # Panic if a (kernel?) task is stuck for 30 seconds. (TODO: only the »hung_task_panic« applies)
47 | # TODO: watch network availability and systemd services
48 |
49 |
50 | }) ]);
51 |
52 | }
53 |
--------------------------------------------------------------------------------
/out/.gitignore:
--------------------------------------------------------------------------------
1 | # (just to keep the directory itself, see ../README.md)
2 | *
3 | !.gitignore
4 |
--------------------------------------------------------------------------------
/overlays/README.md:
--------------------------------------------------------------------------------
1 |
2 | # NixOS Overlays
3 |
4 | Nix(OS) manages its packages in a global attribute set, mostly referred to as `nixpkgs` (as repository/sources) or simply as `pkgs` (when evaluated).
5 |
6 | Overlays are a mechanism to add or replace packages in that attribute set, such that wherever else they are referenced (e.g. as an attribute of `pkgs`) the added/replaced version is used.
7 |
8 | Any number of overlays can be applied in sequence when instantiating/evaluating `nixpkgs` into `pkgs`.
9 | Each overlay is a function with two parameters returning an attrset which is merged onto `pkgs`.
10 | The first parameter (called `final`) is the `pkgs` as it will result after applying all overlays. This works because of nix's lazy evaluation, but accessing attributes that are based on the result of the current overlay will logically cause unresolvable recursions.
11 | For that reason, the second parameter `prev` is the version of `pkgs` from before applying the overlay.
12 | As a general guideline, use `final` where possible (to avoid consuming unpatched packages) and `prev` only when necessary to avoid recursions.
13 |
14 | `prev` thus gives access to the packages being overridden and allows (the build instructions for) the overriding package to be based off the unmodified package.
15 | Most packages in `nixpkgs` are constructed using something like `callPackage ({ ...args }: mkDerivation { ...attributes }) { ...settings }`, where `callPackage` is usually in `all-packages.nix` and imports the code in the parentheses from a different file.
16 | Passed by `callPackage`, `args` includes (attributes of) `pkgs` plus, optionally, the `settings` for the package.
17 | The `attributes` are then based on local values and packages and settings from `args`.
18 | Any package built that way then has two functions which allow overlays (or code elsewhere) to define modified versions of that package:
19 | * `.override` is a function taking an attrset that is merged over `args` before re-evaluating the package;
20 | * `.overrideAttrs` is a function from the old `attributes` to ones that are merged over `attributes` before building the derivation.
21 |
22 | Using the above mechanisms, each file in this folder adds and/or modifies one or more packages to/in `pkgs`.
23 | [`./default.nix`](./default.nix) exports all overlays as an attribute set; [`flake#outputs.packages.<system>.*`](../flake.nix) exports all packages resulting from the overlays.
24 |
25 |
26 | ## Template/Examples
27 |
28 | Here is a skeleton structure / collection of examples for writing a new `.nix.md`:
29 |
30 | ````md
31 | /*
32 |
33 | # TODO: title
34 |
35 | TODO: documentation
36 |
37 | ## Implementation
38 |
39 | ```nix
40 | #*/# end of MarkDown, beginning of NixPkgs overlay:
41 | dirname: inputs: final: prev: let
42 | inherit (final) pkgs; lib = inputs.self.lib.__internal__;
43 | in {
44 |
45 | # e.g.: add a patched version of a package (use the same name to replace)
46 | systemd-patched = prev.systemd.overrideAttrs (old: {
47 | patches = (old.patches or [ ]) ++ [
48 | ../patches/systemd-....patch
49 | ];
50 | });
51 |
52 | # e.g.: add a prebuilt program as package
53 | qemu-aarch64-static = pkgs.stdenv.mkDerivation {
54 | name = "qemu-aarch64-static";
55 | src = builtins.fetchurl {
56 | url = "https://github.com/multiarch/qemu-user-static/releases/download/v6.1.0-8/qemu-aarch64-static";
57 | sha256 = "075l122p3qfq6nm07qzvwspmsrslvrghar7i5advj945lz1fm6dd";
58 | }; dontUnpack = true;
59 | installPhase = "install -D -m 0755 $src $out/bin/qemu-aarch64-static";
60 | };
61 |
62 | # e.g.: update (or pin the version of) a package
63 | raspberrypifw = prev.raspberrypifw.overrideAttrs (old: rec {
64 | version = "1.20220308";
65 | src = pkgs.fetchFromGitHub {
66 | owner = "raspberrypi"; repo = "firmware"; rev = version;
67 | sha256 = "sha256-pwhI9sklAGq5+fJqQSadrmW09Wl6+hOFI/hEewkkLQs=";
68 | };
69 | });
70 |
71 | # e.g.: add a program as new package
72 | udptunnel = pkgs.stdenv.mkDerivation rec {
73 | pname = "udptunnel"; version = "1"; # (not versioned)
74 |
75 | src = pkgs.fetchFromGitHub {
76 | owner = "rfc1036"; repo = pname; rev = "482ed94388a0dde68561584926c7d5c14f079f7e"; # 2018-11-18
77 | sha256 = "1wkzzxslwjm5mbpyaq30bilfi2mfgi2jqld5l15hm5076mg31vp7";
78 | };
79 | patches = [ ../patches/....patch ];
80 |
81 | installPhase = ''
82 | mkdir -p $out/bin $out/share/udptunnel
83 | cp -T udptunnel $out/bin/${pname}
84 | cp COPYING $out/share/udptunnel
85 | '';
86 |
87 | meta = {
88 | homepage = "https://github.com/rfc1036/udptunnel";
89 | description = "Tunnel UDP packets in a TCP connection ";
90 | license = lib.licenses.gpl2;
91 | maintainers = [ ];
92 | platforms = lib.platforms.linux;
93 | };
94 | };
95 | }
96 | ````
97 |
--------------------------------------------------------------------------------
/overlays/default.nix:
--------------------------------------------------------------------------------
1 | dirname: inputs: inputs.functions.lib.importOverlays inputs dirname { }
2 |
--------------------------------------------------------------------------------
/overlays/fixes.nix.md:
--------------------------------------------------------------------------------
1 | /*
2 |
3 | Fixes for stuff that doesn't build (when cross-compiling or building through qemu).
4 |
5 |
6 | ## Implementation
7 |
8 | ```nix
9 | #*/# end of MarkDown, beginning of NixPkgs overlay:
10 | dirname: inputs: final: prev: let
11 | inherit (final) pkgs; lib = inputs.self.lib.__internal__;
12 | pkgsVersion = lib.fileContents "${pkgs.path}/.version"; # (there is probably some better way to get this)
13 | in {
14 |
15 | # These do tests that expect certain program crash behavior, which is slightly different when run via qemu-user:
16 | mdbook = prev.mdbook.overrideAttrs (old: lib.optionalAttrs (pkgs.system == "aarch64-linux") {
17 | doCheck = false;
18 | });
19 | /* nix = prev.nix.overrideAttrs (old: lib.optionalAttrs (pkgs.system == "aarch64-linux") {
20 | doInstallCheck = false;
21 | }); */
22 | #nix = inputs.nix.packages.${pkgs.system}.nix;
23 |
24 |
25 | # No idea why these fail:
26 | libxml2 = prev.libxml2.overrideAttrs (old: {
27 | doCheck = false; # at least one of the errors is "Unsupported encoding ISO-8859-5" (but even without stripping glibc)
28 | });
29 | man-db = prev.man-db.overrideAttrs (old: {
30 | doCheck = false;
31 | });
32 | libhwy = prev.libhwy.overrideAttrs (old: lib.optionalAttrs (pkgsVersion >= "23.05") {
33 | doCheck = false; # HwyConvertTestGroup/HwyConvertTest.TestAllTruncate/AVX3_ZEN4 fails
34 | });
35 | gnugrep = prev.gnugrep.overrideAttrs (old: lib.optionalAttrs (pkgsVersion >= "23.11") {
36 | doCheck = false;
37 | });
38 | coreutils = prev.coreutils.overrideAttrs (old: lib.optionalAttrs (pkgsVersion >= "23.11") {
39 | doCheck = false; # tests/misc/printf-cov
40 | });
41 | qemu_kvm = prev.qemu_kvm.override (old: (lib.optionalAttrs (pkgsVersion >= "23.05") {
42 | # some audio thing depends on guile, which fails to build without some string-conversion stuff, so disable all audio things:
43 | alsaSupport = false; jackSupport = false; pulseSupport = false; sdlSupport = false; spiceSupport = false;
44 | }) // (lib.optionalAttrs (pkgsVersion >= "23.11") {
45 | pipewireSupport = false;
46 | }));
47 | perl536 = (prev.perl536.overrideAttrs (old: lib.optionalAttrs (pkgsVersion == "22.11") {
48 | passthru = old.passthru // { pkgs = old.passthru.pkgs.override {
49 | # (this effectively disables config.nixpkgs.config.perlPackageOverrides)
50 | overrides = (_: {
51 | Po4a = (lib.recurseIntoAttrs prev.perl536.pkgs).Po4a.overrideAttrs (old: {
52 | doCheck = false;
53 | });
54 | });
55 | }; };
56 | # ((yes, this verbose monster seems about the most "at the root" way to override perl packages (but still only for one version of perl)))
57 | })).override (old: lib.optionalAttrs (pkgsVersion == "23.05") {
58 | # (this effectively disables config.nixpkgs.config.perlPackageOverrides)
59 | overrides = (_: {
60 | Po4a = (lib.recurseIntoAttrs prev.perl536.pkgs).Po4a.overrideAttrs (old: {
61 | doCheck = false;
62 | });
63 | });
64 | });
65 | perl538 = prev.perl538.override (old: lib.optionalAttrs (pkgsVersion == "23.11") {
66 | # (this effectively disables config.nixpkgs.config.perlPackageOverrides)
67 | overrides = (_: {
68 | Po4a = (lib.recurseIntoAttrs prev.perl538.pkgs).Po4a.overrideAttrs (old: {
69 | doCheck = false;
70 | });
71 | });
72 | });
73 |
74 | /*
75 | # And these failed at some point, but now don't?
76 | libuv = prev.libuv.overrideAttrs (old: {
77 | doCheck = false; # failing: tcp_bind6_error_addrinuse tcp_bind_error_addrinuse_connect tcp_bind_error_addrinuse_listen
78 | });
79 | openssh = prev.openssh.overrideAttrs (old: {
80 | doCheck = false;
81 | });
82 | orc = prev.orc.overrideAttrs (old: lib.optionalAttrs (pkgsVersion == "22.11") {
83 | doCheck = false;
84 | });
85 | */
86 |
87 | }
88 |
--------------------------------------------------------------------------------
/overlays/nar-hash.cc:
--------------------------------------------------------------------------------
1 | /**
2 |  * This is an extract from the Nix sources that does one thing only: create a hash of a file in exactly the same way as »nix-store --optimise« does it.
3 | * Compile with: nix-shell -p openssl gcc --run 'g++ -std=c++17 -lcrypto -lssl -O3 nar-hash.cc -o nar-hash'
4 | * Then call with one absolute or relative path as first argument. The hash will be printed to stdout.
5 | *
6 | * Composed of code snippets from https://github.com/NixOS/nix/, which is released under the LGPL v2.1 (https://github.com/NixOS/nix/blob/master/COPYING).
7 | */
8 |
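As a point of reference, the Nix-specific base32 rendering implemented by `printHash32()` further down can be reproduced in a few lines of Python. The digest fed in here is an arbitrary SHA-256 of a literal string, whereas the tool itself hashes the NAR serialization of the given path:

```python
import hashlib

BASE32_CHARS = "0123456789abcdfghijklmnpqrsvwxyz"  # Nix's custom alphabet (no e, o, u, t)

def nix_base32(digest: bytes) -> str:
    # Mirrors printHash32(): 5 bits per output character, least significant bits first.
    length = (len(digest) * 8 - 1) // 5 + 1
    out = []
    for n in range(length - 1, -1, -1):
        b = n * 5
        i, j = b // 8, b % 8
        c = digest[i] >> j
        if i + 1 < len(digest):
            c |= digest[i + 1] << (8 - j)
        out.append(BASE32_CHARS[c & 0x1F])
    return ''.join(out)

print(nix_base32(hashlib.sha256(b"example").digest()))  # 52 characters for a 32-byte digest
```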
9 |
10 | /// header things
11 |
12 | #include <openssl/sha.h>
13 | #include <string>
14 | #include <string_view>
15 | #include <vector>
16 | #include <memory>
17 | #include <cstdint>
18 | #include <cstring>
19 | #include <cstdio>
20 | #include <cstdlib>
21 | #include <cerrno>
22 | #include <climits>
23 | #include <unistd.h>
24 | #include <fcntl.h>
25 | #include <sys/stat.h>
26 |
27 | typedef std::string Path;
28 |
29 | const int hashSize = 32;
30 | const size_t base32Len = (hashSize * 8 - 1) / 5 + 1;
31 |
32 | struct Hash
33 | {
34 | uint8_t hash[hashSize] = {};
35 |
36 | /* Create a zero-filled hash object. */
37 | Hash();
38 |
39 | };
40 |
41 | /* Compute the hash of the given path. The hash is defined as
42 | (essentially) hashString(dumpPath(path)). */
43 | typedef std::pair<Hash, uint64_t> HashResult;
44 |
45 | struct Sink
46 | {
47 | virtual ~Sink() { }
48 | virtual void operator () (std::string_view data) = 0;
49 | virtual bool good() { return true; }
50 | };
51 |
52 | struct BufferedSink : virtual Sink
53 | {
54 | size_t bufSize, bufPos;
55 |     std::unique_ptr<char[]> buffer;
56 |
57 | BufferedSink(size_t bufSize = 32 * 1024)
58 | : bufSize(bufSize), bufPos(0), buffer(nullptr) { }
59 |
60 | void operator () (std::string_view data) override;
61 |
62 | void flush();
63 |
64 | virtual void write(std::string_view data) = 0;
65 | };
66 |
67 | struct AbstractHashSink : virtual Sink
68 | {
69 | virtual HashResult finish() = 0;
70 | };
71 |
72 | class HashSink : public BufferedSink, public AbstractHashSink
73 | {
74 | private:
75 | SHA256_CTX * ctx;
76 | uint64_t bytes;
77 |
78 | public:
79 | HashSink();
80 | ~HashSink();
81 | void write(std::string_view data) override;
82 | HashResult finish() override;
83 | };
84 |
85 |
86 | /// implementation
87 |
88 |
89 | Hash::Hash() {
90 | memset(hash, 0, hashSize);
91 | }
92 |
93 | void BufferedSink::operator () (std::string_view data)
94 | {
95 | if (!buffer) buffer = decltype(buffer)(new char[bufSize]);
96 |
97 | while (!data.empty()) {
98 | /* Optimisation: bypass the buffer if the data exceeds the
99 | buffer size. */
100 | if (bufPos + data.size() >= bufSize) {
101 | flush();
102 | write(data);
103 | break;
104 | }
105 | /* Otherwise, copy the bytes to the buffer. Flush the buffer
106 | when it's full. */
107 | size_t n = bufPos + data.size() > bufSize ? bufSize - bufPos : data.size();
108 | memcpy(buffer.get() + bufPos, data.data(), n);
109 | data.remove_prefix(n); bufPos += n;
110 | if (bufPos == bufSize) flush();
111 | }
112 | }
113 |
114 | void BufferedSink::flush()
115 | {
116 | if (bufPos == 0) return;
117 | size_t n = bufPos;
118 | bufPos = 0; // don't trigger the assert() in ~BufferedSink()
119 | write({buffer.get(), n});
120 | }
121 |
122 | HashSink::HashSink() {
123 | ctx = new SHA256_CTX;
124 | bytes = 0;
125 | SHA256_Init(ctx);
126 | }
127 |
128 | HashSink::~HashSink()
129 | {
130 | bufPos = 0;
131 | delete ctx;
132 | }
133 |
134 | void HashSink::write(std::string_view data)
135 | {
136 | bytes += data.size();
137 | // std::cout << data;
138 | SHA256_Update(ctx, data.data(), data.size());
139 | }
140 |
141 | HashResult HashSink::finish()
142 | {
143 | flush();
144 | Hash hash;
145 | SHA256_Final(hash.hash, ctx);
146 | return HashResult(hash, bytes);
147 | }
148 |
149 | inline Sink & operator << (Sink & sink, uint64_t n)
150 | {
151 | unsigned char buf[8];
152 | buf[0] = n & 0xff;
153 | buf[1] = (n >> 8) & 0xff;
154 | buf[2] = (n >> 16) & 0xff;
155 | buf[3] = (n >> 24) & 0xff;
156 | buf[4] = (n >> 32) & 0xff;
157 | buf[5] = (n >> 40) & 0xff;
158 | buf[6] = (n >> 48) & 0xff;
159 | buf[7] = (unsigned char) (n >> 56) & 0xff;
160 | sink({(char *) buf, sizeof(buf)});
161 | return sink;
162 | }
163 |
164 | void writePadding(size_t len, Sink & sink)
165 | {
166 | if (len % 8) {
167 | char zero[8];
168 | memset(zero, 0, sizeof(zero));
169 | sink({zero, 8 - (len % 8)});
170 | }
171 | }
172 |
173 | void writeString(std::string_view data, Sink & sink)
174 | {
175 | sink << data.size();
176 | sink(data);
177 | writePadding(data.size(), sink);
178 | }
179 |
180 | Sink & operator << (Sink & sink, std::string_view s)
181 | {
182 | writeString(s, sink);
183 | return sink;
184 | }
185 |
186 | const std::string base32Chars = "0123456789abcdfghijklmnpqrsvwxyz";
187 |
188 | static std::string printHash32(const Hash & hash)
189 | {
190 | std::string s;
191 | s.reserve(base32Len);
192 |
193 | for (int n = (int) base32Len - 1; n >= 0; n--) {
194 | unsigned int b = n * 5;
195 | unsigned int i = b / 8;
196 | unsigned int j = b % 8;
197 | unsigned char c =
198 | (hash.hash[i] >> j)
199 | | (i >= hashSize - 1 ? 0 : hash.hash[i + 1] << (8 - j));
200 | s.push_back(base32Chars[c & 0x1f]);
201 | }
202 |
203 | return s;
204 | }
205 |
206 | void readFull(int fd, char * buf, size_t count)
207 | {
208 | while (count) {
209 | ssize_t res = read(fd, buf, count);
210 | if (res == -1) {
211 | if (errno == EINTR) continue;
212 | fprintf(stderr, "Error reading from file"); exit(1);
213 | }
214 | if (res == 0) { fprintf(stderr, "Unexpected end-of-file"); exit(1); }
215 | count -= res;
216 | buf += res;
217 | }
218 | }
219 |
220 | Path readLink(const Path & path)
221 | {
222 | std::vector<char> buf;
223 | for (ssize_t bufSize = PATH_MAX/4; true; bufSize += bufSize/2) {
224 | buf.resize(bufSize);
225 | ssize_t rlSize = readlink(path.c_str(), buf.data(), bufSize);
226 | if (rlSize == -1)
227 | if (errno == EINVAL)
228 | { fprintf(stderr, "Error: '%s' is not a symlink", path.c_str()); exit(1); }
229 | else
230 | { fprintf(stderr, "Error: reading symbolic link '%s'", path.c_str()); exit(1); }
231 | else if (rlSize < bufSize)
232 | return std::string(buf.data(), rlSize);
233 | }
234 | }
235 |
236 | static void dumpContents(const Path & path, off_t size,
237 | Sink & sink)
238 | {
239 | sink << "contents" << size;
240 |
241 | auto fd = open(path.c_str(), O_RDONLY | O_CLOEXEC); // let system close this
242 | if (fd == -1) { fprintf(stderr, "Error opening file '%s'", path.c_str()); exit(1); }
243 |
244 | std::vector<char> buf(65536);
245 | size_t left = size;
246 |
247 | while (left > 0) {
248 | auto n = std::min(left, buf.size());
249 | readFull(fd, buf.data(), n);
250 | left -= n;
251 | sink({buf.data(), n});
252 | }
253 |
254 | writePadding(size, sink);
255 | }
256 |
257 | struct stat lstat(const Path & path) {
258 | struct stat st; if (!lstat(path.c_str(), &st)) return st;
259 | fprintf(stderr, "Error getting status of '%s'", path.c_str()); exit(1);
260 | }
261 |
262 | void dump(const Path & path, Sink & sink)
263 | {
264 | auto st = lstat(path.c_str());
265 |
266 | sink << "(";
267 |
268 | if (S_ISREG(st.st_mode)) {
269 | sink << "type" << "regular";
270 | if (st.st_mode & S_IXUSR)
271 | sink << "executable" << "";
272 | dumpContents(path, st.st_size, sink);
273 | }
274 |
275 | else if (S_ISLNK(st.st_mode))
276 | sink << "type" << "symlink" << "target" << readLink(path);
277 |
278 | else { fprintf(stderr, "Error: file '%s' has an unsupported type", path.c_str()); exit(1); }
279 |
280 | sink << ")";
281 | }
282 |
283 | const std::string narVersionMagic1 = "nix-archive-1";
284 |
285 | HashResult hashPath(const Path & path) {
286 | HashSink sink;
287 | sink << narVersionMagic1;
288 | dump(path, sink);
289 | return sink.finish();
290 | }
291 |
292 | int main(int argc, char *argv[]) {
293 | if (argc != 2) { fprintf(stderr, "Usage: %s <path>\n", argv[0]); exit(1); } Hash hash = hashPath(argv[1]).first;
294 | fprintf(stdout, "%s", printHash32(hash).c_str());
295 | }
296 |
--------------------------------------------------------------------------------
/overlays/nix-store-send.nix.md:
--------------------------------------------------------------------------------
1 | /*
2 |
3 | # Non-Interactive `nix copy`
4 |
5 | `nix copy` copies build outputs from one store to another, but it interactively reads from both stores to build the diff that actually needs copying, and it copies whole store components (as Nix Archives (NAR)).
6 |
7 | This implements a copy mechanism more akin to a `zfs send`/`zfs receive` pair: a first part of the process runs on the source only and generates a differential upload stream/archive, which can then be stored or forwarded directly to be applied on the target.
8 |
9 | The advantage is that this removes load from the target: with a correct diff, the communication is reduced to an absolute minimum (no interaction, just a single file upload / stream), and the target has to read nothing and compute very little.
10 | The current implementation diffs with whole-file granularity, which could be improved.
11 |
12 |
13 | ## Sending
14 |
15 | `nix-store-send` needs to be called with a set of desired target (`after`) store root components and a set of store root components that are assumed to already exist on the target (`before`) and that also exist locally.
16 |
17 | These are then both expanded to all their dependencies, and any diff in store components is packed into an archive stream, together with unpacking instructions.
18 | `nix-store-send` hashes each file it processes, to optimize the send stream to include files (by content) only once, and only if they are not also referenced from `before` components.
19 | The hashes are the same as the ones `nix-store --optimize` creates in `/nix/store/.links/`.
20 |
21 |
22 | ### Description of Implementation
23 |
24 | Naming convention:
25 | * *files* are (the names of) elements in the `/nix/store/.links/` directory (which contains regular files and symbolic links)
26 | * *components* (`*Comps`) are (the names of) elements in the `/nix/store/` directory
27 | * components are hardlinks to files or directories containing hardlinks to files (recursively)
28 |
29 | * assessment: build sets of `afterComps` and `beforeComps` components including dependencies (a sketch of this set computation follows after this list)
30 | * let `\` be the complement operator on sets or keys of a map, `!` extract the keys of a map as a set, and `findFiles` be a function that, for a set of components, finds all files in those components and builds a multi-map `hash => [path]`
31 | * `createComps = afterComps \ beforeComps` # components we create
32 | * `pruneComps = beforeComps \ afterComps` # components we no longer need
33 | * `keepComps = beforeComps \ pruneComps` # components we keep
34 | * `linkHM = findFiles(createComps)` # files linked from components that we create (i.e. will create new links to)
35 | * `keepHM = findFiles(keepComps)` # files linked from components that we keep
36 | * `oldHM = findFiles(pruneComps)` # files linked from components that we can delete
37 | * `pruneHL = !oldHM \ !keepHM \ !linkHM` # files we no longer need
38 | * `uploadHL = !linkHM \ !keepHM \ !oldHM` # files we need but don't have
39 | * `restoreHL = !linkHM \ uploadHL` # files we need and have (just not in the .links dir)
40 | * sending: `tar` to stdout:
41 | * `.restore-links` (optional): `${hash}=$(file $hash)\0` for each `hash` in `restoreHL`, where `file` is a function returning, for that hash, an entry from `keepHM` or `oldHM` (i.e., for each file we'll need to hardlink, one of the paths where it exists)
42 | * `.cerate-paths`: serialize `linkHM` as per below instructions
43 | * `.delete-paths`: serialize `pruneComps` as `\n` separated list of `$(basename $path)`
44 | * `.prune-links` (optional): serialize `pruneHL` as `\n` separated list
45 | * new files: from (and relative to) `/nix/store/.links/` all files listed in `uploadHL`
46 |
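A minimal sketch (in bash, with illustrative variable names; the actual `nix-store-send.sh` is not shown here) of the set computation above, assuming the two dependency closures are available as newline-separated files `afterComps` and `beforeComps`:

```bash
# Sketch only: derive createComps / pruneComps / keepComps from the two closures.
declare -A after=( ) before=( ) create=( ) prune=( ) keep=( )
while read -r c ; do after[$c]=1 ; done <afterComps
while read -r c ; do before[$c]=1 ; done <beforeComps
for c in "${!after[@]}" ; do [[ ${before[$c]:-} ]] || create[$c]=1 ; done # afterComps \ beforeComps
for c in "${!before[@]}" ; do [[ ${after[$c]:-} ]] || prune[$c]=1 ; done # beforeComps \ afterComps
for c in "${!before[@]}" ; do [[ ${prune[$c]:-} ]] || keep[$c]=1 ; done # beforeComps \ pruneComps
# »findFiles« (not shown) would then hash every file in a component set (as »nar-hash« does) and
# collect the »hash => [path]« multi-maps, from which the *HL file sets follow by the same pattern.
```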
47 |
48 | #### `cerate-paths` Linking Script
49 |
50 | We build a "script" (sequence of instructions with arguments).
51 | Possible instructions are:
52 | * `r()`: `cd /nix/store/`. This is implicitly the first instruction.
53 | * `d(name)`: `mkdir $name && cd $name`.
54 | * `p()`: `cd ..`. May not exit `/nix/store/`.
55 | * `f(hash,mode,name)`: `ln -T /nix/store/.links/$hash $name && [[ ! mode == *x* ]] || chmod +x $name`.
56 |
57 | Flip the `linkHM` (multi) map from `hash => [path]` to `path => hash` and process the keys sorted (with `LC_ALL=C`).
58 | Start with an empty `cwd` stack and no previous path. For each path:
59 | * If the first label of the path is different than the previous path's first label, emit `r()` and clear the stack.
60 | * Split the path at `/`. Call the last element `name` and the list of elements before `dirs`.
61 | * From the front, find the first position where `cwd` and `dirs` differ (or either ends).
62 | * For each element in `cwd` starting at that position, emit `p()` and remove the element.
63 | * For each element in `dirs` starting at that position, emit `d(dir)` and add that element to `cwd`.
64 | * `stat` `name` in `cwd`, set `mode` to `x` if the file is executable, `-` otherwise.
65 | * Emit `f(hash,mode,name)`.
66 |
67 | The serialization of the script should be compact and simple/fast to parse (in bash). Possible values are instructions, hashes, and file/directory names. Names only occur as the last argument and can not contain `/` or `\0` (the zero byte).
68 | Therefore we use `\0` as line terminator and `/` as separator between instructions and arguments.
69 | With one character per instruction, this is about as compact as it gets (for variable-length fields), and a simple replacement of `\0` by `\n` (and `/` by ` `) makes the file quite readable (for standard file names); a sketch of a consumer for this format follows the example below.
70 |
71 | Example (printable version):
72 | ```bash
73 | r # start new component
74 | f - hash-name # link file directly as a single-file store component
75 | # (still at top level)
76 | d hash-name # create directory for next component
77 | d bin # create subdirectory
78 | f x prog # link file (which is executable)
79 | p # go to parent dir
80 | d lib # create subdirectory
81 | d share # create nested subdirectory
82 | f - lib.o # link files
83 | f - lib.1.o
84 | r # start new component
85 | d hash-name # ...
86 | [...]
87 | ```
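
As a rough sketch of how this format could be consumed (the actual `nix-store-recv.sh` is not shown here and may differ), reading `\0`-terminated records and splitting instruction and arguments at `/`:

```bash
# Sketch only: interpret a ».cerate-paths« stream (»\0«-terminated records, fields split at »/«).
cd /nix/store/ || exit
while IFS=/ read -r -d '' op a b c ; do
    case $op in
        r) cd /nix/store/ ;;                      # start a new component at the store root
        d) mkdir -- "$a" && cd -- "$a" ;;         # create and enter a directory
        p) cd .. ;;                               # go back to the parent directory
        f) ln -T -- /nix/store/.links/"$a" "$c"   # hard-link the file identified by its hash ...
           [[ $b != *x* ]] || chmod +x -- "$c" ;; # ... and mark it executable if the mode says so
    esac
done <.cerate-paths
```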
88 |
89 |
90 | ## Receiving
91 |
92 | `nix-store-recv` accepts a (flat) `tar` stream on `stdin` and unpacks it to a temporary directory on the same file system as the `/nix/store/` (or a path to either of those).
93 |
94 | It can work by amending an existing `/nix/store/.links/` list, or it can re-create the required parts of it.
95 | If the links list does not exist already, the `tar`/dir must include the `.restore-links` file; otherwise it should contain the `.prune-links` file.
96 |
97 | Note that while a receive is in progress, or if one was aborted and not rolled back, there may be partial store paths, and the dependency closure invariant (that all dependencies of an existing path also exist) may very well be violated. This should be non-critical, since Nix itself won't be accessing the store (and the databases are missing/outdated anyway).
98 |
99 |
100 | ### Description of the Implementation
101 |
102 | If `/nix/store/.links/` exists, move all files from the temp dir into it, but do keep a list of all moved files (edit: or just defer the moving).
103 | Otherwise, for each entry in `.restore-links`, hard-link the path after the `=` as the hash before the `=` into the temporary directory, then move the temporary directory to `/nix/store/.links/`.
104 |
105 | Execute the `.cerate-paths` script by executing the commands as translated above in the sequence they occur in the script. For any `d` or `f` directly following an `r`, add its `name` argument to a list of added store components.
106 | TODO: Make creation of store components atomic, e.g.: on `r`, rename the previous path to remove the `.tmp.` prefix; when `d` or `f` directly follow an (implicit) `r`, prefix `name` with `.tmp.`.
107 | Delete `/nix/store/.links/` if it got restored.
108 |
109 | To remove everything that existed `before` but should not exist `after`, delete all store components listed in `.delete-paths` and, if both exist, all `/nix/store/.links/` files listed in `.prune-links`.
110 |
111 | Before deleting the old components, tests on the new set of components can and should be performed. If the tests or the unpacking fail, a rollback can be performed:
112 | If `/nix/store/.links/` got restored and exists, delete it; else, delete all files that were moved into it (if they still exist).
113 | Delete all store components listed in the list of added store components (which, if lost, can be extracted by dry-running `.cerate-paths`).
114 |
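For the `.restore-links` handling described above, a minimal sketch (assuming `hash=path` records with store-relative paths and an illustrative scratch directory name; the actual `nix-store-recv.sh` is not shown here):

```bash
# Sketch only: rebuild the links directory from ».restore-links« (»hash=path« records, »\0«-terminated),
# run from the temporary directory that the send stream was unpacked to.
mkdir ./.links/ || exit
while read -r -d '' record ; do
    hash=${record%%=*} ; path=${record#*=}   # split at the first »=«; hashes contain no »=«
    ln -T -- "/nix/store/$path" ./.links/"$hash"
done <./.restore-links
mv -T ./.links/ /nix/store/.links/           # move into place once all links are restored
```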
115 |
116 | ## Implementation
117 |
118 | ```nix
119 | #*/# end of MarkDown, beginning of NixPkgs overlay:
120 | dirname: inputs: final: prev: let
121 | inherit (final) pkgs; lib = inputs.self.lib.__internal__;
122 | in {
123 |
124 | nix-store-send = pkgs.substituteAll {
125 | src = ./nix-store-send.sh; dir = "bin"; name = "nix-store-send"; isExecutable = true;
126 | shell = "${pkgs.bash}/bin/bash";
127 | nix = "${pkgs.nix}/bin/nix --extra-experimental-features nix-command";
128 | narHash = "${pkgs.nar-hash}/bin/nar-hash";
129 | inherit (lib.fun.bash.asVars) generic_arg_parse;
130 | };
131 | nix-store-recv = pkgs.substituteAll {
132 | src = ./nix-store-recv.sh; dir = "bin"; name = "nix-store-recv"; isExecutable = true;
133 | shell = "${pkgs.bash}/bin/bash";
134 | unshare = "${pkgs.util-linux}/bin/unshare";
135 | xargs = "${pkgs.findutils}/bin/xargs";
136 | inherit (lib.fun.bash.asVars) generic_arg_parse generic_arg_help generic_arg_verify;
137 | };
138 |
139 | nar-hash = pkgs.runCommandLocal "nar-hash" {
140 | src = ./nar-hash.cc; nativeBuildInputs = [ pkgs.buildPackages.gcc pkgs.buildPackages.openssl ];
141 | } ''
142 | mkdir -p $out/bin/
143 | g++ -std=c++17 -lcrypto -lssl -O3 $src -o $out/bin/nar-hash
144 | '';
145 |
146 | }
147 |
--------------------------------------------------------------------------------
/patches/README.md:
--------------------------------------------------------------------------------
1 |
2 | # Some Patches
3 |
4 | ... for `nixpkgs` or programs therein.
5 |
6 | A patch `<name>-*.patch` is generally for the open source software `<name>`, which is added/modified by the nixpkgs overlay in `../overlays/<name>.nix.md`.
7 | Patches for `nixpkgs` are applied in [`../flake.nix`](../flake.nix).
8 |
9 | To create/"commit" a patch of the current directory vs its latest commit:
10 | ```bash
11 | git diff >.../overlays/patches/....patch
12 | ```
13 |
14 | To test a patch against the repo in CWD, or to "check it out" to edit and then "commit" again:
15 | ```bash
16 | git reset --hard HEAD # destructively reset the working tree to the current commit
17 | patch --dry-run -p1 <.../overlays/patches/....patch # test only
18 | patch -p1 <.../overlays/patches/....patch # apply to CWD
19 | ```
20 |
21 |
22 | ## License
23 |
24 | Patches included in this repository are written by the direct contributors to this repository (unless individually noted otherwise; pre-existing patches should be referenced by URL).
25 |
26 | Each individual patch shall be licensed under the most permissive license (up to public domain / CC0) that the software it is for (and derived from) allows.
27 | Usually that would probably be the license of the original software itself, which should be mentioned in the respective overlay and/or the linked source code.
28 |
--------------------------------------------------------------------------------
/patches/default.nix:
--------------------------------------------------------------------------------
1 | dirname: inputs: inputs.functions.lib.importPatches inputs dirname { }
2 |
--------------------------------------------------------------------------------
/patches/nixpkgs/default.nix:
--------------------------------------------------------------------------------
1 | dirname: inputs@{ self, nixpkgs, ...}: self.lib.__internal__.fun.importPatches inputs dirname { }
2 |
--------------------------------------------------------------------------------
/patches/nixpkgs/make-bootable-optional.patch:
--------------------------------------------------------------------------------
1 | diff --git a/nixos/modules/system/activation/top-level.nix b/nixos/modules/system/activation/top-level.nix
2 | index b8aeee8c11b..f4e72666f39 100644
3 | --- a/nixos/modules/system/activation/top-level.nix
4 | +++ b/nixos/modules/system/activation/top-level.nix
5 | @@ -27,7 +27,7 @@ let
6 |
7 | # Containers don't have their own kernel or initrd. They boot
8 | # directly into stage 2.
9 | - ${optionalString (!config.boot.isContainer) ''
10 | + ${optionalString config.boot.loader.enable ''
11 | if [ ! -f ${kernelPath} ]; then
12 | echo "The bootloader cannot find the proper kernel image."
13 | echo "(Expecting ${kernelPath})"
14 | @@ -74,18 +74,19 @@ let
15 | ${concatStringsSep "\n"
16 | (mapAttrsToList (name: path: "ln -s ${path} $out/specialisation/${name}") children)}
17 |
18 | - mkdir $out/bin
19 | - export localeArchive="${config.i18n.glibcLocales}/lib/locale/locale-archive"
20 | - substituteAll ${./switch-to-configuration.pl} $out/bin/switch-to-configuration
21 | - chmod +x $out/bin/switch-to-configuration
22 | - ${optionalString (pkgs.stdenv.hostPlatform == pkgs.stdenv.buildPlatform) ''
23 | - if ! output=$($perl/bin/perl -c $out/bin/switch-to-configuration 2>&1); then
24 | - echo "switch-to-configuration syntax is not valid:"
25 | - echo "$output"
26 | - exit 1
27 | - fi
28 | + ${optionalString config.system.build.makeSwitchable ''
29 | + mkdir $out/bin
30 | + export localeArchive="${config.i18n.glibcLocales}/lib/locale/locale-archive"
31 | + substituteAll ${./switch-to-configuration.pl} $out/bin/switch-to-configuration
32 | + chmod +x $out/bin/switch-to-configuration
33 | + ${optionalString (pkgs.stdenv.hostPlatform == pkgs.stdenv.buildPlatform) ''
34 | + if ! output=$($perl/bin/perl -c $out/bin/switch-to-configuration 2>&1); then
35 | + echo "switch-to-configuration syntax is not valid:"
36 | + echo "$output"
37 | + exit 1
38 | + fi
39 | + ''}
40 | ''}
41 | -
42 | echo -n "${toString config.system.extraDependencies}" > $out/extra-dependencies
43 |
44 | ${config.system.extraSystemBuilderCmds}
45 | @@ -102,22 +103,21 @@ let
46 | allowSubstitutes = false;
47 | buildCommand = systemBuilder;
48 |
49 | - inherit (pkgs) coreutils;
50 | - systemd = config.systemd.package;
51 | - shell = "${pkgs.bash}/bin/sh";
52 | - su = "${pkgs.shadow.su}/bin/su";
53 | - utillinux = pkgs.util-linux;
54 | + #shell = "${pkgs.bash}/bin/sh";
55 |
56 | - kernelParams = config.boot.kernelParams;
57 | - installBootLoader = config.system.build.installBootLoader;
58 | + systemd = config.systemd.package;
59 | + kernelParams = /* lib.optionalString config.boot.loader.enable */ config.boot.kernelParams;
60 | activationScript = config.system.activationScripts.script;
61 | dryActivationScript = config.system.dryActivationScript;
62 | nixosLabel = config.system.nixos.label;
63 | -
64 | configurationName = config.boot.loader.grub.configurationName;
65 |
66 | # Needed by switch-to-configuration.
67 | - perl = pkgs.perl.withPackages (p: with p; [ ConfigIniFiles FileSlurp NetDBus ]);
68 | + installBootLoader = lib.optionalString config.system.build.makeSwitchable config.system.build.installBootLoader;
69 | + perl = lib.optionalString config.system.build.makeSwitchable (pkgs.perl.withPackages (p: with p; [ ConfigIniFiles FileSlurp NetDBus ]));
70 | + su = lib.optionalString config.system.build.makeSwitchable "${pkgs.shadow.su}/bin/su";
71 | + utillinux = lib.optionalString config.system.build.makeSwitchable pkgs.util-linux;
72 | + coreutils = lib.optionalString config.system.build.makeSwitchable pkgs.coreutils;
73 | };
74 |
75 | # Handle assertions and warnings
76 | @@ -197,6 +197,15 @@ in
77 | );
78 | };
79 |
80 | + boot.loader.enable = mkOption {
81 | + default = !config.boot.isContainer;
82 | + defaultText = literalExpression "!config.boot.isContainer";
83 | + description = ''
84 | + Whether to make this system configuration directly bootable by configuring a bootloader, or instead just support booting from stage 2 onwards.
85 | + '';
86 | + type = types.bool;
87 | + };
88 | +
89 | system.boot.loader.id = mkOption {
90 | internal = true;
91 | default = "";
92 | @@ -245,6 +254,15 @@ in
93 | } (types.either types.str types.package);
94 | };
95 |
96 | + makeSwitchable = mkOption {
97 | + default = !config.boot.isContainer;
98 | + defaultText = literalExpression "!config.boot.isContainer";
99 | + description = ''
100 | + Whether to create the switch-to-configuration script, allowing to switch to the system at runtime or activating it for booting from the bootloader.
101 | + '';
102 | + type = types.bool;
103 | + };
104 | +
105 | toplevel = mkOption {
106 | type = types.package;
107 | readOnly = true;
108 | diff --git a/nixos/modules/system/boot/stage-1.nix b/nixos/modules/system/boot/stage-1.nix
109 | index 04753a6767d..03ea4e3e1c0 100644
110 | --- a/nixos/modules/system/boot/stage-1.nix
111 | +++ b/nixos/modules/system/boot/stage-1.nix
112 | @@ -464,8 +464,8 @@ in
113 |
114 | boot.initrd.enable = mkOption {
115 | type = types.bool;
116 | - default = !config.boot.isContainer;
117 | - defaultText = literalExpression "!config.boot.isContainer";
118 | + default = config.boot.loader.enable;
119 | + defaultText = literalExpression "config.boot.loader.enable";
120 | description = ''
121 | Whether to enable the NixOS initial RAM disk (initrd). This may be
122 | needed to perform some initialisation tasks (like mounting
123 |
--------------------------------------------------------------------------------
/patches/nixpkgs/make-required-packages-optional.patch:
--------------------------------------------------------------------------------
1 | diff --git a/nixos/modules/config/system-path.nix b/nixos/modules/config/system-path.nix
2 | index 875c4c9c441..a703c6d28aa 100644
3 | --- a/nixos/modules/config/system-path.nix
4 | +++ b/nixos/modules/config/system-path.nix
5 | @@ -112,6 +112,12 @@ in
6 | description = "List of directories to be symlinked in /run/current-system/sw.";
7 | };
8 |
9 | + includeRequiredPackages = mkOption {
10 | + type = types.bool;
11 | + default = true;
12 | + description = "Whether to include a list of (usually) required packages in .";
13 | + };
14 | +
15 | extraOutputsToInstall = mkOption {
16 | type = types.listOf types.str;
17 | default = [ ];
18 | @@ -142,7 +148,7 @@ in
19 |
20 | config = {
21 |
22 | - environment.systemPackages = requiredPackages ++ config.environment.defaultPackages;
23 | + environment.systemPackages = (lib.optionals config.environment.includeRequiredPackages requiredPackages) ++ config.environment.defaultPackages;
24 |
25 | environment.pathsToLink =
26 | [ "/bin"
27 |
--------------------------------------------------------------------------------
/patches/nixpkgs/make-switchable-optional-22.11.patch:
--------------------------------------------------------------------------------
1 | diff --git a/nixos/modules/system/activation/top-level.nix b/nixos/modules/system/activation/top-level.nix
2 | index 00b11471e1c..463f57f3ce1 100644
3 | --- a/nixos/modules/system/activation/top-level.nix
4 | +++ b/nixos/modules/system/activation/top-level.nix
5 | @@ -63,16 +63,18 @@ let
6 | echo -n "$nixosLabel" > $out/nixos-version
7 | echo -n "${config.boot.kernelPackages.stdenv.hostPlatform.system}" > $out/system
8 |
9 | - mkdir $out/bin
10 | - export localeArchive="${config.i18n.glibcLocales}/lib/locale/locale-archive"
11 | - substituteAll ${./switch-to-configuration.pl} $out/bin/switch-to-configuration
12 | - chmod +x $out/bin/switch-to-configuration
13 | - ${optionalString (pkgs.stdenv.hostPlatform == pkgs.stdenv.buildPlatform) ''
14 | - if ! output=$($perl/bin/perl -c $out/bin/switch-to-configuration 2>&1); then
15 | - echo "switch-to-configuration syntax is not valid:"
16 | - echo "$output"
17 | - exit 1
18 | - fi
19 | + ${optionalString config.system.build.makeSwitchable ''
20 | + mkdir $out/bin
21 | + export localeArchive="${config.i18n.glibcLocales}/lib/locale/locale-archive"
22 | + substituteAll ${./switch-to-configuration.pl} $out/bin/switch-to-configuration
23 | + chmod +x $out/bin/switch-to-configuration
24 | + ${optionalString (pkgs.stdenv.hostPlatform == pkgs.stdenv.buildPlatform) ''
25 | + if ! output=$($perl/bin/perl -c $out/bin/switch-to-configuration 2>&1); then
26 | + echo "switch-to-configuration syntax is not valid:"
27 | + echo "$output"
28 | + exit 1
29 | + fi
30 | + ''}
31 | ''}
32 |
33 | ${config.system.systemBuilderCommands}
34 | @@ -98,14 +100,14 @@ let
35 | allowSubstitutes = false;
36 | buildCommand = systemBuilder;
37 |
38 | - inherit (pkgs) coreutils;
39 | + coreutils = lib.optionalString config.system.build.makeSwitchable pkgs.coreutils;
40 | systemd = config.systemd.package;
41 | shell = "${pkgs.bash}/bin/sh";
42 | - su = "${pkgs.shadow.su}/bin/su";
43 | - utillinux = pkgs.util-linux;
44 | + su = lib.optionalString config.system.build.makeSwitchable "${pkgs.shadow.su}/bin/su";
45 | + utillinux = lib.optionalString config.system.build.makeSwitchable pkgs.util-linux;
46 |
47 | kernelParams = config.boot.kernelParams;
48 | - installBootLoader = config.system.build.installBootLoader;
49 | + installBootLoader = lib.optionalString config.system.build.makeSwitchable config.system.build.installBootLoader;
50 | activationScript = config.system.activationScripts.script;
51 | dryActivationScript = config.system.dryActivationScript;
52 | nixosLabel = config.system.nixos.label;
53 | @@ -113,7 +115,7 @@ let
54 | inherit (config.system) extraDependencies;
55 |
56 | # Needed by switch-to-configuration.
57 | - perl = pkgs.perl.withPackages (p: with p; [ ConfigIniFiles FileSlurp ]);
58 | + perl = lib.optionalString config.system.build.makeSwitchable (pkgs.perl.withPackages (p: with p; [ ConfigIniFiles FileSlurp ]));
59 | } // config.system.systemBuilderArgs);
60 |
61 | # Handle assertions and warnings
62 | @@ -188,6 +190,15 @@ in
63 | } (types.either types.str types.package);
64 | };
65 |
66 | + makeSwitchable = mkOption {
67 | + default = !config.boot.isContainer;
68 | + defaultText = literalExpression "!config.boot.isContainer";
69 | + description = ''
70 | + Whether to create the switch-to-configuration script, allowing to switch to the system at runtime or activating it for booting from the bootloader.
71 | + '';
72 | + type = types.bool;
73 | + };
74 | +
75 | toplevel = mkOption {
76 | type = types.package;
77 | readOnly = true;
78 |
--------------------------------------------------------------------------------
/patches/nixpkgs/make-switchable-optional-23.05.patch:
--------------------------------------------------------------------------------
1 | diff --git a/nixos/modules/system/activation/top-level.nix b/nixos/modules/system/activation/top-level.nix
2 | index 00b11471e1c..463f57f3ce1 100644
3 | --- a/nixos/modules/system/activation/top-level.nix
4 | +++ b/nixos/modules/system/activation/top-level.nix
5 | @@ -63,17 +63,19 @@ let
6 | echo -n "$nixosLabel" > $out/nixos-version
7 | echo -n "${config.boot.kernelPackages.stdenv.hostPlatform.system}" > $out/system
8 |
9 | - mkdir $out/bin
10 | - export localeArchive="${config.i18n.glibcLocales}/lib/locale/locale-archive"
11 | - export distroId=${config.system.nixos.distroId};
12 | - substituteAll ${./switch-to-configuration.pl} $out/bin/switch-to-configuration
13 | - chmod +x $out/bin/switch-to-configuration
14 | - ${optionalString (pkgs.stdenv.hostPlatform == pkgs.stdenv.buildPlatform) ''
15 | - if ! output=$($perl/bin/perl -c $out/bin/switch-to-configuration 2>&1); then
16 | - echo "switch-to-configuration syntax is not valid:"
17 | - echo "$output"
18 | - exit 1
19 | - fi
20 | + ${optionalString config.system.build.makeSwitchable ''
21 | + mkdir $out/bin
22 | + export localeArchive="${config.i18n.glibcLocales}/lib/locale/locale-archive"
23 | + export distroId=${config.system.nixos.distroId};
24 | + substituteAll ${./switch-to-configuration.pl} $out/bin/switch-to-configuration
25 | + chmod +x $out/bin/switch-to-configuration
26 | + ${optionalString (pkgs.stdenv.hostPlatform == pkgs.stdenv.buildPlatform) ''
27 | + if ! output=$($perl/bin/perl -c $out/bin/switch-to-configuration 2>&1); then
28 | + echo "switch-to-configuration syntax is not valid:"
29 | + echo "$output"
30 | + exit 1
31 | + fi
32 | + ''}
33 | ''}
34 |
35 | ${config.system.systemBuilderCommands}
36 | @@ -98,14 +100,14 @@ let
37 | allowSubstitutes = false;
38 | buildCommand = systemBuilder;
39 |
40 | - inherit (pkgs) coreutils;
41 | + coreutils = lib.optionalString config.system.build.makeSwitchable pkgs.coreutils;
42 | systemd = config.systemd.package;
43 | shell = "${pkgs.bash}/bin/sh";
44 | - su = "${pkgs.shadow.su}/bin/su";
45 | - utillinux = pkgs.util-linux;
46 | + su = lib.optionalString config.system.build.makeSwitchable "${pkgs.shadow.su}/bin/su";
47 | + utillinux = lib.optionalString config.system.build.makeSwitchable pkgs.util-linux;
48 |
49 | kernelParams = config.boot.kernelParams;
50 | - installBootLoader = config.system.build.installBootLoader;
51 | + installBootLoader = lib.optionalString config.system.build.makeSwitchable config.system.build.installBootLoader;
52 | activationScript = config.system.activationScripts.script;
53 | dryActivationScript = config.system.dryActivationScript;
54 | nixosLabel = config.system.nixos.label;
55 | @@ -113,7 +115,7 @@ let
56 | inherit (config.system) extraDependencies;
57 |
58 | # Needed by switch-to-configuration.
59 | - perl = pkgs.perl.withPackages (p: with p; [ ConfigIniFiles FileSlurp ]);
60 | + perl = lib.optionalString config.system.build.makeSwitchable (pkgs.perl.withPackages (p: with p; [ ConfigIniFiles FileSlurp ]));
61 | } // config.system.systemBuilderArgs);
62 |
63 | # Handle assertions and warnings
64 | @@ -188,6 +190,15 @@ in
65 | } (types.either types.str types.package);
66 | };
67 |
68 | + makeSwitchable = mkOption {
69 | + default = !config.boot.isContainer;
70 | + defaultText = literalExpression "!config.boot.isContainer";
71 | + description = ''
72 | + Whether to create the switch-to-configuration script, allowing to switch to the system at runtime or activating it for booting from the bootloader.
73 | + '';
74 | + type = types.bool;
75 | + };
76 | +
77 | toplevel = mkOption {
78 | type = types.package;
79 | readOnly = true;
80 |
--------------------------------------------------------------------------------
/utils/container-nix:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env -S bash -u -o pipefail
2 |
3 | repo=$( dirname -- "$( cd "$( dirname -- "$0" )" ; pwd )" )
4 | otherArch=x86_64 ; [[ $( uname --machine ) != x86_64 ]] || otherArch=aarch64
5 |
6 | description="Provides a way of running »nix« commands without installing Nix, by running the commands inside a container.
7 | It probably makes sense to create an »alias nix='./utils/container-nix --options=... --'« and use that as »nix«.
8 | When called without »NIX_ARGS«, this opens a bash shell in the container, with »nix« available directly.
9 | Also, anything executed by »nix run« will run inside the container, and »result« symlinks are resolvable inside the container (outside, the leading »/nix« has to be replaced with the value of »--nix-dir«).
10 | This repo ($repo) is volume-mounted to the CWD in the container (»/repo«).
11 | "
12 | declare -g -A allowedArgs=(
13 | [--runtime=BIN]="Path to or binary name of the container runtime to use. Defaults to »docker«. »podman« may or may not work as well."
14 | [--nix-dir=PATH]="Path to create and/or use the »/nix/« dir at. Defaults to »$( dirname "$repo" )/.nix/«."
15 | [--init]="Whether to force initialization of the »--nix-dir« dir. Enabled implicitly only if »--nix-dir« does not exist yet. Should not be enabled if »--nix-dir« has been initialized before."
16 | [--no-privileged]="Don't run the container as »--privileged«. This probably breaks things."
17 | [--pass-dev]="Pass the host's »/dev/« into the container. Otherwise, installations will happen in qemu VMs."
18 | [--no-kvm]="With »--no-privileged«, also do not pass in »/dev/kvm«. VMs will be very slow. May be incompatible with »--pass-dev«."
19 | [--no-binfmt]="Do not create a (temporary but system-wide) »binfmt_misc« registration for »$otherArch« (if there is none whose name contains that word yet). This requires the container to run as »--privileged« and »root«. (Uses »tonistiigi/binfmt«.)"
20 | [--verbose]="Whether to print out the container commands before running them."
21 | [--container-args=ARGS]="Additional arguments to pass to the container runtime (split at spaces)."
22 | )
23 |
24 | function main {
25 |
26 | generic-arg-parse "$@" || return
27 | generic-arg-help "$0" '[NIX_ARGS]' "$description" '' || return
28 | exitCode=3 generic-arg-verify || return
29 |
30 | if [[ ${args[verbose]:-} ]] ; then _set_x='set -x' ; fi
31 |
32 | nixDir=${args[nix-dir]:-$( dirname "$repo" )/.nix/}
33 | doInit=${args[init]:-} ; if [[ ! -e "$nixDir"/ ]] ; then doInit=1 ; fi
34 | mkdir -p "$nixDir"/
35 |
36 | cmd=( )
37 | # if [[ ! ${args[runtime]:-} ]] && which podman &>/dev/null ; then
38 | # args[runtime]=podman
39 | # fi
40 | cmd+=( "${args[runtime]:-docker}" run )
41 |
42 | if [[ ${args[runtime]:-} == podman ]] ; then
43 | echo '{"default":[{"type":"insecureAcceptAnything"}]}' > "$nixDir"/registry-policy.json # TODO: verify that podman still checks that the hash matches the one requested below
44 | cmd+=( --signature-policy="$nixDir"/registry-policy.json )
45 | fi
46 |
47 | cmd+=( --rm )
48 |
49 | if [[ ! ${args[no-privileged]:-} ]] ; then
50 | cmd+=( --privileged )
51 | if [[ ${args[runtime]:-} == podman ]] ; then
52 | cmd+=( --group-add keep-groups )
53 | fi
54 | if [[ ! ${args[no-binfmt]:-} ]] && [[ $( ls /proc/sys/fs/binfmt_misc ) != *$otherArch* ]] ; then
55 | hash=03a74d722a906b41e46a3790ec351636ca76d876e5c827f5c7740082ecfdf7e3 ; [[ $( uname --machine ) != x86_64 ]] || hash=6088cbd69c369178ffd6b68992f342c3a9d5c3cc619bbaa4bfe9a98cb23893d0
56 | image= ; [[ ${args[runtime]:-} != podman ]] || image=docker:// ; image+=tonistiigi/binfmt@sha256:$hash
57 | toAdd=arm64 ; [[ $( uname --machine ) == x86_64 ]] || toAdd=amd64
58 | ( ${_set_x:-:} ; "${cmd[@]}" "$image" --install $toAdd ) || return
59 | fi
60 | else
61 | if [[ ! ${args[no-kvm]:-} ]]; then
62 | if [[ -r /dev/kvm && -w /dev/kvm ]] ; then
63 | cmd+=( --device /dev/kvm )
64 | else
65 | echo "KVM is not available (for the current user). VMs will be slow." 1>&2
66 | fi
67 | fi
68 | fi
69 |
70 | if [[ ${#argv[@]} == 0 ]] ; then cmd+=( -i -t ) ; fi # interactive TTY
71 |
72 | hash=7affae8af67e021b702e123b30a3710f8275cef004efc86d57d5cadff0d6fa56 ; if [[ $( uname --machine ) == x86_64 ]] ; then hash=251a921be086aa489705e31fa5bd59f2dadfa0824aa7f362728dfe264eb6a3d2 ; fi
73 | image= ; if [[ ${args[runtime]:-} == podman ]] ; then image+=docker:// ; fi ; image+=nixos/nix@sha256:$hash
74 |
75 | if [[ $doInit ]] ; then
76 | ( ${_set_x:-:} ; "${cmd[@]}" --volume "$nixDir"/:/.nix/ "$image" bash -c 'cp -a /nix/* /.nix/' ) || return
77 | fi
78 |
79 | nixConf=$( mktemp ) ; trap "rm $nixConf" EXIT
80 | echo '
81 | auto-optimise-store = true
82 | build-users-group = nixbld
83 | cores = 0
84 | experimental-features = recursive-nix impure-derivations nix-command flakes
85 | max-jobs = auto
86 | substituters = https://cache.nixos.org/
87 | system-features = nixos-test benchmark big-parallel kvm
88 | trusted-public-keys = cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=
89 | ' >> "$nixConf"
90 | if [[ ! ${args[no-binfmt]:-} ]] ; then
91 | echo 'extra-platforms = '$otherArch'-linux' >> "$nixConf"
92 | fi
93 | if [[ ! ${args[no-privileged]:-} ]] ; then
94 | echo 'sandbox = true' >> "$nixConf"
95 | echo 'sandbox-fallback = false' >> "$nixConf"
96 | fi
97 |
98 | cmd+=( --volume "$repo"/:/repo/ --workdir /repo/ )
99 | cmd+=( --volume "$nixDir"/:/nix/ )
100 | cmd+=( --volume "$nixConf":/etc/nix/nix.conf:ro )
101 | cmd+=( --tmpfs /tmp:mode=1777,exec,size=100% )
102 | cmd+=( --ulimit nofile=1048576:1048576 ) # 1 million open files should do
103 | if [[ ${args[pass-dev]:-} ]] ; then cmd+=( --volume /dev/:/dev/ ) ; fi # (this should be ok to do: https://github.com/moby/moby/pull/16639)
104 | cmd+=( ${args[container-args]:-} )
105 |
106 | #init='mkdir -p /.nix/{upper,work}/ || exit ; mount -t overlay -o lowerdir=/nix/,upperdir=/.nix/upper/,workdir=/.nix/upper/ none /nix/ || exit'
107 |
108 | cmd+=( "$image" )
109 |
110 | # cmd+=( bash -c )
111 | # if [[ ${#argv[@]} == 0 ]] ; then
112 | # cmd+=( "$init"' ; bash -l' )
113 | # else
114 | # cmd+=( "$init"' ; nix '"$( printf '%q ' "${argv[@]}" )" )
115 | # fi
116 |
117 | if [[ ${#argv[@]} == 0 ]] ; then
118 | cmd+=( bash -l )
119 | else
120 | cmd+=( nix "${argv[@]}" )
121 | fi
122 |
123 | ( ${_set_x:-:} ; "${cmd[@]}" ) || return
124 |
125 | }
126 |
127 |
128 | ############################################
129 | # copied from https://github.com/NiklasGollenstede/nix-wiplib/blob/c001ad7f51c0e2f7daba6ac7ff0c235fdaebe7ed/lib/setup-scripts/utils.sh
130 | ############################################
131 |
132 | ## Performs a simple and generic parsing of CLI arguments. Creates a global associative array »args« and a global normal array »argv«.
133 | # Named options may be passed as »--name[=value]«, where »value« defaults to »1«, and are assigned to »args«.
134 | # Everything else, or everything following the »--« argument, ends up as positional arguments in »argv«.
135 | # Checking the validity of the parsed arguments is up to the caller.
136 | function generic-arg-parse { # ...
137 | declare -g -A args=( ) ; declare -g -a argv=( ) # this ends up in the caller's scope
138 | while (( "$#" )) ; do
139 | if [[ $1 == -- ]] ; then shift ; argv+=( "$@" ) ; \return 0 ; fi
140 | if [[ $1 == --* ]] ; then
141 | if [[ $1 == *=* ]] ; then
142 | local key=${1/=*/} ; args[${key/--/}]=${1/$key=/}
143 | else args[${1/--/}]=1 ; fi
144 | else argv+=( "$1" ) ; fi
145 | shift ; done
146 | }
147 |
148 | ## Shows the help text for a program and exits, if »--help« was passed as argument and parsed, or does nothing otherwise.
149 | # Expects to be called between parsing and verifying the arguments.
150 | # Uses »allowedArgs« for the list of the named arguments (the values are the descriptions).
151 | # »name« should be the program name/path (usually »$0«), »args« the form/names of any positional arguments expected (e.g. »SOURCE... DEST«) and is included in the "Usage" description,
152 | # »description« the introductory text shown before the "Usage", and »suffix« any text printed after the argument list.
153 | function generic-arg-help { # 1: name, 2?: args, 3?: description, 4?: suffix
154 | if [[ ! ${args[help]:-} ]] ; then : ${allowedArgs[help]:=1} ; \return 0 ; fi
155 | [[ ! ${3:-} ]] || echo "$3"
156 | printf 'Usage:\n %s [ARG[=value]]... [--] %s\n\nWhere »ARG« may be any of:\n' "$1" "${2:-}"
157 | local name ; while IFS= read -u3 -r name ; do
158 | printf ' %s\n %s\n' "$name" "${allowedArgs[$name]}"
159 | done 3< <( printf '%s\n' "${!allowedArgs[@]}" | LC_ALL=C sort )
160 | printf ' %s\n %s\n' "--help" "Do nothing but print this message and exit with success."
161 | [[ ! ${4:-} ]] || echo "$4"
162 | \exit 0
163 | }
164 |
165 | ## Performs a basic verification of the named arguments passed by the user and parsed by »generic-arg-parse« against the names in »allowedArgs«.
166 | # Entries in »allowedArgs« should have the form »[--name]="description"« for boolean flags, and »[--name=VAL]="description"« for string arguments.
167 | # »description« is used by »generic-arg-help«. Boolean flags may only have the values »1« (as set by »generic-arg-parse« for flags without value) or be empty.
168 | # »VAL« is purely nominal. Any argument passed that is not in »allowedArgs« raises an error.
169 | function generic-arg-verify { # 1?: exitCode
170 | local exitCode=${exitCode:-${1:-1}}
171 | local names=' '"${!allowedArgs[@]}"
172 | for name in "${!args[@]}" ; do
173 | if [[ ${allowedArgs[--$name]:-} ]] ; then
174 | if [[ ${args[$name]} == '' || ${args[$name]} == 1 ]] ; then continue ; fi
175 | echo "Argument »--$name« should be a boolean, but its value is: ${args[$name]}" 1>&2 ; \return $exitCode
176 | fi
177 | if [[ $names == *' --'"$name"'='* || $names == *' --'"$name"'[='* ]] ; then continue ; fi
178 | if [[ ${undeclared:-} && $name =~ $undeclared ]] ; then continue ; fi
179 | echo "Unexpected argument »--$name«.${allowedArgs[help]:+ Call with »--help« for a list of valid arguments.}" 1>&2 ; \return $exitCode
180 | done
181 | }
182 |
183 | # /end copied
184 |
185 | main "$@"
186 |
--------------------------------------------------------------------------------
/utils/lctes23-artifact/TODO.md:
--------------------------------------------------------------------------------
1 |
2 | To create the artifact submission:
3 | * copy this folder
4 | * add original submission PDF
5 | * add current revision PDF
6 | * replace the `./artifact/` symlink with a copy of `../..`
7 | * but omit/delete its `.git/` dir
8 | * delete this file
9 | * zip and upload
10 |
--------------------------------------------------------------------------------
/utils/lctes23-artifact/artifact:
--------------------------------------------------------------------------------
1 | ../../
--------------------------------------------------------------------------------
/utils/res/README.md:
--------------------------------------------------------------------------------
1 |
2 | Collection of resources, so far only keys that should **exclusively** be used for testing.
3 |
4 | To generate an SSH key pair:
5 | ```bash
6 | ssh-keygen -q -t rsa -b2048 -f utils/res/ssh_testkey_2 -C 'ssh_testkey_2' -N ''
7 | ```
8 |
--------------------------------------------------------------------------------
/utils/res/dropbear_ecdsa_host_key:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tuhhosg/reupnix/a56892ebac5baef819e32657a433ea036006d4e7/utils/res/dropbear_ecdsa_host_key
--------------------------------------------------------------------------------
/utils/res/dropbear_ecdsa_host_key.pub:
--------------------------------------------------------------------------------
1 | ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBLsKU5aaMd/hjjnZj8VqX6t27NmNeiXL16kqD+62YbYa706YKO1e1N7VKqUlS0pzZGikagqxYbomc7O7SflEwbU=
2 |
--------------------------------------------------------------------------------
/utils/res/niklas-gollenstede.pub:
--------------------------------------------------------------------------------
1 | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC+y3pbsUopXWSWVz+sowoMPTWv+u9Qj9aEl20NUN1LrKxduUv/fijmOyui92ZdTYJEu1oa5+V5jbxxlqNDn51yuwXXCxnIwFgh/aSl34Mc86HrjH73kZonya26jfCBE/7Mn9rppUmpkTt0Dk13Y1gnKp0OvuukEQ+Fa5ZxPLtyZ9d3zYDKIBbwNhISOHlllj8jgEMgGNNDGS7EdFh9AEnKG9d8s4+zTlHEXTom0srr4GBrRcG8qlV6DEcHB/aS7hhI5lA79H9AFWd1PjTV7ZUvX9sLsfRitcmQy2psicDxlagA15Lm/pLuf11t+IIO6bv9EG1cCAvkrGqnGqHLCPFYIW0rKyxD2IRq1ZG4+sbyQlgJiACw1WPiJkOXK88hmjlvwKGx4i8bk2bkXgcmxEHtd0rl+zsSMaZnNltaaGae7DVPKEYhn/sx+hzPpdpz7nhNs/OmN1Y61Zi8J8NHyBKWJ+lQSpV7AY8f2VNKvTFPdXzZmTYd4xVd7saGCa9235oqHX54rZ2zXZaj24zncnxhsvvKkLHeeYbr8knSZNDVfqCCzrm6FTV8aQ5M+QJwfnjVW+TQ/2hEnM1Jb4qbAylJfGY+LHZC9tysRyMwStvnB2+td4HX4hjO75CWbDsW6RLsXQjuzMNAwcGhftA9rnV8azIVX9PD4FYSadPptwuOsw== gpg_rsa.niklas@gollenstede.net
2 |
--------------------------------------------------------------------------------
/utils/res/root.sha256-pass:
--------------------------------------------------------------------------------
1 | $5$UqX6IreDL98q0PpD$rZRHDlu8LmorOHNCWo/2SQNQewQ.G/r/8XAUNGSxuo2
2 |
--------------------------------------------------------------------------------
/utils/res/root.yescrypt-pass:
--------------------------------------------------------------------------------
1 | $y$j9T$ALHQ2IxECIkZYTtkSsMTI1$vlJgtkQZL.apmySvUiMnpGOYSSwgp5.Vc64k9pya.u6
2 |
--------------------------------------------------------------------------------
/utils/res/ssh_dummy_1:
--------------------------------------------------------------------------------
1 | -----BEGIN OPENSSH PRIVATE KEY-----
2 | b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABlwAAAAdzc2gtcn
3 | NhAAAAAwEAAQAAAYEA0cU7S7Sk/22DGQ88xrfwwuSLeXdXRL2Prw83seh3D1YCFuqlLYsU
4 | CnrP+WUWWb8wZRUmiJMfOX+e87mptctRktfdRG3fM4kPAQ7JIz1+oi+nA3PKaroyBBRNm4
5 | 6KggZC/p6SPOYLhCswTeMAR0D2lrfov6mg93n5kR3sCvVqK6OpND+CCeS+8xvjOnsKwxJd
6 | Wo6ayfYKHn6kOGsXfmjNnWgm2Wd9ZsvVJH6ZouJ+1XWtP6vawB9g80S24IvvvJ//CRewtQ
7 | lsODKGeRJRxnDvWnCu5w77C3o+loGhKx6DgCCv77zfArkBhobrCALDvFnBfzs6BeNLFerF
8 | i7pk5CW7JR8hFJSlfjkPcJP/9pYn6j7dCxIQouLf8pSM4wtGcg+tV2rbuZHmwhaB4vkWi3
9 | y9cdggyfB4gfSFuZ9RwIzl63aDxWovJEJoSKkAhAymskEHPj3rDzdetgi7PdIwmVwajZnR
10 | SyPq3hS5p/GPlStVEsGMa57oOQc0zivTP1avklK5AAAFkIZI8BeGSPAXAAAAB3NzaC1yc2
11 | EAAAGBANHFO0u0pP9tgxkPPMa38MLki3l3V0S9j68PN7Hodw9WAhbqpS2LFAp6z/llFlm/
12 | MGUVJoiTHzl/nvO5qbXLUZLX3URt3zOJDwEOySM9fqIvpwNzymq6MgQUTZuOioIGQv6ekj
13 | zmC4QrME3jAEdA9pa36L+poPd5+ZEd7Ar1aiujqTQ/ggnkvvMb4zp7CsMSXVqOmsn2Ch5+
14 | pDhrF35ozZ1oJtlnfWbL1SR+maLiftV1rT+r2sAfYPNEtuCL77yf/wkXsLUJbDgyhnkSUc
15 | Zw71pwrucO+wt6PpaBoSseg4Agr++83wK5AYaG6wgCw7xZwX87OgXjSxXqxYu6ZOQluyUf
16 | IRSUpX45D3CT//aWJ+o+3QsSEKLi3/KUjOMLRnIPrVdq27mR5sIWgeL5Fot8vXHYIMnweI
17 | H0hbmfUcCM5et2g8VqLyRCaEipAIQMprJBBz496w83XrYIuz3SMJlcGo2Z0Usj6t4Uuafx
18 | j5UrVRLBjGue6DkHNM4r0z9Wr5JSuQAAAAMBAAEAAAGAU/0kQKuTZLH0S5ROQUJko6PeNE
19 | QULPG5Fd6B7jmlcf59168bc6C75XZGzxQqXsSuG8rcItLFLcBrZCpySNefUawca8mSFJBL
20 | 8aB56CI3EbkCpd82rITggcGfUSlBS842BLx40OlKsyCXtpetBNp94NrB1fuU94Fe6zuPxt
21 | N8LvzYQpMq/csYqT4BPOkG5hP0TM3MLBDW/F64Yd5/CZ3stZl99I37J611KaeG3DDTl6QG
22 | OfgVHddyefSJRF7DHkLwoGC9fnRitmNJgDXd7l39iqjYZi87WlVBMKBH/8ZRRkuLr1UMqS
23 | eFvvIPuUEhiJ6r98T1i+iSJXFDzYFLFSaZLyh1tKLqHdkJhpoq98NjaycseMGCqItAQCdI
24 | n3pmazxbB1tQdZDx8FlMoVw4qCIwtuJPqk8zfRmEEaQgOCPmDlsqy0FkFvWQINNFwmB31/
25 | rK8KgrYdGa4T7Viu8pJN89tiAp6Uyj8ho1nZcFulIHih4E76dtft7ofB9QAvDAAQgBAAAA
26 | wQCxggmQG+63tscM1v3apR1CSqOdSEh6Oh1PS6MdcpthPxio5PJ6/qPkTdDNXDf8mp5yVU
27 | OdLSrA/H+OAt57lQdNn8l14H/PhKqLiJp6gOk/3qkXYDk0C+70aLpjbdWg3PSgmIHEyr98
28 | 7WiUGr0Vk6uozTcVU7ZF14+5tXtDVoIxf4dGBAGwxSFB+amhY5Pmyc28Gll/qyeTdnnI05
29 | QamwtnROw+nGbWetowzg7a9uk2c47iXRPTh/RD5TxjzuFWYNEAAADBAPQrCqLoTKjNrFE8
30 | JlnoFWPF/kr0/Z997bXmD52rsvjZwQHOUIZkB+h2lUyis3I8mWpjKE7yhR+2lr97Cb8PsA
31 | v63+ye1wcWq6J49cYAshAexaOuosB88UbR+5MvRvilaYCcFp4v24GNHajzDIYXvQ2SIaHo
32 | IYQ5Em4SnY/iqDmRkofSSuBXa8a8gG/CKRaEG+urQIV0gSpIru3x9G/f2JTk/Grn0A+JsT
33 | ngXfWzFKV4xceGiO/vA5Wj/8E9Brud3QAAAMEA2+96rv01nvH3J9e/rOL1qh3cK6MHUZlk
34 | zOzPrcbw36jJhBcuWNtiw5tuSPKIs6JDVe17Em6bXdHASvLwx5E4d2iT6oedIY/l/nW3mG
35 | edW+UrEqWoyokf4vHvh2aOF6vDzYk687hy42SU+y+elV3kDKxVMvhweJw4Lm9+11ZHMeHj
36 | IRCPvCzKVn7bB24jJTv0Q7abdI+yl1zcI3+SIekwR7ZkZLKcEV7wKKL040vm3U7fe7OPnU
37 | Qj2ExkBFYyV+CNAAAAGnVzZXJAbmtsbmIuZ29sbGVuc3RlZGUubmV0
38 | -----END OPENSSH PRIVATE KEY-----
39 |
--------------------------------------------------------------------------------
/utils/res/ssh_dummy_1.pub:
--------------------------------------------------------------------------------
1 | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDRxTtLtKT/bYMZDzzGt/DC5It5d1dEvY+vDzex6HcPVgIW6qUtixQKes/5ZRZZvzBlFSaIkx85f57zuam1y1GS191Ebd8ziQ8BDskjPX6iL6cDc8pqujIEFE2bjoqCBkL+npI85guEKzBN4wBHQPaWt+i/qaD3efmRHewK9Woro6k0P4IJ5L7zG+M6ewrDEl1ajprJ9goefqQ4axd+aM2daCbZZ31my9Ukfpmi4n7Vda0/q9rAH2DzRLbgi++8n/8JF7C1CWw4MoZ5ElHGcO9acK7nDvsLej6WgaErHoOAIK/vvN8CuQGGhusIAsO8WcF/OzoF40sV6sWLumTkJbslHyEUlKV+OQ9wk//2lifqPt0LEhCi4t/ylIzjC0ZyD61Xatu5kebCFoHi+RaLfL1x2CDJ8HiB9IW5n1HAjOXrdoPFai8kQmhIqQCEDKayQQc+PesPN162CLs90jCZXBqNmdFLI+reFLmn8Y+VK1USwYxrnug5BzTOK9M/Vq+SUrk= ssh_dummy_1
2 |
--------------------------------------------------------------------------------
/utils/res/ssh_testkey_1:
--------------------------------------------------------------------------------
1 | -----BEGIN OPENSSH PRIVATE KEY-----
2 | b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABFwAAAAdzc2gtcn
3 | NhAAAAAwEAAQAAAQEA4v/AX8AakGkzXaclQqIcouzzg9i1Il/RK1uBufs6vTNPEFyq4/2L
4 | jS1ULnW4JOD65QfnhszZ0JwwIg/TDvR4lf/E+XadsVzlGzq2T3J54ky3hnXp7YP5DX4Va6
5 | izefEJOs6LbUWo2JQXonAYU4jxN8B6+/XNYCSP2rxhwwpi3I/qLkoP8mSr+G1pEmjTQZdm
6 | IgPxZAD1dKC2ZZ8sevml+PwyYyRhQ6oJ3r3p29HJsBGyaIetMbhygj87W5+tV1cyxF8ydC
7 | tiDGsjsv4upA2H8NN32IQKQ6gsoMW64AgSZ1wpSWm3GSBj11q49sJlLoTXBgUXKF28/ypF
8 | 5TUCr1aZ3QAAA7jCkucGwpLnBgAAAAdzc2gtcnNhAAABAQDi/8BfwBqQaTNdpyVCohyi7P
9 | OD2LUiX9ErW4G5+zq9M08QXKrj/YuNLVQudbgk4PrlB+eGzNnQnDAiD9MO9HiV/8T5dp2x
10 | XOUbOrZPcnniTLeGdentg/kNfhVrqLN58Qk6zottRajYlBeicBhTiPE3wHr79c1gJI/avG
11 | HDCmLcj+ouSg/yZKv4bWkSaNNBl2YiA/FkAPV0oLZlnyx6+aX4/DJjJGFDqgnevenb0cmw
12 | EbJoh60xuHKCPztbn61XVzLEXzJ0K2IMayOy/i6kDYfw03fYhApDqCygxbrgCBJnXClJab
13 | cZIGPXWrj2wmUuhNcGBRcoXbz/KkXlNQKvVpndAAAAAwEAAQAAAQBVZ2WoYK1d4caqmKVD
14 | IZzmi4G+/DyB90zKJD51i5j48CHyBPtildT4Q7KlXDLw5RhgLQ4lwFK+0tSlLBD7Um0/+h
15 | /Qv5jqzwl/qiJHV8mVo8l0tE5Tx1BfjC1g+PK1kUMotebtMMUq3uTZf5UXcLFYiXl77Gnd
16 | n0PkRvX9HJ4/XsxsI9zFhGvZw/UAn634xow+mNMYUZcmb8yh+99jpGUuTHeQm3J/o4kqOV
17 | gs9oousDhN3+ARMplOjUlBaLUhhzjjuVY6Nq5evY9IqDzUp6V0zUCJJaln0Fz9fGeFm1tM
18 | eYm7lcl/1QG2wjDZv+wB31OZ7L0JQflN/qqq2OMhmAD9AAAAgFCX7N0s0JedDxg6xXTbbC
19 | bmG2lqCt+MYKXu8+J007CvhHI6DClSWtneEMrtutgdk2eFYWtAUUuF3ng1Auj0yX9YhK3c
20 | mTtUumuhmdZd0rv2aU+DuAYSeCMZtfmFr07s5T8X/eUdmf+K3sM6kNuWc5/Le8Sr3SJnMs
21 | +MJciqEQ+CAAAAgQD7oNIFbgM04CFK51bmGd6idqyF6Zh5/81f7YdI0pPNpeSNh3lUtmmW
22 | +fdVWVI1uS8fRlW1mt4spm+L3F3ZOHYhjXceC3lliZ9EfZ1GlJf2VlYEqjeL/tprCaJDGR
23 | 4pJIN1PzTuMBqSwn66ezEmHcfGc4SFoqba/Asgc0AoeLBTXwAAAIEA5vFi+dwW8lJ+Md47
24 | mxEadoQNt9GdEyoVNAsNNlaYxdOEOZtL/yMCABuZoV7TxIqfg7xE0oCd7MPqRD/kuTzuxI
25 | ppBRQpjhGGTIM90bKdTR2/epGqypB+gk+ZcYwj01B69+idE1s5cWY3mjr7A830hdmLuDe9
26 | +oaFr9PTdnGMOEMAAAAAAQID
27 | -----END OPENSSH PRIVATE KEY-----
28 |
--------------------------------------------------------------------------------
/utils/res/ssh_testkey_1.pub:
--------------------------------------------------------------------------------
1 | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDi/8BfwBqQaTNdpyVCohyi7POD2LUiX9ErW4G5+zq9M08QXKrj/YuNLVQudbgk4PrlB+eGzNnQnDAiD9MO9HiV/8T5dp2xXOUbOrZPcnniTLeGdentg/kNfhVrqLN58Qk6zottRajYlBeicBhTiPE3wHr79c1gJI/avGHDCmLcj+ouSg/yZKv4bWkSaNNBl2YiA/FkAPV0oLZlnyx6+aX4/DJjJGFDqgnevenb0cmwEbJoh60xuHKCPztbn61XVzLEXzJ0K2IMayOy/i6kDYfw03fYhApDqCygxbrgCBJnXClJabcZIGPXWrj2wmUuhNcGBRcoXbz/KkXlNQKvVpnd ssh testkey_1
2 |
--------------------------------------------------------------------------------
/utils/res/ssh_testkey_2:
--------------------------------------------------------------------------------
1 | -----BEGIN OPENSSH PRIVATE KEY-----
2 | b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABFwAAAAdzc2gtcn
3 | NhAAAAAwEAAQAAAQEAuK5JKCa4dOEH3hR3jvtxKh2ANPbNTwYCW7ONiXHeXW2dgvMxH0W7
4 | a9LUtHnY9cRLJXsjTF6sZCgN8C8jHl+Ti8Rym01FpZ1HAEFOOoh/2mtbtDtNh7yZH3ybCX
5 | MMTs9FElQDQ4z91mS5YZfls1x2GCV2K/P+KgThs9coABpRL/OEcOC56hMFjs0dkhCzJ5Kp
6 | oN6lyHP+R3lYaQtRxKKeT4Oo7AiHjkD5n8Bub0SKK0LS6JCluReaOQ2shMJJy5QfV68smY
7 | hRLZLeBfQfPgEH7bQ0HP94yV3qL6uMTzFGonc3xOOordaFLi3l76kCGQLeoeVF41F6K3Rt
8 | yNwR/T53TwAAA8iMFhycjBYcnAAAAAdzc2gtcnNhAAABAQC4rkkoJrh04QfeFHeO+3EqHY
9 | A09s1PBgJbs42Jcd5dbZ2C8zEfRbtr0tS0edj1xEsleyNMXqxkKA3wLyMeX5OLxHKbTUWl
10 | nUcAQU46iH/aa1u0O02HvJkffJsJcwxOz0USVANDjP3WZLlhl+WzXHYYJXYr8/4qBOGz1y
11 | gAGlEv84Rw4LnqEwWOzR2SELMnkqmg3qXIc/5HeVhpC1HEop5Pg6jsCIeOQPmfwG5vRIor
12 | QtLokKW5F5o5DayEwknLlB9XryyZiFEtkt4F9B8+AQfttDQc/3jJXeovq4xPMUaidzfE46
13 | it1oUuLeXvqQIZAt6h5UXjUXordG3I3BH9PndPAAAAAwEAAQAAAQEAnIj1bzfqrv6yCXW1
14 | pRbQ446fxcTPXsTMKOf29uHyRaSMlpsFLumPI6RPMC37TM01mK5Xy/qPClG9BrEdhjgiup
15 | wnPCVx/B9kN/4ijZB3Ni47S3fcjkm4TCz0yDyg2QfsFs3o+k++NAOp8eOrCa08V9kg4b5s
16 | 1zbOC/45ZGTqHcZMqRvlOEzaUxVLJ1bUzDdOd3F53bc+xJVTwK9RXiRf8yLoaiyRC4H2fx
17 | pzd4TFEYMLE7peQyc7dWqj/9jLINcJhHIkmheuTlwNZNoiuIblnq73UqroWlQAJPQ9bBCs
18 | ippvCKB/t070A0BWx2mndoWq3T+1XGFZjWw4jYIi344lgQAAAIEAmRCcH93MJ4ITJ0zreD
19 | 4XxTA7OIxjMqlMLvcg8I/8vGhtIiK9jhR+t1xB9S2BWaYsTUM4nEUmSonBA9zOp7heX3aY
20 | nMf0xbTiU/IO7Vc+ERks/VHllInCO2xRXtx/+xzPkDwZ5H+jrSsRpZ+uPbqKwxRMk7PJ2R
21 | 2WIplNKsE7YZAAAACBAONz23RF1hOv60PRWvH9AwJs5vIDnjCbFSOynzAFFcUnleXl2GQA
22 | Y6X3h71ZTnONgYvR1ePxHpDyZBh/X8TXo+LGa3XSw8bcwH912GTr793M5oSkbajj/493Bu
23 | ypOz0iQbzPjEQAnwR2BZaL3IG9r7k+n4aBmgPNrLj0ceUbaIuPAAAAgQDP3CeNsMz1e+kw
24 | 39c8KKqjO0lFozvX84Tt/Da/B/cmBP8kiAd8ZIj8Xcswqznv7kntccdJok4NRMW/kcyFRJ
25 | itXlilUEayk2UnE5VAoi8JVe7Cy9l5kCLz4uy3y6+pbzeiWOk/W4b8IXAiVn3x2byONHOd
26 | FaKyUxSCAzh2ETh4QQAAAA1zc2hfdGVzdGtleV8yAQIDBA==
27 | -----END OPENSSH PRIVATE KEY-----
28 |
--------------------------------------------------------------------------------
/utils/res/ssh_testkey_2.pub:
--------------------------------------------------------------------------------
1 | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC4rkkoJrh04QfeFHeO+3EqHYA09s1PBgJbs42Jcd5dbZ2C8zEfRbtr0tS0edj1xEsleyNMXqxkKA3wLyMeX5OLxHKbTUWlnUcAQU46iH/aa1u0O02HvJkffJsJcwxOz0USVANDjP3WZLlhl+WzXHYYJXYr8/4qBOGz1ygAGlEv84Rw4LnqEwWOzR2SELMnkqmg3qXIc/5HeVhpC1HEop5Pg6jsCIeOQPmfwG5vRIorQtLokKW5F5o5DayEwknLlB9XryyZiFEtkt4F9B8+AQfttDQc/3jJXeovq4xPMUaidzfE46it1oUuLeXvqQIZAt6h5UXjUXordG3I3BH9PndP ssh_testkey_2
2 |
--------------------------------------------------------------------------------
/utils/setup.sh:
--------------------------------------------------------------------------------
1 |
2 | ##
3 | # Host Setup Scripts
4 | ##
5 |
6 | # This project uses the default setup scripts of »wiplib«, see https://github.com/NiklasGollenstede/nix-wiplib/tree/master/lib/setup-scripts
7 |
8 | # (No overrides necessary at the moment.)
9 |
--------------------------------------------------------------------------------