├── .gitignore ├── LICENSE ├── README.md ├── default.nix ├── flake.nix ├── hydra-jobs ├── .gitignore ├── LICENSE ├── README.md ├── default.nix ├── docs.nix ├── jobsets.nix ├── modules │ └── qemu-image.nix ├── packages.nix └── spec.json ├── module-list.nix ├── modules ├── automx.nix ├── base.nix ├── copy-nixpkgs.nix ├── demockrazy.nix ├── docker-runner.nix ├── hagrid.nix ├── kvm.nix ├── log-aggregation.nix ├── machines.nix ├── matrix.nix ├── monitoring │ ├── alert-rules.nix │ ├── alerting.nix │ ├── blackbox-exporter.nix │ ├── default.nix │ ├── smartmon-textfile.nix │ └── snmp-exporter.nix ├── opsdroid.nix ├── prometheus-federation.nix ├── service-overview.nix ├── sops-extension.nix └── wireguard.nix ├── overlay.nix ├── pkgs ├── hagrid │ ├── Cargo.lock │ ├── default.nix │ └── remove-news-and-stats.patch ├── matrix-alertmanager │ ├── default.nix │ ├── package.json │ ├── yarn.lock │ └── yarn.nix ├── postorius_users_can_create_lists.patch ├── privacyidea │ ├── 0001-remove-subscription-check.patch │ └── add-description.patch ├── prometheus-snmp-exporter-generator.nix ├── python-packages.nix ├── python │ ├── automx │ │ └── default.nix │ ├── colorhash │ │ └── default.nix │ ├── django-allauth │ │ ├── 0001-Automatically-link-social-login-users-against-existi.patch │ │ ├── 0002-Implement-superuser-to-oidc-mapping-similar-to-the-a.patch │ │ └── 0003-Prohibit-authentication-against-local-users.patch │ ├── duckling │ │ └── default.nix │ ├── fbmessenger │ │ └── default.nix │ ├── mailmanclient │ │ └── default.nix │ ├── mattermostwrapper │ │ └── default.nix │ ├── rasa-core │ │ └── default.nix │ ├── rasa-nlu │ │ └── default.nix │ └── sklearn-crfsuite │ │ └── default.nix └── service-overview │ ├── assets │ ├── css │ │ └── custom.css │ └── img │ │ ├── favicons │ │ └── favicon.ico │ │ └── logo.png │ ├── default.nix │ ├── demo.nix │ └── templates │ ├── entry.nix │ ├── footer.nix │ └── header.nix └── tests └── wireguard-star.nix /.gitignore: 
-------------------------------------------------------------------------------- 1 | result* 2 | *.swp 3 | *.swo 4 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 
34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Mayflower-opinionated Nix modules, overlay and Hydra Configuration 2 | 3 | This is a collection of too opinionated or too experimental Nix code we use at 4 | Mayflower, mostly in production. 5 | 6 | ## Overlay 7 | 8 | The overlay consists of some additional packages that are too experimental to 9 | upstream currently: 10 | * mailman3, including Postorius and Hyperkitty 11 | * serviceOverview, see below in the modules section 12 | * dovecot and postfix, overridden to include PgSQL support 13 | 14 | **Using the overlay** 15 | 16 | When importing `nixpkgs`: 17 | ``` 18 | { nixpkgs ? 
}: 19 | 20 | let 21 | nixexprsRepo = (import nixpkgs {}).fetchFromGitHub { 22 | owner = "mayflower"; 23 | repo = "nixexprs"; 24 | rev = "afee8fa90f5f864a9d011a0bdcdb4b657deff813"; 25 | sha256 = "0y8da7cg9jg2xl83x6slg5zjjdn2ah9m2ymqynnq8aqqymdp4xf8"; 26 | }; 27 | in import nixpkgs { 28 | overlays = [ (import "${nixexprsRepo}/overlay.nix") ]; 29 | } 30 | ``` 31 | 32 | When using it in the NixOS configuration: 33 | ``` 34 | { pkgs, ... }: 35 | 36 | let 37 | nixexprsRepo = pkgs.fetchFromGitHub { 38 | owner = "mayflower"; 39 | repo = "nixexprs"; 40 | rev = "afee8fa90f5f864a9d011a0bdcdb4b657deff813"; 41 | sha256 = "0y8da7cg9jg2xl83x6slg5zjjdn2ah9m2ymqynnq8aqqymdp4xf8"; 42 | }; 43 | in { 44 | nixpkgs.overlays = [ (import "${nixexprsRepo}/overlay.nix") ]; 45 | } 46 | ``` 47 | 48 | Testing builds with your local nixpkgs (from `NIX_PATH`). 49 | In this repository default.nix just applies the overlay to ``: 50 | ``` 51 | nix-build -A postfix # includes pgsql support 52 | ``` 53 | 54 | Use another nixpkgs repository: 55 | ``` 56 | nix-build --argstr nixpkgs /home/robin/dev/nixpkgs-upstream -A postfix 57 | ``` 58 | 59 | ## Modules 60 | 61 | ### services.automx 62 | 63 | Automx service to provide mail client auto configuration. To be upstreamed soon, 64 | but not clean enough, yet. 65 | 66 | ### mayflower.base 67 | 68 | Enabled by default. Sets a number of default options including some opinionated 69 | defaults as well as very Mayflower-specific things like setting our mailserver 70 | for delivery. Thus, you will want to turn this off and steal the parts you want 71 | to have yourself. Or feel free to open a PR to split this up into opinionated 72 | options and company-specific options if we don't beat you to it. 73 | 74 | ### mayflower.cachet 75 | 76 | Packaging + VHost for [cachet](https://cachethq.io/), an open source status page. 77 | 78 | ### mayflower.copy-nixpkgs 79 | 80 | Copies the `nixpkgs` used during deployment to the target system to a stable location. 
81 | 82 | ### mayflower.demockrazy 83 | 84 | A service to create polls which are as anonymous as possible and leave as little 85 | trace as possible. This is software that was written in-house at mayflower but 86 | is open source and mostly needs some polishing up prior to upstreaming this as 87 | `services.demockrazy`. 88 | 89 | ### mayflower.docker-runner 90 | 91 | A litte abstraction for docker gitlab runners. Not too interesting probably if 92 | you don't actually want our defaults and not worth upstreaming. 93 | 94 | ### mayflower.hagrid 95 | 96 | Minimal module to self-host Hagrid, an open-source PGP keyserver. 97 | 98 | ### mayflower.kvm 99 | 100 | Some defaults for kvm guests. 101 | 102 | ### mayflower.log-aggregation 103 | 104 | Generates a VPN network for a deployment to push logs via promtail to a central 105 | loki instance. 106 | 107 | ### mayflower.machines 108 | 109 | An option to pass NixOps `resources.machines` to, which is not accessible from 110 | the module system otherwise. 111 | 112 | ### mayflower.matrix 113 | 114 | Module which aims to simplify the setup of a full-blown Matrix setup including 115 | Element, TURN integration and mxisd as directory service. 116 | 117 | ### mayflower.monitoring 118 | 119 | Auto-generation of prometheus targets from the config of all your machines in 120 | the deployment (fetched by using `mayflower.machines`). Very opinionated, but 121 | feel free to send in PRs to make this more configurable and fit your needs 122 | better. Especially the alert rules will most probably not meet your 123 | requirements, but this generally should be a good starting point for prometheus 124 | used with NixOps as it includes federation support and meshed alertmanager and a 125 | number of exporters that get enabled by default if the relevant service is 126 | running. As always YMMV. 127 | 128 | ### services.opsdroid 129 | 130 | Minimal module to deploy opsdroid, a python-based ops bot (used for e.g. 
131 | JIRA and GitLab notifications). 132 | 133 | ### mayflower.prometheusFederation 134 | 135 | Generates a VPN network used to create a mesh of several Prometheus instances 136 | syncing metrics with each other. Used to have a single Prometheus for each site, 137 | but centralized metrics and alerting. 138 | 139 | ### mayflower.serviceOverview 140 | 141 | Generates a list of services and their support status from options over all 142 | `mayflower.machines`. 143 | 144 | ### services.simplesamlphp 145 | 146 | Sets up [simplesamlphp](https://simplesamlphp.org/) to allow our users to authenticate 147 | with their PrivacyIDEA credentials against internal services using SAML. 148 | 149 | ### sops-extension 150 | 151 | Extends the [`sops-nix`](https://github.com/Mic92/sops-nix/)-module to support 152 | a structure like `secrets//secrets.sops.yaml` to support 153 | per-machine secrets in a large deployment. 154 | 155 | ### mayflower.wireguard 156 | 157 | Allows to generate a VPN network in a star topology using WireGuard over machines 158 | defined in a deployment. 159 | 160 | ## Hydra Jobs 161 | 162 | This repository also contains our complete hydra declarative project and jobset 163 | specification. This won't be of much use outside of our environment but there 164 | probably are a few things to take inspiration from. 165 | -------------------------------------------------------------------------------- /default.nix: -------------------------------------------------------------------------------- 1 | { nixpkgs ? }: 2 | (import nixpkgs { 3 | overlays = [ (import ./overlay.nix) ]; 4 | }).pkgs 5 | -------------------------------------------------------------------------------- /flake.nix: -------------------------------------------------------------------------------- 1 | { 2 | description = "Mayflower-opinionated Nix modules, overlay and Hydra Configuration"; 3 | outputs = { ... 
}: { 4 | overlay = import ./overlay.nix; 5 | nixosModules = let 6 | inherit (builtins) listToAttrs head match elemAt split; 7 | mkModuleName = x: 8 | let 9 | strippedExt = baseNameOf (head (match "(.*)\\.nix$" x)); 10 | dirname = baseNameOf (dirOf x); 11 | in "mf-${if strippedExt == "default" then dirname else strippedExt}"; 12 | in 13 | listToAttrs 14 | (map (x: { 15 | name = mkModuleName (toString x); 16 | value.imports = [ x ]; 17 | }) (import ./module-list.nix)); 18 | }; 19 | } 20 | -------------------------------------------------------------------------------- /hydra-jobs/.gitignore: -------------------------------------------------------------------------------- 1 | .*.swp 2 | .*.swo 3 | result 4 | result-* 5 | -------------------------------------------------------------------------------- /hydra-jobs/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2015 Mayflower GmbH 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | 23 | -------------------------------------------------------------------------------- /hydra-jobs/README.md: -------------------------------------------------------------------------------- 1 | # hydra-jobs 2 | Our jobs for the hydra CI 3 | -------------------------------------------------------------------------------- /hydra-jobs/default.nix: -------------------------------------------------------------------------------- 1 | { nixpkgs ? 2 | , nixexprs ? 3 | , supportedSystems ? [ "x86_64-linux" ] 4 | , config ? { } 5 | }: 6 | 7 | import ./packages.nix { 8 | inherit nixpkgs; 9 | releaseLib = import "${nixpkgs}/pkgs/top-level/release-lib.nix" { 10 | inherit supportedSystems; 11 | nixpkgsArgs = { 12 | config = config // { 13 | allowUnfree = false; 14 | 15 | # As soon as something is redistributable, it's legally OK to - well - redistribute 16 | # it in our binary cache. Nixpkgs is stricter here because of political reasons, 17 | # i.e. everything that's not approved as free by e.g. OSI won't be 18 | # redistributed with only a *few* exceptions such as firmware code. 19 | # 20 | # However, we want mongodb to be built by our Hydra. 21 | allowUnfreePredicate = { meta ? {}, ... 
}: 22 | let 23 | license = meta.license or {}; 24 | in 25 | ((builtins.isAttrs license) && (license.redistributable or false)); 26 | 27 | inHydra = true; 28 | 29 | permittedInsecurePackages = [ 30 | "nodejs-16.20.2" # build-time dependency of discourse 31 | ]; 32 | }; 33 | overlays = [ (import "${nixexprs}/overlay.nix") ]; 34 | }; 35 | }; 36 | } 37 | -------------------------------------------------------------------------------- /hydra-jobs/docs.nix: -------------------------------------------------------------------------------- 1 | { nixpkgs ? 2 | , nixexprs ? 3 | , system ? builtins.currentSystem 4 | }: 5 | let 6 | nixosManual = (import (nixpkgs + "/nixos/lib/eval-config.nix") { 7 | inherit system; 8 | modules = [({ lib, ... }: { 9 | config.documentation.nixos.includeAllModules = true; 10 | config.documentation.nixos.enable = lib.mkForce true; 11 | config.documentation.nixos.options.warningsAreErrors = false; 12 | })]; 13 | baseModules = import (nixpkgs + "/nixos/modules/module-list.nix"); 14 | extraModules = import (nixexprs + "/module-list.nix"); 15 | }).config.system.build.manual; 16 | in { 17 | inherit (nixosManual) manual manualEpub manualHTML optionsDocBook optionsJSON manpages; 18 | nixpkgsManual = import (nixpkgs + "/doc/default.nix") {}; 19 | } 20 | -------------------------------------------------------------------------------- /hydra-jobs/jobsets.nix: -------------------------------------------------------------------------------- 1 | { nixpkgs ? , declInput ? 
{} }: 2 | 3 | let 4 | pkgs = import nixpkgs {}; 5 | inherit (pkgs) lib; 6 | defaultSettings = { 7 | enabled = "1"; 8 | hidden = false; 9 | description = ""; 10 | nixexprinput = "nixexprs"; 11 | keepnr = 1; 12 | schedulingshares = 42; 13 | checkinterval = 300; 14 | type = 0; 15 | inputs = { 16 | nixpkgs = { 17 | type = "git"; 18 | value = "https://github.com/mayflower/nixpkgs mf-stable"; 19 | emailresponsible = false; 20 | }; 21 | nixexprs = { 22 | type = "git"; 23 | value = "https://github.com/mayflower/nixexprs master"; 24 | emailresponsible = false; 25 | }; 26 | supportedSystems = { 27 | type = "nix"; 28 | value = ''[ "x86_64-linux" ]''; 29 | emailresponsible = false; 30 | }; 31 | }; 32 | enableemail = true; 33 | emailoverride = "devnull+hydra@mayflower.de"; 34 | }; 35 | 36 | jobsets = lib.mapAttrs (name: settings: lib.recursiveUpdate defaultSettings settings) ({ 37 | bootstrap-tools = { 38 | keepnr = 2; 39 | nixexprinput = "nixpkgs"; 40 | nixexprpath = "pkgs/stdenv/linux/make-bootstrap-tools.nix"; 41 | }; 42 | hydra-jobs = { 43 | nixexprpath = "hydra-jobs/default.nix"; 44 | keepnr = 3; 45 | schedulingshares = 420; 46 | }; 47 | nixos-small = { 48 | nixexprinput = "nixpkgs"; 49 | nixexprpath = "nixos/release-small.nix"; 50 | inputs.supportedSystems.value = ''[ "x86_64-linux" ]''; 51 | }; 52 | docs = { 53 | nixexprpath = "hydra-jobs/docs.nix"; 54 | }; 55 | #hydra-jobs-arm-cross = { 56 | # nixexprpath = "hydra-jobs/arm-cross.nix"; 57 | # schedulingshares = 5; 58 | #}; 59 | }); 60 | 61 | jobsets-next = lib.mapAttrs' (name: value: { 62 | name = "next-${name}"; 63 | value = lib.recursiveUpdate value { 64 | inputs.nixpkgs.value = "https://github.com/mayflower/nixpkgs mf-next"; 65 | inputs.nixexprs.value = "https://github.com/mayflower/nixexprs mf-next"; 66 | }; 67 | }) jobsets; 68 | 69 | jobsets-structured-attrs = lib.mapAttrs' (name: value: { 70 | name = "structured-attrs-${name}"; 71 | value = lib.recursiveUpdate value { 72 | inputs.nixpkgs.value = 
"https://github.com/mayflower/nixpkgs structured-attrs-v2"; 73 | }; 74 | }) jobsets; 75 | 76 | jobsets-structured-attrs-compat = lib.mapAttrs' (name: value: { 77 | name = "structured-attrs-compat-${name}"; 78 | value = lib.recursiveUpdate value { 79 | inputs.config = { 80 | type = "nix"; 81 | value = lib.generators.toPretty { } { 82 | structuredAttrsByDefault = false; 83 | }; 84 | }; 85 | }; 86 | }) jobsets-structured-attrs; 87 | in { 88 | jobsets = pkgs.writeText "spec.json" (builtins.toJSON (jobsets // jobsets-next // jobsets-structured-attrs // jobsets-structured-attrs-compat)); 89 | } 90 | -------------------------------------------------------------------------------- /hydra-jobs/modules/qemu-image.nix: -------------------------------------------------------------------------------- 1 | { config, lib, pkgs, ... }: 2 | 3 | with lib; 4 | 5 | { 6 | imports = 7 | [ ]; 8 | 9 | system.build.qemuImage = import { 10 | inherit lib config pkgs; 11 | diskSize = 2048; 12 | format = "raw"; 13 | postVM = '' 14 | xz -T $NIX_BUILD_CORES -v -1 --stdout $diskImage > $diskImage.xz 15 | rm -f $diskImage 16 | ''; 17 | configFile = pkgs.writeText "configuration.nix" '' 18 | throw "nixos-rebuild is not supported with this VM image!" 
19 | ''; 20 | }; 21 | 22 | } 23 | -------------------------------------------------------------------------------- /hydra-jobs/spec.json: -------------------------------------------------------------------------------- 1 | { 2 | "enabled": 1, 3 | "hidden": false, 4 | "description": "Jobsets", 5 | "nixexprinput": "src", 6 | "nixexprpath": "hydra-jobs/jobsets.nix", 7 | "checkinterval": 300, 8 | "schedulingshares": 420, 9 | "enableemail": true, 10 | "emailoverride": "devnull+hydra@mayflower.de", 11 | "keepnr": 3, 12 | "type": 0, 13 | "inputs": { 14 | "src": { "type": "git", "value": "https://github.com/mayflower/nixexprs master", "emailresponsible": false }, 15 | "nixpkgs": { "type": "git", "value": "https://github.com/mayflower/nixpkgs.git mf-stable", "emailresponsible": false } 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /module-list.nix: -------------------------------------------------------------------------------- 1 | [ 2 | modules/automx.nix 3 | modules/base.nix 4 | modules/copy-nixpkgs.nix 5 | modules/demockrazy.nix 6 | modules/docker-runner.nix 7 | modules/hagrid.nix 8 | modules/kvm.nix 9 | modules/log-aggregation.nix 10 | modules/machines.nix 11 | modules/matrix.nix 12 | modules/monitoring/default.nix 13 | modules/opsdroid.nix 14 | modules/prometheus-federation.nix 15 | modules/service-overview.nix 16 | modules/sops-extension.nix 17 | modules/wireguard.nix 18 | ] 19 | -------------------------------------------------------------------------------- /modules/automx.nix: -------------------------------------------------------------------------------- 1 | { config, lib, pkgs, ... 
}: 2 | 3 | with lib; 4 | 5 | let 6 | cfg = config.services.automx; 7 | uwsgi = pkgs.uwsgi.override { plugins = [ "python3" ]; }; 8 | automxUwsgi = pkgs.writeText "uwsgi.json" (builtins.toJSON { 9 | uwsgi = { 10 | plugins = [ "python3" ]; 11 | uid = "automx"; 12 | socket = "/run/automx/web.socket"; 13 | chown-socket = "automx:nginx"; 14 | chmod-socket = 770; 15 | chdir = "${uwsgi.python3.pkgs.automx}/share/automx"; 16 | wsgi-file = "automx_wsgi.py"; 17 | master = true; 18 | processes = 4; 19 | no-orphans = true; 20 | vacuum = true; 21 | logger = "syslog"; 22 | }; 23 | }); 24 | pythonEnv = uwsgi.python3.withPackages (ps: [ ps.automx ]); 25 | pythonPath = "${pythonEnv}/${pythonEnv.python.sitePackages}"; 26 | in { 27 | options.services.automx = { 28 | enable = mkEnableOption (mdDoc "automx service"); 29 | nginx.enable = mkEnableOption (mdDoc "nginx vhosts for automx"); 30 | domain = mkOption { 31 | type = types.str; 32 | example = "example.com"; 33 | description = mdDoc "Mail domain to use for configuration of nginx."; 34 | }; 35 | configFile = mkOption { 36 | type = types.path; 37 | description = mdDoc "Config file for the automx service."; 38 | }; 39 | }; 40 | 41 | config = mkIf cfg.enable { 42 | services.memcached.enable = mkDefault true; 43 | 44 | environment.etc."automx.conf".source = cfg.configFile; 45 | 46 | systemd.sockets.automx = { 47 | wantedBy = [ "sockets.target" ]; 48 | listenStreams = [ "/run/automx/web.socket" ]; 49 | }; 50 | systemd.services.automx = { 51 | description = "A mail client account configuration service, combining various autoconfiguration techniques in one webservice"; 52 | after = [ "network.target" ]; 53 | requires = [ "automx.socket" ]; 54 | environment.PYTHONPATH = pythonPath; 55 | serviceConfig = { 56 | Type = "notify"; 57 | Restart = "on-failure"; 58 | KillSignal = "SIGQUIT"; 59 | StandardError = "syslog"; 60 | NotifyAccess = "all"; 61 | ExecStart = "${uwsgi}/bin/uwsgi --json ${automxUwsgi}"; 62 | DynamicUser = true; 63 | }; 64 | }; 
65 | 66 | services.nginx = mkIf cfg.nginx.enable { 67 | enable = mkDefault true; 68 | virtualHosts."autoconfig.${cfg.domain}" = { 69 | forceSSL = true; 70 | enableACME = true; 71 | serverAliases = [ "autodiscover.${cfg.domain}" ]; 72 | root = "${uwsgi.python3.pkgs.automx}/share/automx/html"; 73 | locations."/" = { 74 | extraConfig = '' 75 | try_files $uri $uri/ index.html; 76 | ''; 77 | }; 78 | locations."~ ^/(mail/config-v1.1.xml|autodiscover|mobileconfig)" = { 79 | extraConfig = '' 80 | uwsgi_pass unix:/run/automx/web.socket; 81 | ''; 82 | }; 83 | extraConfig = '' 84 | add_header X-Content-Type-Options "nosniff" always; 85 | ''; 86 | }; 87 | }; 88 | }; 89 | } 90 | -------------------------------------------------------------------------------- /modules/base.nix: -------------------------------------------------------------------------------- 1 | { config, lib, pkgs, ... }: 2 | 3 | with lib; 4 | 5 | { 6 | options = { 7 | mayflower.base.enable = mkOption { 8 | type = types.bool; 9 | default = true; 10 | }; 11 | }; 12 | 13 | config = mkIf config.mayflower.base.enable { 14 | boot.tmp.useTmpfs = true; 15 | boot.kernelParams = [ 16 | "boot.shell_on_fail" 17 | "panic=10" # wait a bit before rebooting on panics pre-stage2 18 | ]; 19 | 20 | boot.loader.grub.splashImage = null; 21 | boot.loader.grub.copyKernels = true; 22 | boot.loader.timeout = 2; 23 | 24 | boot.kernel.sysctl = { 25 | "kernel.panic" = "1"; # instantly reboot on panics 26 | "net.core.default_qdisc" = "fq_codel"; 27 | "net.ipv6.conf.all.use_tempaddr" = 0; 28 | "net.ipv4.tcp_slow_start_after_idle" = 0; 29 | "net.ipv6.conf.all.autoconf" = 0; 30 | "net.ipv6.conf.default.autoconf" = 0; 31 | "net.ipv6.conf.all.accept_ra" = 0; 32 | "net.ipv6.conf.default.accept_ra" = 0; 33 | }; 34 | 35 | i18n.defaultLocale = "en_US.UTF-8"; 36 | i18n.supportedLocales = ["en_US.UTF-8/UTF-8" "en_GB.UTF-8/UTF-8"]; 37 | 38 | console = { 39 | font = "Lat2-Terminus16"; 40 | keyMap = "us"; 41 | }; 42 | 43 | programs = { 44 | 
bash.enableCompletion = true; 45 | ssh.startAgent = false; 46 | mtr.enable = true; 47 | }; 48 | 49 | environment = { 50 | variables.EDITOR = "vim"; 51 | systemPackages = with pkgs; [ 52 | curl 53 | dnsutils 54 | file 55 | htop 56 | iftop 57 | iotop 58 | iperf3 59 | jnettop 60 | jq 61 | lsof 62 | ncdu 63 | pciutils 64 | pv 65 | strace 66 | tcpdump 67 | tmux 68 | vim 69 | wget 70 | ]; 71 | }; 72 | 73 | time.timeZone = mkDefault "GMT"; 74 | 75 | security.audit.enable = mkDefault false; 76 | 77 | services = { 78 | openssh = { 79 | enable = true; 80 | settings.PasswordAuthentication = false; 81 | }; 82 | 83 | fail2ban = { 84 | enable = true; 85 | # ssh-iptables jail is enabled by default 86 | }; 87 | 88 | chrony = { 89 | enable = !config.boot.isContainer; 90 | servers = [ 91 | "0.de.pool.ntp.org" 92 | "1.de.pool.ntp.org" 93 | "2.de.pool.ntp.org" 94 | "3.de.pool.ntp.org" 95 | ]; 96 | }; 97 | 98 | nginx = { 99 | package = pkgs.nginxMainline; 100 | appendConfig = '' 101 | worker_processes auto; 102 | worker_cpu_affinity auto; 103 | ''; 104 | eventsConfig = '' 105 | worker_connections 2048; 106 | ''; 107 | appendHttpConfig = '' 108 | server_names_hash_max_size 1024; 109 | server_names_hash_bucket_size 64; 110 | 111 | error_log syslog:server=unix:/dev/log; 112 | ''; 113 | logError = "syslog:server=unix:/dev/log error"; 114 | recommendedOptimisation = true; 115 | recommendedTlsSettings = true; 116 | recommendedGzipSettings = true; 117 | recommendedProxySettings = true; 118 | }; 119 | 120 | udisks2.enable = mkDefault false; 121 | ntp.enable = false; 122 | timesyncd.enable = false; 123 | 124 | journald.extraConfig = '' 125 | MaxFileSec=1day 126 | MaxRetentionSec=1week 127 | ''; 128 | 129 | zfs.autoScrub.enable = config.fileSystems ? 
"/" && 130 | config.fileSystems."/".fsType == "zfs"; 131 | }; 132 | 133 | fonts.fontconfig.enable = lib.mkDefault config.services.xserver.enable; 134 | 135 | virtualisation.docker.autoPrune = { 136 | enable = mkDefault true; 137 | flags = [ "--all" "--volumes" ]; 138 | }; 139 | 140 | virtualisation.libvirtd.qemu.verbatimConfig = '' 141 | namespaces = [] 142 | set_process_name = 1 143 | seccomp_sandbox = 1 144 | ''; 145 | 146 | nix = { 147 | gc = { 148 | automatic = !config.boot.isContainer; 149 | options = "--delete-older-than 7d"; 150 | }; 151 | settings = { 152 | substituters = lib.mkForce [ 153 | "https://hydra.mayflower.de/" 154 | "https://cache.nixos.org/" 155 | ]; 156 | trusted-public-keys = [ 157 | "hydra.mayflower.de:9knPU2SJ2xyI0KTJjtUKOGUVdR2/3cOB4VNDQThcfaY=" 158 | ]; 159 | cores = 0; 160 | }; 161 | }; 162 | 163 | documentation.nixos.enable = false; 164 | 165 | hardware.enableAllFirmware = mkDefault false; 166 | sound.enable = mkDefault false; 167 | }; 168 | } 169 | -------------------------------------------------------------------------------- /modules/copy-nixpkgs.nix: -------------------------------------------------------------------------------- 1 | { config, lib, pkgs, ... 
}: 2 | let cfg = config.mayflower; in { 3 | options.mayflower.use-run-nixpkgs = (lib.mkEnableOption (lib.mdDoc "setting NIX_PATH to nixpkgs=/run/nixpkgs")) // {default = true;}; 4 | options.mayflower.copy-nixpkgs = lib.mkEnableOption (lib.mdDoc "including nixpkgs used for evaluation in system closure"); 5 | config = { 6 | nix.nixPath = lib.mkIf (cfg.use-run-nixpkgs) (lib.mkForce [ "nixpkgs=/run/nixpkgs" ]); 7 | 8 | systemd.tmpfiles.rules = lib.mkIf (cfg.copy-nixpkgs) [ 9 | "L+ /run/nixpkgs - - - - ${builtins.path { 10 | path = pkgs.path; 11 | filter = lib.cleanSourceFilter; 12 | name = "nixpkgs-${lib.version}"; 13 | }}" 14 | ]; 15 | }; 16 | } 17 | -------------------------------------------------------------------------------- /modules/demockrazy.nix: -------------------------------------------------------------------------------- 1 | { config, pkgs, lib, ... }: 2 | 3 | with lib; 4 | 5 | let 6 | cfg = config.mayflower.demockrazy; 7 | pkg = pkgs.stdenv.mkDerivation rec { 8 | pname = "demockrazy"; 9 | version = "2024-02-08"; 10 | src = pkgs.fetchFromGitHub { 11 | owner = "mayflower"; 12 | repo = "demockrazy"; 13 | rev = "3074dbb79c882ec999028dd2ef1b447cd0638ddc"; 14 | sha256 = "sha256-4MJkKwFGhOGMJ1jphtgM1H94oOdZo3ta0ucCZe/cOUs="; 15 | }; 16 | 17 | installPhase = '' 18 | mkdir -p $out/share/demockrazy 19 | cp -R . 
$out/share/demockrazy 20 | ''; 21 | }; 22 | configModule = pkgs.python3Packages.buildPythonPackage { 23 | name = "demockrazy-config"; 24 | format = "other"; 25 | unpackPhase = ":"; 26 | installPhase = '' 27 | mkdir -p $out/${pkgs.python3.sitePackages}/demockrazy_config 28 | cat << "EOF" > $out/${pkgs.python3.sitePackages}/demockrazy_config/__init__.py 29 | from demockrazy.settings import * 30 | STATIC_ROOT = '/var/lib/demockrazy/static' 31 | DEBUG = False 32 | DATABASES = { 33 | 'default': { 34 | 'ENGINE': 'django.db.backends.sqlite3', 35 | 'NAME': '/var/lib/demockrazy/db.sqlite3' 36 | } 37 | } 38 | 39 | LOGGING = { 40 | 'version': 1, 41 | 'disable_existing_loggers': False, 42 | 'handlers': { 43 | 'console': { 44 | 'class': 'logging.StreamHandler', 45 | }, 46 | }, 47 | 'loggers': { 48 | 'django': { 49 | 'handlers': ['console'], 50 | 'level': '${cfg.logLevel}', 51 | }, 52 | }, 53 | } 54 | 55 | with open('${cfg.secretKeyFile}') as secret_file: 56 | SECRET_KEY = secret_file.read().strip() 57 | 58 | ${optionalString (cfg.mail.passwordFile != null) '' 59 | with open('${cfg.mail.passwordFile}') as pw_file: 60 | EMAIL_HOST_PASSWORD = pw_file.read().strip() 61 | ''} 62 | 63 | ${optionalString (cfg.mail.host != null) "EMAIL_HOST = \"${cfg.mail.host}\""} 64 | EMAIL_PORT = ${toString cfg.mail.port} 65 | EMAIL_USE_TLS = ${if cfg.mail.useTLS then "True" else "False"} 66 | EMAIL_USE_SSL = ${if cfg.mail.useSSL then "True" else "False"} 67 | ${optionalString (cfg.mail.user != null) "EMAIL_HOST_USER = \"${cfg.mail.user}\""} 68 | VOTE_MAIL_FROM = "${cfg.mail.from}" 69 | VOTE_SEND_MAILS = ${if cfg.mail.sendMails then "True" else "False"} 70 | VOTE_BASE_URL = '${cfg.baseUrl}' 71 | ALLOWED_HOSTS = [ ${concatStringsSep ", " (map (h: "\"${h}\"") cfg.allowedHosts)} ] 72 | CSRF_COOKIE_SECURE = ${if cfg.secureCookies then "True" else "False"} 73 | SESSION_COOKIE_SECURE = ${if cfg.secureCookies then "True" else "False"} 74 | 75 | ${cfg.djangoSettings} 76 | EOF 77 | ''; 78 | }; 79 | in { 80 | 
options.mayflower.demockrazy = { 81 | enable = mkEnableOption (mdDoc "demockrazy"); 82 | djangoSettings = mkOption { 83 | type = types.lines; 84 | default = ""; 85 | description = mdDoc '' 86 | Verbatim django settings. 87 | ''; 88 | }; 89 | logLevel = mkOption { 90 | type = types.str; 91 | default = "INFO"; 92 | description = mdDoc '' 93 | Django log level 94 | ''; 95 | }; 96 | secretKeyFile = mkOption { 97 | type = types.path; 98 | description = mdDoc '' 99 | File containing the Django secret key 100 | ''; 101 | }; 102 | baseUrl = mkOption { 103 | type = types.str; 104 | example = "https://demockrazy.example.com"; 105 | description = mdDoc '' 106 | Base URL of demockrazy 107 | ''; 108 | }; 109 | allowedHosts = mkOption { 110 | type = types.listOf types.str; 111 | example = literalExample ''[ "demockrazy.example.com" ]''; 112 | description = mdDoc '' 113 | Hostnames/IPs allowed to access demockrazy 114 | ''; 115 | }; 116 | secureCookies = mkOption { 117 | type = types.bool; 118 | default = true; 119 | description = mdDoc '' 120 | Allows cookies to be only served via HTTPS 121 | ''; 122 | }; 123 | mail = mkOption { 124 | type = types.submodule { 125 | options = { 126 | sendMails = mkOption { 127 | type = types.bool; 128 | default = true; 129 | description = mdDoc '' 130 | Wheter to enable demockrazy sending mails. 131 | ''; 132 | }; 133 | from = mkOption { 134 | type = types.str; 135 | example = "demockrazy@example.com"; 136 | description = mdDoc '' 137 | Address to send mails from. 
138 | ''; 139 | }; 140 | host = mkOption { 141 | type = types.nullOr types.str; 142 | default = null; 143 | description = mdDoc '' 144 | Mail relay host name 145 | ''; 146 | }; 147 | user = mkOption { 148 | type = types.nullOr types.str; 149 | default = null; 150 | description = mdDoc '' 151 | Mail relay user name 152 | ''; 153 | }; 154 | passwordFile = mkOption { 155 | type = types.nullOr types.str; 156 | default = null; 157 | description = mdDoc '' 158 | Path to the file containing the mail relay user password 159 | ''; 160 | }; 161 | port = mkOption { 162 | type = types.nullOr types.int; 163 | default = 25; 164 | description = mdDoc '' 165 | Mail relay port 166 | ''; 167 | }; 168 | useTLS = mkOption { 169 | type = types.bool; 170 | default = true; 171 | description = mdDoc '' 172 | Whether to use STARTTLS when connecting to the mail relay host. 173 | ''; 174 | }; 175 | useSSL = mkOption { 176 | type = types.bool; 177 | default = false; 178 | description = mdDoc '' 179 | Whether to use SSL when connecting to the mail relay host. 
180 | ''; 181 | }; 182 | }; 183 | }; 184 | default = {}; 185 | description = mdDoc '' 186 | Options for mail handling of demockrazy 187 | ''; 188 | }; 189 | }; 190 | config = mkIf cfg.enable { 191 | systemd.services.demockrazy = let 192 | uwsgi = pkgs.uwsgi.override { plugins = [ "python3" ]; }; 193 | djangoenv = uwsgi.python3.buildEnv.override { 194 | extraLibs = [ pkgs.python3Packages.django configModule uwsgi ]; 195 | }; 196 | demockrazyUwsgi = pkgs.writeText "uwsgi.json" (builtins.toJSON { 197 | uwsgi = { 198 | plugins = [ "python3" ]; 199 | pythonpath = "${djangoenv}/${uwsgi.python3.sitePackages}"; 200 | uid = "demockrazy"; 201 | gid = "demockrazy"; 202 | socket = "/run/demockrazy/uwsgi.socket"; 203 | chown-socket = "demockrazy:nginx"; 204 | chmod-socket = 770; 205 | chdir = "${pkg}/share/demockrazy"; 206 | wsgi-file = "demockrazy/wsgi.py"; 207 | env = "DJANGO_SETTINGS_MODULE=demockrazy_config"; 208 | master = true; 209 | processes = 4; 210 | stats = "/run/demockrazy/stats.socket"; 211 | no-orphans = true; 212 | vacuum = true; 213 | logger = "syslog"; 214 | }; 215 | }); 216 | in { 217 | description = "demockrazy"; 218 | after = [ "network.target" ]; 219 | wantedBy = [ "multi-user.target" ]; 220 | environment = { 221 | DJANGO_SETTINGS_MODULE = "demockrazy_config"; 222 | }; 223 | preStart = '' 224 | cd ${pkg}/share/demockrazy && ${djangoenv}/bin/python3 manage.py migrate && ${djangoenv}/bin/python3 manage.py collectstatic --noinput 225 | ''; 226 | serviceConfig = { 227 | Type = "notify"; 228 | Restart = "on-failure"; 229 | KillSignal = "SIGQUIT"; 230 | StandardError = "syslog"; 231 | NotifyAccess = "all"; 232 | ExecStart = "${uwsgi}/bin/uwsgi --json ${demockrazyUwsgi}"; 233 | PrivateDevices = "yes"; 234 | PrivateTmp = "yes"; 235 | ProtectSystem = "full"; 236 | ReadWritePaths = "/run/demockrazy /var/lib/demockrazy"; 237 | ProtectHome = "yes"; 238 | NoNewPrivileges = "yes"; 239 | }; 240 | }; 241 | systemd.tmpfiles.rules = [ 242 | "d /var/lib/demockrazy 0755 
demockrazy demockrazy -" 243 | "d /var/lib/demockrazy/static 0755 demockrazy demockrazy -" 244 | "d /run/demockrazy 0755 demockrazy demockrazy -" 245 | ]; 246 | 247 | users.users.demockrazy = { 248 | group = "demockrazy"; 249 | isSystemUser = true; 250 | }; 251 | users.groups.demockrazy = {}; 252 | }; 253 | } 254 | -------------------------------------------------------------------------------- /modules/docker-runner.nix: -------------------------------------------------------------------------------- 1 | { lib, pkgs, config, ... }: 2 | 3 | with lib; 4 | 5 | let 6 | 7 | cfg = config.mayflower.docker-runner; 8 | 9 | in 10 | 11 | { 12 | options = { 13 | mayflower.docker-runner = { 14 | enable = mkEnableOption (mdDoc "docker runner"); 15 | 16 | name = mkOption { 17 | type = types.str; 18 | description = mdDoc "Name of the runner"; 19 | }; 20 | 21 | token = mkOption { 22 | type = types.str; 23 | description = mdDoc "Token of the runner"; 24 | }; 25 | 26 | concurrent = mkOption { 27 | type = types.int; 28 | description = mdDoc "Number of parallel jobs to run"; 29 | }; 30 | 31 | url = mkOption { 32 | type = types.str; 33 | default = "https://git.mayflower.de/"; 34 | description = mdDoc "URL to Gitlab instance"; 35 | }; 36 | }; 37 | }; 38 | 39 | config = mkIf cfg.enable { 40 | services.gitlab-runner = { 41 | enable = true; 42 | configOptions = { 43 | concurrent = cfg.concurrent; 44 | check_interval = 5; 45 | log_level = "info"; 46 | metrics_server = "[::]:9055"; 47 | runners = [ 48 | { 49 | inherit (cfg) name url token; 50 | executor = "docker"; 51 | docker = { 52 | image = "ubuntu:17.10"; 53 | }; 54 | } 55 | ]; 56 | }; 57 | package = pkgs.gitlab-runner; 58 | }; 59 | 60 | networking.dhcpcd.denyInterfaces = [ "veth*" "docker*" ]; 61 | networking.firewall.allowedTCPPorts = [ 9055 ]; 62 | 63 | virtualisation.docker = { 64 | enable = true; 65 | package = pkgs.docker-edge; 66 | autoPrune = { 67 | enable = true; 68 | dates = "daily"; 69 | }; 70 | }; 71 | }; 72 | } 73 | 
-------------------------------------------------------------------------------- /modules/hagrid.nix: -------------------------------------------------------------------------------- 1 | { config, lib, pkgs, ... }: 2 | 3 | let 4 | inherit (builtins) readFile toString; 5 | inherit (lib) attrsets generators modules options strings types; 6 | inherit (attrsets) mapAttrsToList recursiveUpdate; 7 | inherit (generators) toINI; 8 | inherit (modules) mkIf mkDefault; 9 | inherit (options) literalExpression mdDoc mkEnableOption mkOption; 10 | inherit (strings) concatStrings replaceStrings; 11 | 12 | cfg = config.services.hagrid; 13 | toml = pkgs.formats.toml {}; 14 | in { 15 | meta.maintainers = with lib.maintainers; [ ]; 16 | 17 | options = { 18 | 19 | services.hagrid = { 20 | 21 | enable = mkEnableOption (mdDoc '' 22 | hagrid (Verifying OpenPGP keyserver, written in Rust)''); 23 | 24 | package = mkOption { 25 | default = pkgs.hagrid; 26 | defaultText = literalExpression "pkgs.hagrid"; 27 | type = types.package; 28 | description = mdDoc "Which hagrid derivation to use."; 29 | }; 30 | 31 | dataDir = mkOption { 32 | type = types.str; 33 | default = "/var/lib/hagrid"; 34 | example = "/var/db/hagrid"; 35 | description = mdDoc '' 36 | Data directory for hagrid, where the keys, assets, 37 | templates and Rocket.toml are located. 38 | ''; 39 | }; 40 | 41 | environmentFile = mkOption { 42 | type = types.str; 43 | description = mdDoc '' 44 | Systemd EnvironmentFile with secrets to inject 45 | into the service config before startup. 46 | ''; 47 | }; 48 | 49 | hostname = mkOption { 50 | type = types.str; 51 | description = mdDoc '' 52 | Which hostname to set the base-URI of hagrid to. 53 | ''; 54 | }; 55 | 56 | settings = mkOption { 57 | type = toml.type; 58 | default = {}; 59 | description = mdDoc '' 60 | Configuration for hagrid rocket server in toml type (attrset). 61 | See https://gitlab.com/keys.openpgp.org/hagrid/ for details. 
62 | ''; 63 | }; 64 | 65 | webRoot = mkOption { 66 | type = types.str; 67 | default = "${cfg.dataDir}/public"; 68 | readOnly = true; 69 | }; 70 | 71 | }; 72 | }; 73 | 74 | config = mkIf cfg.enable { 75 | 76 | services.hagrid.settings = { 77 | global = { 78 | address = mkDefault "0.0.0.0"; 79 | port = mkDefault 8080; 80 | }; 81 | release = { 82 | assets_dir = mkDefault "${cfg.webRoot}/assets"; 83 | base-URI = mkDefault "https://${cfg.hostname}"; 84 | base-URI-Onion = mkDefault "https://${cfg.hostname}"; 85 | email_template_dir = mkDefault "${cfg.dataDir}/email-templates"; 86 | enable_prometheus = mkDefault false; 87 | from = mkDefault "noreply@localhost"; 88 | keys_external_dir = mkDefault "${cfg.webRoot}/keys"; 89 | keys_internal_dir = mkDefault "${cfg.dataDir}/keys"; 90 | mail_rate_limit = mkDefault 3600; 91 | maintenance_file = mkDefault "${cfg.dataDir}/maintenance"; 92 | template_dir = mkDefault "${cfg.dataDir}/templates"; 93 | tmp_dir = mkDefault "${cfg.dataDir}/tmp"; 94 | token_dir = mkDefault "${cfg.dataDir}/tokens"; 95 | token_secret = mkDefault "$TOKEN_SECRET"; 96 | token_validity = mkDefault 3600; 97 | x-accel-redirect = mkDefault true; 98 | }; 99 | }; 100 | 101 | users = { 102 | users.hagrid = { 103 | isSystemUser = true; 104 | description = "hagrid user"; 105 | home = cfg.dataDir; 106 | createHome = true; 107 | group = "hagrid"; 108 | useDefaultShell = true; 109 | packages = [ cfg.package ]; 110 | }; 111 | groups.hagrid = { }; 112 | }; 113 | 114 | systemd.services = let 115 | releaseProfile = cfg.settings.release; 116 | dirMap = { 117 | assets = releaseProfile.assets_dir; 118 | templates = releaseProfile.template_dir; 119 | email-templates = releaseProfile.email_template_dir; 120 | #errors-static = cfg.webRoot + "/errors-static"; 121 | }; 122 | in { 123 | hagrid = { 124 | description = "hagrid (Verifying OpenPGP keyserver)"; 125 | after = [ "network.target" ]; 126 | wantedBy = [ "multi-user.target" ]; 127 | preStart = '' 128 | ${concatStrings 
(mapAttrsToList (name: path: '' 129 | mkdir -p $(dirname ${path}) 130 | ln -nsf ${cfg.package}/dist/${name} ${path} 131 | '') dirMap)} 132 | 133 | rm -f "${cfg.dataDir}/Rocket.toml" 134 | 135 | ${pkgs.envsubst}/bin/envsubst \ 136 | -o "${cfg.dataDir}/Rocket.toml" \ 137 | -i "${toml.generate "Rocket.toml" cfg.settings}" 138 | ''; 139 | serviceConfig = { 140 | WorkingDirectory = "~"; 141 | User = "hagrid"; 142 | Group = "hagrid"; 143 | Restart = "always"; 144 | ExecStart = "${cfg.package}/bin/hagrid"; 145 | EnvironmentFile = cfg.environmentFile; 146 | PrivateTmp = true; 147 | }; 148 | }; 149 | }; 150 | 151 | networking.firewall.allowedTCPPorts = [ cfg.settings.global.port ]; 152 | 153 | }; 154 | } 155 | -------------------------------------------------------------------------------- /modules/kvm.nix: -------------------------------------------------------------------------------- 1 | { config, lib, pkgs, modulesPath, ... }: 2 | with lib; 3 | { 4 | options = { 5 | mayflower.kvm.enable = mkOption { 6 | type = types.bool; 7 | default = false; 8 | }; 9 | }; 10 | 11 | config = mkIf config.mayflower.kvm.enable (mkMerge [ 12 | { hardware.enableAllFirmware = false; 13 | 14 | boot.loader.grub.enable = true; 15 | boot.loader.grub.device = "/dev/vda"; 16 | boot.initrd.availableKernelModules = [ "ata_piix" "uhci_hcd" ]; 17 | 18 | fileSystems."/" = { 19 | device = "/dev/disk/by-label/nixos"; 20 | fsType = lib.mkDefault "ext4"; 21 | }; 22 | } 23 | ((import "${modulesPath}/profiles/qemu-guest.nix") { inherit config pkgs lib; }) 24 | ]); 25 | } 26 | -------------------------------------------------------------------------------- /modules/log-aggregation.nix: -------------------------------------------------------------------------------- 1 | { config, lib, pkgs, ... 
}: 2 | 3 | let 4 | cfg = config.mayflower.log-aggregation; 5 | 6 | inherit (lib.attrsets) 7 | recursiveUpdate; 8 | 9 | inherit (lib.options) 10 | mkEnableOption 11 | mkOption; 12 | 13 | inherit (lib.modules) 14 | mkDefault 15 | mkIf 16 | mkMerge; 17 | 18 | inherit (lib.lists) 19 | optionals; 20 | 21 | inherit (lib) 22 | types 23 | mdDoc; 24 | 25 | lokiServiceConfig = { 26 | enable = true; 27 | configuration = { 28 | server.http_listen_port = 3100; 29 | common = { 30 | path_prefix = "/var/lib/loki"; 31 | replication_factor = 1; 32 | ring.kvstore.store = "inmemory"; 33 | }; 34 | ruler.storage.s3.bucketnames = "loki-ruler"; 35 | query_scheduler.max_outstanding_requests_per_tenant = 8192; 36 | limits_config = { 37 | retention_period = "90d"; 38 | max_query_length = "90d"; 39 | max_query_series = 5000; 40 | split_queries_by_interval = "2h"; 41 | }; 42 | }; 43 | extraFlags = [ 44 | "-log-config-reverse-order" 45 | "-config.expand-env=true" 46 | ]; 47 | }; 48 | 49 | promtailServiceConfig = { 50 | enable = true; 51 | configuration = { 52 | server = { 53 | http_listen_port = 9080; # default port 80 used on many hosts 54 | grpc_listen_port = 0; # default port 9095 used by loki (0 means random) 55 | }; 56 | clients = let 57 | lokiHostname = config.mayflower.wireguard.star.${cfg.networkName}.centralPeerHostname; 58 | in [ 59 | { url = "http://${lokiHostname}:3100/loki/api/v1/push"; tenant_id = "mfadm"; } 60 | ]; 61 | scrape_configs = [ 62 | { 63 | job_name = "journal"; 64 | journal = { 65 | max_age = "12h"; 66 | labels = recursiveUpdate { 67 | job = "systemd-journal"; 68 | } cfg.extraStaticJobLabels; 69 | }; 70 | relabel_configs = [ 71 | { 72 | source_labels = ["__journal__systemd_unit"]; 73 | target_label = "unit"; 74 | } 75 | { 76 | source_labels = ["__journal__hostname"]; 77 | target_label = "hostname"; 78 | } 79 | { 80 | source_labels = ["__journal_syslog_identifier"]; 81 | target_label = "syslog_identifier"; 82 | } 83 | { # A priority value between 0 ("emerg") and 7 
("debug") formatted as a decimal string. 84 | # This field is compatible with syslog's priority concept. 85 | source_labels = ["__journal_priority"]; 86 | target_label = "priority"; 87 | } 88 | { # How the entry was received by the journal service. Valid transports are: 89 | # audit, driver, syslog, journal, stdout, kernel 90 | source_labels = ["__journal__transport"]; 91 | target_label = "transport"; 92 | } 93 | ]; 94 | } 95 | ]; 96 | }; 97 | }; 98 | in 99 | { 100 | options.mayflower.log-aggregation = { 101 | enable = mkOption { 102 | type = types.bool; 103 | default = true; 104 | }; 105 | 106 | networkName = mkOption { 107 | type = types.str; 108 | default = "log"; 109 | }; 110 | 111 | isServer = mkEnableOption (mdDoc "config for the central loki instance"); 112 | 113 | extraStaticJobLabels = mkOption { 114 | type = types.attrsOf types.str; 115 | example = { 116 | datacenter = "dc1"; 117 | }; 118 | description = mdDoc '' 119 | Additional static labels added to the systemd-journal promtail job. 
120 | ''; 121 | }; 122 | }; 123 | 124 | # TODO assertion, only one central host 125 | # TODO assertion for network name (same as in mayflower.wireguard) 126 | 127 | config = mkIf cfg.enable ({ 128 | mayflower.log-aggregation.extraStaticJobLabels = mkDefault { 129 | inherit (config.mayflower.monitoring) datacenter; 130 | }; 131 | 132 | mayflower.wireguard = { 133 | enable = true; 134 | star.${cfg.networkName} = { 135 | inherit (cfg) isServer; 136 | enable = true; 137 | }; 138 | }; 139 | 140 | networking.firewall = mkIf cfg.isServer { 141 | extraCommands = '' 142 | ip46tables -A nixos-fw -i wg-${cfg.networkName} -p tcp --dport 3100 -m comment --comment "loki mayflower.log-aggregation" -j ACCEPT 143 | ''; 144 | }; 145 | 146 | environment.systemPackages = optionals cfg.isServer [ 147 | pkgs.grafana-loki # for logcli 148 | ]; 149 | 150 | services = mkMerge [ 151 | (mkIf cfg.isServer { 152 | loki = lokiServiceConfig; 153 | }) 154 | ({ 155 | promtail = promtailServiceConfig; 156 | }) 157 | ]; 158 | 159 | sops.secrets = mkMerge [ 160 | ({ 161 | "wireguard-${cfg.networkName}-privatekey" = {}; 162 | }) 163 | ]; 164 | }); 165 | } 166 | -------------------------------------------------------------------------------- /modules/machines.nix: -------------------------------------------------------------------------------- 1 | { lib, config, ... }: 2 | 3 | with lib; 4 | 5 | { 6 | options = { 7 | mayflower.machines = mkOption { 8 | type = types.attrs; 9 | default = {}; 10 | description = mdDoc "Machines in the deployment"; 11 | }; 12 | }; 13 | } 14 | -------------------------------------------------------------------------------- /modules/matrix.nix: -------------------------------------------------------------------------------- 1 | { lib, config, pkgs, ... 
}: 2 | 3 | with lib; 4 | 5 | let 6 | cfg = config.mayflower.matrix; 7 | 8 | in 9 | { 10 | options.mayflower.matrix = { 11 | enable = mkEnableOption (mdDoc "The Matrix"); 12 | 13 | fqdn = mkOption { 14 | type = types.str; 15 | example = "matrix.example.com"; 16 | description = mdDoc '' 17 | The fully qualified domain name of the matrix server. 18 | This domain name will also be used to open a minimal nginx reverse proxy 19 | for the matrix-synapse service. 20 | ''; 21 | }; 22 | 23 | turn = { 24 | enable = mkEnableOption (mdDoc "coturn as turn server"); 25 | 26 | authSecretFile = mkOption { 27 | type = types.str; 28 | description = mdDoc '' 29 | Path to the file containing the shared secret for coturn. 30 | ''; 31 | }; 32 | 33 | # Note(@Ma27): I considered to somehow implement an abstraction 34 | # which allows to do this with a single option, but since this is a special use-case 35 | # I didn't consider it worth the effort (including future maintenance effort). 36 | synapseAuthSecretFile = mkOption { 37 | type = types.str; 38 | description = mdDoc '' 39 | Path to the file containing the same secret as 40 | [](#opt-mayflower.matrix.turn.authSecretFile), 41 | but prefixed with `turn_shared_secret` since 42 | it will be passed to synasep as config file. 43 | ''; 44 | }; 45 | 46 | listenIPs = mkOption { 47 | type = with types; listOf str; 48 | default = []; 49 | description = ""; 50 | }; 51 | }; 52 | 53 | element = { 54 | enable = mkEnableOption (mdDoc "Element web client"); 55 | 56 | fqdn = mkOption { 57 | type = types.str; 58 | example = "chat.example.com"; 59 | description = mdDoc '' 60 | The fully qualified domain name of where the Element web frontend will be deployed. 61 | ''; 62 | }; 63 | 64 | defaultHomeServerUrl = mkOption { 65 | type = types.str; 66 | example = "https://matrix.org/"; 67 | description = mdDoc '' 68 | The default home server URL Element should use. 
69 | ''; 70 | }; 71 | 72 | defaultIdentityServerUrl = mkOption { 73 | type = types.str; 74 | example = "https://vector.im/"; 75 | default = ""; 76 | description = mdDoc '' 77 | The default identity server URL Element should use. 78 | ''; 79 | }; 80 | 81 | disableCustomUrls = mkOption { 82 | type = types.bool; 83 | default = true; 84 | description = mdDoc '' 85 | Whether or not to allow custom URLs in Element. 86 | ''; 87 | }; 88 | 89 | disableGuests = mkOption { 90 | type = types.bool; 91 | default = true; 92 | description = mdDoc '' 93 | Whether or not to allow guest logins through Element. 94 | Guests must be enabled on the server too. 95 | ''; 96 | }; 97 | 98 | disableLoginLanguageSelector = mkOption { 99 | type = types.bool; 100 | default = false; 101 | description = mdDoc '' 102 | Whether or not to allow users to change the language for Element. 103 | ''; 104 | }; 105 | 106 | disable3pidLogin = mkOption { 107 | type = types.bool; 108 | default = false; 109 | description = mdDoc '' 110 | Whether or not to allow third party ID logins. 111 | This can be an E-Mail address or phone number using an identity server. 
112 | ''; 113 | }; 114 | 115 | brand = mkOption { 116 | type = types.str; 117 | default = "Element"; 118 | }; 119 | 120 | crossOriginRendererDomain = mkOption { 121 | type = types.nullOr types.str; 122 | default = null; 123 | description = mdDoc ""; 124 | }; 125 | 126 | extraConfig = mkOption { 127 | type = types.attrs; 128 | default = {}; 129 | description = mdDoc "Overrides to the Element config"; 130 | }; 131 | }; 132 | }; 133 | 134 | config = mkIf cfg.enable { 135 | 136 | networking.firewall = { 137 | allowedTCPPorts = [ 138 | 80 139 | 443 140 | # matrix-synapse federation port 141 | 8448 142 | ] ++ optionals cfg.turn.enable [ 3478 3479 5349 5350 ]; 143 | allowedUDPPorts = optionals cfg.turn.enable [ 3478 3479 5349 5350 ]; 144 | allowedUDPPortRanges = optional cfg.turn.enable { from = 50000; to = 54999; }; 145 | }; 146 | 147 | services = { 148 | postgresql = { 149 | enable = true; 150 | settings.synchronous_commit = "off"; 151 | }; 152 | 153 | nginx = { 154 | enable = true; 155 | virtualHosts = mkMerge [ 156 | { # Reverse Proxy for matrix-synapse 157 | ${cfg.fqdn} = { 158 | forceSSL = true; 159 | enableACME = true; 160 | 161 | locations = { 162 | "/".extraConfig = mkIf cfg.element.enable "return 302 https://${cfg.element.fqdn};"; 163 | "/_matrix" = { 164 | proxyPass = "http://127.0.0.1:8008"; 165 | priority = 30; 166 | extraConfig = '' 167 | add_header X-Content-Type-Options "nosniff" always; 168 | ''; 169 | }; 170 | "/_synapse/client" = { 171 | proxyPass = "http://127.0.0.1:8008"; 172 | priority = 30; 173 | extraConfig = '' 174 | add_header X-Content-Type-Options "nosniff" always; 175 | ''; 176 | }; 177 | "/_matrix/identity" = { 178 | proxyPass = "http://127.0.0.1:8090/_matrix/identity"; 179 | extraConfig = '' 180 | add_header Access-Control-Allow-Origin *; 181 | add_header Access-Control-Allow-Method 'GET, POST, PUT, DELETE, OPTIONS'; 182 | add_header X-Content-Type-Options "nosniff" always; 183 | ''; 184 | priority = 20; 185 | }; 186 | 
"/_matrix/client/r0/user_directory" = { 187 | proxyPass = "http://127.0.0.1:8090/_matrix/client/r0/user_directory"; 188 | priority = 10; 189 | extraConfig = '' 190 | add_header X-Content-Type-Options "nosniff" always; 191 | ''; 192 | }; 193 | }; 194 | }; 195 | } 196 | (mkIf (cfg.element.crossOriginRendererDomain != null) { 197 | ${cfg.element.crossOriginRendererDomain} = { 198 | forceSSL = true; 199 | enableACME = true; 200 | root = pkgs.fetchFromGitHub { 201 | owner = "matrix-org"; 202 | repo = "usercontent"; 203 | rev = "2c43f6dbbb64b4e589209965533f7c8a14806010"; 204 | sha256 = "074ln7hfwwwnjhgzx3a59ds6k6007mx0brp7m7lrz1pfn6v79b8j"; 205 | }; 206 | extraConfig = '' 207 | add_header X-Content-Type-Options "nosniff" always; 208 | ''; 209 | locations."/".extraConfig = "return 204;"; 210 | }; 211 | }) 212 | (mkIf cfg.element.enable { 213 | # element web frontend configuration 214 | ${cfg.element.fqdn} = { 215 | forceSSL = true; 216 | enableACME = true; 217 | 218 | locations = { 219 | "/" = { 220 | extraConfig = '' 221 | add_header X-Content-Type-Options "nosniff" always; 222 | ''; 223 | root = pkgs.element-web.override { 224 | #"welcomePageUrl": "home.html", 225 | conf = (flip recursiveUpdate cfg.element.extraConfig { 226 | "default_server_config" = { 227 | "m.homeserver" = { 228 | "base_url" = cfg.element.defaultHomeServerUrl; 229 | "server_name" = cfg.fqdn; 230 | }; 231 | "m.identity_server"."base_url" = cfg.element.defaultIdentityServerUrl; 232 | }; 233 | "disable_custom_urls" = cfg.element.disableCustomUrls; 234 | "disable_guests" = cfg.element.disableGuests; 235 | "disable_login_language_selector" = cfg.element.disableLoginLanguageSelector; 236 | "disable_3pid_login" = cfg.element.disable3pidLogin; 237 | "brand" = cfg.element.brand; 238 | "integrations_ui_url" = "https://scalar.vector.im/"; 239 | "integrations_rest_url" = "https://scalar.vector.im/api"; 240 | "integrations_jitsi_widget_url" = "https://scalar.vector.im/api/widgets/jitsi.html"; 241 | 
"defaultCountryCode" = "DE"; 242 | "showLabsSettings" = true; 243 | "features" = { 244 | "feature_groups" = "enable"; 245 | "feature_pinning" = "enable"; 246 | "feature_reactions" = "enable"; 247 | "feature_message_editing" = "labs"; 248 | }; 249 | "default_federate" = false; 250 | "default_theme" = "dark"; 251 | "roomDirectory" = { 252 | "servers" = [ 253 | cfg.fqdn "matrix.org" 254 | ]; 255 | }; 256 | "welcomeUserId" = null; 257 | "piwik" = false; 258 | "enable_presence_by_hs_url" = { 259 | "https://matrix.org" = false; 260 | }; 261 | }); 262 | }; 263 | }; 264 | }; 265 | }; 266 | }) 267 | ]; 268 | }; 269 | 270 | matrix-synapse = { 271 | enable = true; 272 | extraConfigFiles = mkIf cfg.turn.enable [ 273 | cfg.turn.synapseAuthSecretFile 274 | ]; 275 | settings = { 276 | server_name = cfg.fqdn; 277 | tls_certificate_path = "/var/lib/acme/${cfg.fqdn}/fullchain.pem"; 278 | tls_private_key_path = "/var/lib/acme/${cfg.fqdn}/key.pem"; 279 | enable_registration = false; 280 | enable_metrics = true; 281 | database.name = "psycopg2"; 282 | # turn configuration with coturn 283 | turn_uris = optionals cfg.turn.enable [ 284 | "turn:${cfg.fqdn}:3478?transport=udp" 285 | "turn:${cfg.fqdn}:3478?transport=tcp" 286 | ]; 287 | turn_user_lifetime = "86400000"; 288 | # For simplicity do not reverse-proxy the federation port 289 | # See https://github.com/matrix-org/synapse#reverse-proxying-the-federation-port 290 | listeners = [{ 291 | port = 8448; 292 | bind_addresses = [ "::" ]; 293 | type = "http"; 294 | tls = true; 295 | x_forwarded = false; 296 | resources = [ 297 | { names = ["federation"]; compress = false; } 298 | ]; 299 | } { 300 | port = 8008; 301 | bind_addresses = [ "127.0.0.1" "::1" ]; 302 | type = "http"; 303 | tls = false; 304 | x_forwarded = true; 305 | resources = [ 306 | { names = ["client"]; compress = false; } 307 | ]; 308 | } { 309 | port = 9092; 310 | bind_addresses = [ "0.0.0.0" ]; 311 | type = "metrics"; 312 | tls = false; 313 | resources = []; 314 | }]; 315 | 
}; 316 | }; 317 | 318 | mxisd = { 319 | enable = true; 320 | matrix.domain = cfg.fqdn; 321 | extraConfig = { 322 | dns.overwrite.homeserver.client = [ 323 | { name = cfg.fqdn; value = "http://127.0.0.1:8008"; } 324 | ]; 325 | session.policy.unbind = { 326 | enabled = true; 327 | notifications = false; 328 | }; 329 | session.policy.validation = { 330 | enabled = true; 331 | forLocal = { 332 | enabled = true; 333 | toLocal = true; 334 | toRemote.enabled = false; 335 | }; 336 | forRemote = { 337 | enabled = true; 338 | toLocal = true; 339 | toRemote.enabled = false; 340 | }; 341 | }; 342 | }; 343 | }; 344 | 345 | coturn = mkIf cfg.turn.enable { 346 | enable = true; 347 | listening-ips = cfg.turn.listenIPs; 348 | lt-cred-mech = true; 349 | use-auth-secret = true; 350 | static-auth-secret-file = cfg.turn.authSecretFile; 351 | realm = cfg.fqdn; 352 | cert = "/var/lib/acme/${cfg.fqdn}/fullchain.pem"; 353 | pkey = "/var/lib/acme/${cfg.fqdn}/key.pem"; 354 | min-port = 50000; 355 | max-port = 54999; 356 | no-tcp-relay = true; 357 | extraConfig = '' 358 | user-quota=12 # 4 streams per video call, so 12 streams = 3 simultaneous relayed calls per user. 
359 | total-quota=1200 360 | ''; 361 | }; 362 | }; 363 | 364 | users.extraGroups.matrix-certs.members = [ 365 | config.services.nginx.user "matrix-synapse" 366 | ] ++ optional cfg.turn.enable "turnserver"; 367 | 368 | security.acme.certs = { 369 | ${cfg.fqdn} = { 370 | group = "matrix-certs"; 371 | postRun = '' 372 | systemctl restart matrix-synapse 373 | '' + optionalString cfg.turn.enable '' 374 | systemctl restart coturn 375 | ''; 376 | }; 377 | }; 378 | 379 | mayflower.matrix.element.defaultHomeServerUrl = lib.mkDefault "https://${cfg.fqdn}/"; 380 | }; 381 | } 382 | -------------------------------------------------------------------------------- /modules/monitoring/alert-rules.nix: -------------------------------------------------------------------------------- 1 | { filesystemFilter }: let 2 | ensureComma = x: if x == "" then "" else ",${x}"; 3 | in { 4 | node_deployed = { 5 | condition = excl: "node_deployed{${excl}} < time()-86400*14"; 6 | page = false; 7 | summary = "{{$labels.alias}}: Last deployed on {{$labels.date}} with version {{$labels.version}}"; 8 | description = "{{$labels.alias}}: Last deployed on {{$labels.date}} with version {{$labels.version}}"; 9 | }; 10 | node_down = { 11 | condition = excl: ''up{job="node"${ensureComma excl}} == 0''; 12 | summary = "{{$labels.alias}}: Node is down."; 13 | description = "{{$labels.alias}} has been down for more than 2 minutes."; 14 | }; 15 | node_collector_failed = { 16 | # FIXME remove `collector!="conntrack"` as soon as 17 | # https://github.com/prometheus/node_exporter/issues/2491 is resolved. 
18 | condition = excl: ''node_scrape_collector_success{job="node",collector!="conntrack"${ensureComma excl}} == 0''; 19 | summary = "{{$labels.alias}}: Node collector {{$labels.collector}} failed."; 20 | description = "{{$labels.alias}}: The collector {{$labels.collector}} of node exporter instance {{$labels.instance}} failed."; 21 | }; 22 | node_systemd_service_failed = { 23 | condition = excl: ''node_systemd_unit_state{state="failed"${ensureComma excl}} == 1''; 24 | summary = "{{$labels.alias}}: Service {{$labels.name}} failed to start."; 25 | description = "{{$labels.alias}} failed to (re)start service {{$labels.name}}."; 26 | }; 27 | node_filesystem_full_80percent = { 28 | condition = excl: ''sort(node_filesystem_free_bytes{${filesystemFilter}${ensureComma excl}} < node_filesystem_size_bytes{${filesystemFilter}${ensureComma excl}} * 0.2) / 1024^3''; 29 | time = "10m"; 30 | summary = "{{$labels.alias}}: Filesystem is running out of space soon."; 31 | description = "{{$labels.alias}} device {{$labels.device}} on {{$labels.mountpoint}} got less than 20% space left on its filesystem."; 32 | }; 33 | node_filesystem_full_in_7d = { 34 | condition = excl: ''node_filesystem_free_bytes{${filesystemFilter}${ensureComma excl}} '' 35 | + ''and predict_linear(node_filesystem_free_bytes{${filesystemFilter}${ensureComma excl}}[2d], 7*24*3600) <= 0''; 36 | time = "1h"; 37 | summary = "{{$labels.alias}}: Filesystem is running out of space in 7 days."; 38 | description = "{{$labels.alias}} device {{$labels.device}} on {{$labels.mountpoint}} is running out of space in approx. 
7 days"; 39 | }; 40 | node_filesystem_full_in_30d = { 41 | condition = excl: ''node_filesystem_free_bytes{${filesystemFilter}${ensureComma excl}} '' 42 | + ''and predict_linear(node_filesystem_free_bytes{${filesystemFilter}${ensureComma excl}}[30d], 30*24*3600) <= 0''; 43 | time = "1h"; 44 | summary = "{{$labels.alias}}: Filesystem is running out of space in 30 days."; 45 | description = "{{$labels.alias}} device {{$labels.device}} on {{$labels.mountpoint}} is running out of space in approx. 30 days"; 46 | }; 47 | node_filesystem_zfs_unhealthy = { 48 | condition = excl: ''zfs_pool_health{${excl}} != 0''; 49 | summary = "{{$labels.alias}}: zpool is unhealthy."; 50 | description = "{{$labels.alias}} zpool {{$labels.pool}} is unhealthy in status {{$value}} [0: ONLINE, 1: DEGRADED, 2: FAULTED, 3: OFFLINE, 4: UNAVAIL, 5: REMOVED, 6: SUSPENDED]"; 51 | }; 52 | node_filesystem_zfs_leaked_bytes = { 53 | condition = excl: ''zfs_pool_leaked_bytes{${excl}} > 0''; 54 | summary = "{{$labels.alias}}: zpool leaked bytes."; 55 | description = "{{$labels.alias}} zpool {{$labels.pool}} leaked {{$value}} bytes"; 56 | }; 57 | node_inodes_full_in_7d = { 58 | condition = excl: ''node_filesystem_files_free{${filesystemFilter}${ensureComma excl}} '' 59 | + ''and predict_linear(node_filesystem_files_free{${filesystemFilter}${ensureComma excl}}[2d], 7*24*3600) < 0''; 60 | time = "1h"; 61 | summary = "{{$labels.alias}}: Filesystem is running out of inodes in 7 days."; 62 | description = "{{$labels.alias}} device {{$labels.device}} on {{$labels.mountpoint}} is running out of inodes in approx. 
7 days"; 63 | }; 64 | node_inodes_full_in_30d = { 65 | condition = excl: ''node_filesystem_files_free{${filesystemFilter}${ensureComma excl}} '' 66 | + ''and predict_linear(node_filesystem_files_free{${filesystemFilter}${ensureComma excl}}[30d], 30*24*3600) < 0''; 67 | time = "1h"; 68 | summary = "{{$labels.alias}}: Filesystem is running out of inodes in 30 days."; 69 | description = "{{$labels.alias}} device {{$labels.device}} on {{$labels.mountpoint}} is running out of inodes in approx. 30 days"; 70 | }; 71 | node_filedescriptors_full_in_3h = { 72 | condition = excl: ''node_filefd_allocated{${excl}} '' 73 | + ''and predict_linear(node_filefd_allocated{${excl}}[3h], 3*3600) >= node_filefd_maximum{${excl}}''; 74 | time = "20m"; 75 | summary = "{{$labels.alias}} is running out of available file descriptors in 3 hours."; 76 | description = "{{$labels.alias}} is running out of available file descriptors in approx. 3 hours"; 77 | }; 78 | node_filedescriptors_full_in_7d = { 79 | condition = excl: ''node_filefd_allocated{${excl}} '' 80 | + ''and predict_linear(node_filefd_allocated{${excl}}[7d], 7*24*3600) >= node_filefd_maximum{${excl}}''; 81 | time = "1h"; 82 | summary = "{{$labels.alias}} is running out of available file descriptors in 7 days."; 83 | description = "{{$labels.alias}} is running out of available file descriptors in approx. 
7 days"; 84 | }; 85 | node_load15 = { 86 | condition = excl: ''node_load15{${excl}} / on(alias) count(node_cpu_seconds_total{mode="system"${ensureComma excl}}) by (alias) >= 1.0''; 87 | time = "10m"; 88 | summary = "{{$labels.alias}}: Running on high load: {{$value}}"; /* fixed: description claimed "5 minutes" but `time` is "10m" — the alert only fires after 10 minutes */ 89 | description = "{{$labels.alias}} is running with load15 > 1 for at least 10 minutes: {{$value}}"; 90 | }; 91 | node_ram_using_90percent_non_zfs_nodes = { 92 | condition = excl: "node_memory_MemAvailable_bytes{${excl}} < node_memory_MemTotal_bytes{${excl}} * 0.1 unless node_zfs_arc_size{${excl}}"; 93 | time = "1h"; 94 | summary = "{{$labels.alias}}: Using lots of RAM."; 95 | description = "{{$labels.alias}} is using at least 90% of its RAM for at least 1 hour."; 96 | }; 97 | node_ram_using_90percent_zfs_nodes = { 98 | condition = excl: "node_memory_MemAvailable_bytes{${excl}} + node_zfs_arc_size{${excl}} - node_zfs_arc_c_min{${excl}} < node_memory_MemTotal_bytes{${excl}} * 0.1"; 99 | time = "1h"; 100 | summary = "{{$labels.alias}}: Using lots of RAM."; 101 | description = "{{$labels.alias}} is using at least 90% of its RAM for at least 1 hour."; 102 | }; 103 | node_swap_using_30percent = { 104 | condition = excl: "node_memory_SwapTotal_bytes{${excl}} - (node_memory_SwapFree_bytes{${excl}} + node_memory_SwapCached_bytes{${excl}}) > node_memory_SwapTotal_bytes{${excl}} * 0.3"; 105 | time = "30m"; 106 | summary = "{{$labels.alias}}: Using more than 30% of its swap."; 107 | description = "{{$labels.alias}} is using 30% of its swap space for at least 30 minutes."; 108 | }; 109 | node_oom = { 110 | condition = excl: "rate(node_vmstat_oom_kill{${excl}}[1h]) > 0"; 111 | summary = "{{$labels.alias}}: OOM killer fired"; 112 | }; 113 | node_visible_confluence_space = { 114 | condition = excl: "node_visible_confluence_space{${excl}} != 0"; /* fixed: typo "cann" in the user-facing summary */ 115 | summary = "crowd prometheus can see the {{$labels.space_name}} confluence space!"; 116 | description = "crowd user `prometheus` can see the `{{$labels.space_name}}` 
confluence space."; 117 | }; 118 | node_visible_jira_project = { 119 | condition = excl: "node_visible_jira_project{${excl}} != 0"; 120 | summary = "crowd user `prometheus` can see the `{{$labels.project_name}}` jira project."; 121 | description = "crowd user `prometheus` can see the `{{$labels.project_name}}` jira project."; 122 | }; 123 | node_zfs_errors = { 124 | /* fixed: node_zfs_arc_l2_writes_error was summed twice; deduplicated so {{$value}} reports the real L2ARC error count (the `> 0` trigger is unaffected) */ condition = excl: "node_zfs_arc_l2_writes_error{${excl}} + node_zfs_arc_l2_io_error{${excl}} > 0"; 125 | summary = "{{$labels.alias}}: ZFS IO errors: {{$value}}"; 126 | description = "{{$labels.alias}} reports: {{$value}} ZFS IO errors. Drive(s) are failing."; 127 | }; 128 | node_hwmon_temp = { 129 | condition = excl: "node_hwmon_temp_celsius{${excl}} > (node_hwmon_temp_crit_celsius{${excl}} > 0) - 5"; 130 | time = "5m"; 131 | summary = "{{$labels.alias}}: Sensor {{$labels.sensor}}/{{$labels.chip}} temp is high: {{$value}} "; 132 | description = "{{$labels.alias}} reports hwmon sensor {{$labels.sensor}}/{{$labels.chip}} temperature value is nearly critical: {{$value}}"; 133 | }; 134 | node_conntrack_limit = { 135 | condition = excl: "node_nf_conntrack_entries_limit{${excl}} - node_nf_conntrack_entries{${excl}} < 1000"; 136 | time = "5m"; 137 | summary = "{{$labels.alias}}: Number of tracked connections high"; 138 | description = "{{$labels.alias}} has only {{$value}} free slots for connection tracking available."; 139 | }; 140 | node_reboot = { 141 | condition = excl: "time() - node_boot_time_seconds{${excl}} < 300"; 142 | summary = "{{$labels.alias}}: Reboot"; 143 | description = "{{$labels.alias}} just rebooted."; 144 | }; 145 | node_uptime = { 146 | condition = excl: "time() - node_boot_time_seconds{${excl}} > 2592000"; 147 | page = false; 148 | summary = "{{$labels.alias}}: Uptime monster"; 149 | description = "{{$labels.alias}} has been up for more than 30 days."; 150 | }; 151 | blackbox_down = { 152 | condition = excl: ''min(up{job=~"blackbox.+"${ensureComma 
excl}}) by (source, job, instance) == 0''; 153 | time = "3m"; 154 | summary = "{{$labels.instance}}: {{$labels.job}} blackbox exporter from {{$labels.source}} is down."; 155 | }; 156 | blackbox_probe = { 157 | condition = excl: "probe_success{${excl}} == 0"; 158 | page = false; 159 | summary = "{{$labels.instance}}: {{$labels.job}} probe from {{$labels.source}} has failed!"; 160 | }; 161 | blackbox_probe_high_latency = { 162 | condition = excl: "probe_duration_seconds{${excl}} > 2 and probe_success{${excl}} == 1"; 163 | summary = "{{$labels.instance}}: {{$labels.job}} probe from {{$labels.source}} takes too long!"; 164 | description = "{{$labels.instance}}: {{$labels.job}} probe from {{$labels.source}} is encountering high latency!"; 165 | }; 166 | blackbox_probe_cert_expiry = { 167 | condition = excl: "probe_ssl_earliest_cert_expiry{${excl}} < 7*24*3600"; 168 | summary = "{{$labels.instance}}: TLS certificate from {{$labels.source}} is about to expire."; 169 | description = "{{$labels.instance}}: The TLS certificate from {{$labels.source}} will expire in less than 7 days: {{$value}}s"; 170 | }; 171 | unifi_devices_adopted_changed = { 172 | condition = excl: ''sum(abs(delta(unpoller_site_adopted{status="ok"${ensureComma excl}}[1h]))) >= 1''; 173 | summary = "Unifi: number of adopted devices has changed: {{$value}}"; 174 | }; 175 | unifi_device_excessive_memory_usage = { 176 | condition = excl: ''unpoller_device_memory_utilization_ratio{${excl}} >= 0.9''; 177 | summary = "Unifi: memory utilisation exceeds 90% on device {{$labels.name}}"; 178 | }; 179 | unifi_device_reboot = { 180 | /* consistency fix: host exclusions were only applied to the left operand of `and`; apply them to both selectors like every other rule */ condition = excl: ''unpoller_device_uptime_seconds{site_name!~"down.+"${ensureComma excl}} > 0 and unpoller_device_uptime_seconds{site_name!~"down.+"${ensureComma excl}} < 300''; 181 | summary = "Unifi: device {{$labels.name}} reboot"; 182 | description = "Unifi: device {{$labels.name}} just rebooted"; 183 | }; 184 | unifi_device_down = { 185 | condition = excl: 
''unpoller_site_adopted{status="error",site_name!~"down.+"${ensureComma excl}} >= 1''; 186 | summary = "Unifi: {{$value}} device(s) down in {{$labels.site_name}}"; 187 | }; 188 | mail_down = { 189 | condition = excl: ''up{job="mail"${ensureComma excl}} == 0''; 190 | summary = "{{$labels.alias}}: Mail exporter is down."; 191 | description = "Mail exporter on {{$labels.alias}} hasn't been responding more than 2 minutes."; 192 | }; 193 | mail_delivery_unsuccessful = { 194 | condition = excl: "mail_deliver_success{${excl}} == 0"; 195 | summary = "{{$labels.alias}}: Mail delivery unsuccessful"; 196 | }; 197 | mail_delivery_late = { 198 | condition = excl: "increase(mail_late_mails_total{${excl}}[1h]) >= 1"; 199 | summary = "{{$labels.alias}}: Mail delivery late"; 200 | }; 201 | mail_send_fails = { 202 | condition = excl: "increase(mail_send_fails_total{${excl}}[1h]) >= 1"; 203 | summary = "{{$labels.alias}}: Mail send failed"; 204 | }; 205 | postfix_queue_deferred_messages = { 206 | condition = excl: ''postfix_showq_message_size_bytes_count{queue="deferred"${ensureComma excl}} > 1''; 207 | summary = "{{$labels.alias}}: postfix has deferred messages in queue"; 208 | }; 209 | alerts_silences_changed = { 210 | condition = excl: ''round(delta(alertmanager_silences{state="active"${ensureComma excl}}[1h])) != 0''; 211 | summary = "alertmanager: number of active silences has changed: {{$value}}"; 212 | }; 213 | smart_critical_attributes = { 214 | condition = excl: ''smartmon_attr_raw_value{name=~".*_retry_count|reallocated_.*|current_pending_sector"${ensureComma excl}} != 0''; 215 | summary = "{{$labels.alias}}: {{$labels.disk}} is experiencing sector errors"; 216 | description = "{{$labels.alias}}: {{$labels.name}} on {{$labels.disk}} is {{$value}}"; 217 | }; 218 | nextcloud_down = { 219 | condition = excl: ''nextcloud_up{${excl}} < 1''; 220 | summary = "{{$labels.alias}}: nextcloud down"; 221 | time = "15m"; 222 | }; 223 | } 224 | 
-------------------------------------------------------------------------------- /modules/monitoring/alerting.nix: -------------------------------------------------------------------------------- 1 | { config, pkgs, lib, ... }: 2 | with lib; 3 | let 4 | cfg = config.mayflower.monitoring.server; 5 | alertRuleModule = types.submodule ({ name, config, ... }: { 6 | options = { 7 | enable = mkEnableOption (mdDoc "this alerting rule") // { default = true; }; 8 | page = mkEnableOption (mdDoc "paging for this alert") // { default = true; }; 9 | condition = mkOption { 10 | type = types.functionTo types.str; 11 | description = mdDoc "Alert condition"; 12 | }; 13 | summary = mkOption { 14 | type = types.str; 15 | description = mdDoc "Short summary description of the alert"; 16 | }; 17 | description = mkOption { 18 | type = types.nullOr types.str; 19 | description = mdDoc "Longer description of the alert"; 20 | default = null; 21 | }; 22 | time = mkOption { 23 | type = types.str; 24 | description = mdDoc "Duration for which the condition must hold for the alert to fire"; 25 | default = "2m"; 26 | }; 27 | renderedCondition = mkOption { 28 | internal = true; 29 | readOnly = true; 30 | type = types.str; 31 | description = mdDoc "condition, but with all exclusions added"; 32 | }; 33 | }; 34 | config = { 35 | description = lib.mkDefault config.summary; 36 | renderedCondition = config.condition (renderExclusions (exclusionsByHosts.${name} or [])); 37 | }; 38 | }); 39 | 40 | enabledRules = mapAttrs (n: v: removeAttrs v ["enable"]) (filterAttrs (n: v: v.enable) cfg.alertRules); 41 | 42 | renderExclusions = concatMapStringsSep 43 | "," 44 | (host: "instance!~'${host}(:[0-9]+)?'"); 45 | 46 | /* 47 | * List of excluded rules by host names. 
48 | * Example: 49 | * { 50 | * "node_down" = [ "example.org" "container.example.org" "router.lan" ]; 51 | * "node_deployed" = [ "example.org" ]; 52 | * } 53 | */ 54 | exclusionsByHosts = foldl 55 | (excludedRules: currentHost: 56 | let 57 | inherit (currentHost.mayflower.monitoring) disabledAlertRules; 58 | 59 | appendChanges' = exclusions: rule: hostNameOrNames: (exclusions.${rule} or []) ++ (toList hostNameOrNames); 60 | appendChanges = appendChanges' excludedRules; 61 | 62 | /* 63 | * Creates a list of changes to append to the exclusion list. 64 | * 65 | * Example: 66 | * mkDelta [ "node_down" "node_deployed" ] "example.org" 67 | * ⇒ 68 | * { 69 | * "node_down" = "example.org"; 70 | * "node_deployed" = "example.org"; 71 | * } 72 | */ 73 | mkDelta = disabledAlertRules: hostName: 74 | listToAttrs 75 | (map (rule: nameValuePair rule hostName) disabledAlertRules); 76 | 77 | /* 78 | * Attrset of excluded rules from a node inside the deployment (later added via `appendChanges`) 79 | * to the final result. 80 | * 81 | * Example: 82 | * Given a node with 83 | * { 84 | * deployment.targetHost = "example.org"; 85 | * mayflower.monitoring.disabledAlertRules = [ "node_up" ]; 86 | * } 87 | * ⇒ 88 | * { 89 | * "node_deployed" = "node_up"; 90 | * } 91 | */ 92 | deltaHost = mkDelta disabledAlertRules currentHost.deployment.targetHost; 93 | 94 | /* 95 | * Attrset of excluded rules for containers on a given host. 
96 | * 97 | * Example: 98 | * Given a node with: 99 | * { 100 | * mayflower.monitoring.containerDomains."br-lan" = "lan.example.org"; 101 | * containers.foo = { 102 | * hostBridge = "br-lan"; 103 | * config = { 104 | * networking.hostName = "container"; 105 | * mayflower.monitoring.disabledAlertRules = [ "node_up" ]; 106 | * }; 107 | * }; 108 | * } 109 | * ⇒ 110 | * { 111 | * "node_up" = [ "foo.lan.example.org" ]; 112 | * } 113 | */ 114 | deltaContainers = foldl 115 | (exclusions: container: 116 | let 117 | inherit (container.config.mayflower.monitoring) disabledAlertRules; 118 | hostName = container.config.networking.hostName 119 | + "." 120 | + currentHost.mayflower.monitoring.containerDomains.${container.hostBridge}; 121 | in exclusions // (mapAttrs (appendChanges' exclusions) (mkDelta disabledAlertRules hostName))) 122 | {} 123 | (attrValues currentHost.containers); 124 | in 125 | excludedRules 126 | // (mapAttrs appendChanges deltaHost) 127 | // (mapAttrs appendChanges deltaContainers)) 128 | {} 129 | (attrValues config.mayflower.machines); 130 | in { 131 | options.mayflower.monitoring.server = { 132 | alertRules = mkOption { 133 | type = types.attrsOf alertRuleModule; 134 | }; 135 | # A bit hacky, but better than not making it configurable at all I guess. 136 | filesystemFilter = mkOption { 137 | type = types.str; 138 | default = ''fstype!="ramfs",device!="rpc_pipefs",device!="lxcfs",device!="nsfs",device!="borgfs"''; 139 | }; 140 | }; 141 | options.mayflower.monitoring.disabledAlertRules = mkOption { 142 | type = types.listOf types.str; 143 | default = []; 144 | description = mdDoc '' 145 | Can be set on any node in the deployment. Ensures that 146 | each alert rule inside this list doesn't fire for that node. 
147 | ''; 148 | }; 149 | config = { 150 | mayflower.monitoring.server.alertRules = import ./alert-rules.nix { inherit (cfg) filesystemFilter; }; 151 | services.prometheus.ruleFiles = singleton (pkgs.writeText "prometheus-rules.yml" (builtins.toJSON { 152 | groups = singleton { 153 | name = "mf-alerting-rules"; 154 | rules = flip mapAttrsToList enabledRules (name: opts: { 155 | alert = name; 156 | expr = opts.renderedCondition; 157 | for = opts.time; 158 | labels = optionalAttrs opts.page { severity = "page"; }; 159 | annotations = { 160 | inherit (opts) summary; 161 | } // optionalAttrs (opts.description != null) { inherit (opts) description; }; 162 | }); 163 | }; 164 | })); 165 | }; 166 | } 167 | -------------------------------------------------------------------------------- /modules/monitoring/blackbox-exporter.nix: -------------------------------------------------------------------------------- 1 | { config, lib, pkgs, ... }: 2 | 3 | with lib; 4 | 5 | let 6 | cfg = config.mayflower.monitoring.blackboxExporter; 7 | in { 8 | options = { 9 | mayflower.monitoring.blackboxExporter = { 10 | enable = mkEnableOption (mdDoc "Mayflower Monitoring Blackbox Exporter"); 11 | staticBlackboxHttpTargets = mkOption { 12 | type = types.listOf types.str; 13 | default = []; 14 | description = mdDoc ""; 15 | }; 16 | staticBlackboxHttpsTargets = mkOption { 17 | type = types.listOf types.str; 18 | default = []; 19 | description = mdDoc ""; 20 | }; 21 | staticBlackboxIcmpTargets = mkOption { 22 | type = types.listOf types.str; 23 | default = []; 24 | description = mdDoc ""; 25 | }; 26 | staticBlackboxTcpTargets = mkOption { 27 | type = types.listOf types.str; 28 | default = []; 29 | description = mdDoc ""; 30 | }; 31 | checkIP6 = mkOption { 32 | type = types.bool; 33 | default = true; 34 | description = mdDoc ""; 35 | }; 36 | }; 37 | }; 38 | config = mkIf cfg.enable { 39 | systemd.services.prometheus-blackbox-exporter.serviceConfig.LimitNOFILE = 1024000; 40 | 
services.prometheus.exporters.blackbox = { 41 | enable = true; 42 | configFile = let 43 | if6 = name: if cfg.checkIP6 then name else null; 44 | in pkgs.writeText "blackbox-exporter.yaml" (builtins.toJSON { 45 | modules = { 46 | http_2xx = { 47 | prober = "http"; 48 | timeout = "5s"; 49 | }; 50 | https_2xx = { 51 | prober = "http"; 52 | timeout = "5s"; 53 | http = { 54 | fail_if_not_ssl = true; 55 | }; 56 | }; 57 | tcp_v4 = { 58 | prober = "tcp"; 59 | timeout = "5s"; 60 | tcp = { 61 | preferred_ip_protocol = "ip4"; 62 | }; 63 | }; 64 | ${if6 "tcp_v6"} = { 65 | prober = "tcp"; 66 | timeout = "5s"; 67 | tcp = { 68 | preferred_ip_protocol = "ip6"; 69 | }; 70 | }; 71 | icmp_v4 = { 72 | prober = "icmp"; 73 | timeout = "5s"; 74 | icmp = { 75 | preferred_ip_protocol = "ip4"; 76 | }; 77 | }; 78 | ${if6 "icmp_v6"} = { 79 | prober = "icmp"; 80 | timeout = "5s"; 81 | icmp = { 82 | preferred_ip_protocol = "ip6"; 83 | }; 84 | }; 85 | }; 86 | }); 87 | }; 88 | }; 89 | } 90 | -------------------------------------------------------------------------------- /modules/monitoring/default.nix: -------------------------------------------------------------------------------- 1 | { config, lib, pkgs, ... 
}: 2 | 3 | with lib; 4 | 5 | let 6 | cfg = config.mayflower.monitoring; 7 | hostName = name: machine: machine.deployment.targetHost or 8 | "${machine.networking.hostName}.${machine.containerDomains.${machine.hostBridge}}"; 9 | hostNames = hosts: mapAttrsToList hostName hosts; 10 | 11 | #" machine config attrs -> { containerName = container machine config // hostBridge // containerDomains } 12 | containersOfMachine = m: filterAttrs (_: c: c.hostBridge != null) (flip mapAttrs m.containers (_: c: 13 | c.config // { hostBridge = c.hostBridge; containerDomains = m.mayflower.monitoring.containerDomains; } 14 | )); 15 | 16 | # All hosts in the same datacenter as this host 17 | allMachinesSameDC = optionalAttrs (cfg.datacenter != null) ( 18 | flip filterAttrs config.mayflower.machines (_: v: cfg.datacenter == v.mayflower.monitoring.datacenter)); 19 | allHostsSameDC = fold mergeAttrs allMachinesSameDC (mapAttrsToList (_: machine: containersOfMachine machine) allMachinesSameDC); 20 | allHostNamesSameDC = hostNames allHostsSameDC; 21 | 22 | allMachines = config.mayflower.machines; 23 | allHosts = fold mergeAttrs allMachines (mapAttrsToList (_: machine: containersOfMachine machine) allMachines); 24 | allHostNames = hostNames allHosts; 25 | 26 | alertmanagerHostNames = hostNames (flip filterAttrs allHosts (_: m: 27 | m.mayflower.monitoring.server.enable && m.mayflower.monitoring.server.enableAlertmanagerMeshing 28 | )); 29 | prometheusHostNamesSameDC = hostNames (flip filterAttrs allHostsSameDC (_: m: 30 | m.services.prometheus.enable 31 | )); 32 | prometheusHostNamesOtherDC = hostNames (flip filterAttrs allHosts (n: m: 33 | m.services.prometheus.enable && !(elem (hostName n m) prometheusHostNamesSameDC) 34 | )); 35 | grafanaHostNames = hostNames (flip filterAttrs allHostsSameDC (_: m: 36 | m.services.grafana.enable 37 | )); 38 | unboundHostNames = hostNames (flip filterAttrs allHostsSameDC (_: m: 39 | m.services.prometheus.exporters.unbound.enable 40 | )); 41 | 
nginxExporterHostNames = hostNames (flip filterAttrs allHostsSameDC (_: m: 42 | m.services.prometheus.exporters.nginx.enable 43 | )); 44 | unifiExporterHostNames = hostNames (flip filterAttrs allHostsSameDC (_: m: 45 | m.services.prometheus.exporters.unifi.enable 46 | )); 47 | unifiPollerExporterHostNames = hostNames (flip filterAttrs allHostsSameDC (_: m: 48 | m.services.prometheus.exporters.unpoller.enable 49 | )); 50 | fritzboxExporterHostNames = hostNames (flip filterAttrs allHostsSameDC (_: m: 51 | m.services.prometheus.exporters.fritzbox.enable 52 | )); 53 | nginxSSLVhosts = flatten (flip mapAttrsToList allHosts (_: m: 54 | optionals m.services.nginx.enable ( 55 | attrNames (flip filterAttrs m.services.nginx.virtualHosts (_: vh: 56 | vh.forceSSL || vh.addSSL || vh.onlySSL 57 | )) 58 | ) 59 | )); 60 | postfixExporterHostNames = hostNames (flip filterAttrs allHostsSameDC (_: m: 61 | m.services.prometheus.exporters.postfix.enable 62 | )); 63 | dovecotExporterHostNames = hostNames (flip filterAttrs allHostsSameDC (_: m: 64 | m.services.prometheus.exporters.dovecot.enable 65 | )); 66 | dockerRunnerHostNames = hostNames (flip filterAttrs allHostsSameDC (_: m: 67 | m.mayflower.docker-runner.enable 68 | )); 69 | rspamdHostNames = hostNames (flip filterAttrs allHostsSameDC (_: m: 70 | m.services.rspamd.enable 71 | )); 72 | mailExporterHostNames = hostNames (flip filterAttrs allHostsSameDC (_: m: 73 | m.services.prometheus.exporters.mail.enable 74 | )); 75 | matrixSynapseHostNames = hostNames (flip filterAttrs allHostsSameDC (_: m: 76 | m.mayflower.matrix.enable 77 | )); 78 | nextcloudExporterHostNames = hostNames (flip filterAttrs allHostsSameDC (_: m: 79 | m.services.prometheus.exporters.nextcloud.enable 80 | )); 81 | zfsExporterHostNames = hostNames (flip filterAttrs allHostsSameDC (_: m: 82 | machineSupportsFileSystemZfs m 83 | )); 84 | 85 | extraScrapeConfigsSameDC = foldAttrs (esc: acc: acc//esc) {} (flip mapAttrsToList allHostsSameDC ( 86 | _: m: 
m.mayflower.monitoring.extraScrapeConfigs 87 | )); 88 | 89 | mkScrapeConfigs = configs: flip mapAttrsToList configs (k: v: 90 | let 91 | static_configs = flip map v.hostNames (name: { 92 | targets = [ "${name}:${toString v.port}" ]; 93 | labels.alias = name; 94 | }); 95 | in 96 | (mkIf (static_configs != []) ({ 97 | inherit static_configs; 98 | job_name = k; 99 | scrape_interval = "30s"; 100 | } // (removeAttrs v [ "hostNames" "port" ])))); 101 | 102 | mkBlackboxConfig = { hostname, module, targets, interval ? "60s" }: 103 | { 104 | job_name = "blackbox_${module}_${hostname}"; 105 | scrape_interval = interval; 106 | metrics_path = "/probe"; 107 | params = { 108 | module = singleton module; 109 | }; 110 | static_configs = [ 111 | { 112 | inherit targets; 113 | labels = { 114 | source = hostname; 115 | }; 116 | } 117 | ]; 118 | relabel_configs = [ 119 | { 120 | source_labels = [ "__address__" ]; 121 | regex = "(.*)(:80)?"; 122 | target_label = "__param_target"; 123 | replacement = "\${1}"; 124 | } 125 | { 126 | source_labels = [ "__param_target" ]; 127 | target_label = "instance"; 128 | } 129 | { 130 | source_labels = []; 131 | target_label = "__address__"; 132 | replacement = "${hostname}:9115"; 133 | } 134 | ]; 135 | }; 136 | 137 | mkSNMPConfig = { hostname, module, targets, timeout, interval ? 
"25s" }: let 138 | to_int_seconds = string: (list: ( 139 | number: unit: {"s" = 1; "m" = 60; "h" = 3600;}.${unit} * (lib.toInt number)) 140 | (builtins.elemAt list 0) (builtins.elemAt list 1) 141 | ) (builtins.match "([0-9]+)(.)" string); 142 | interval_int = to_int_seconds interval; 143 | interval_sec = "${toString interval_int}s"; 144 | timeout_int = to_int_seconds timeout; 145 | timeout_sec = "${toString timeout_int}s"; 146 | in { 147 | job_name = "snmp_${module}"; 148 | scrape_interval = if interval_int < timeout_int then timeout_sec else interval_sec; 149 | scrape_timeout = timeout_sec; 150 | metrics_path = "/snmp"; 151 | params.module = [module]; 152 | static_configs = [{ 153 | inherit targets; 154 | labels.source = hostname; 155 | }]; 156 | relabel_configs = [ 157 | { source_labels = [ "__address__" ]; target_label = "__param_target"; } 158 | { source_labels = [ "__param_target" ]; target_label = "instance"; } 159 | { target_label = "__address__"; replacement = "${hostname}:9116"; } 160 | ]; 161 | }; 162 | 163 | 164 | mountsFileSystemType = fsType: {} != filterAttrs (n: v: v.fsType == fsType) config.fileSystems; 165 | 166 | machineSupportsFileSystemZfs = config: config.boot.supportedFilesystems.zfs or false; 167 | supportsFileSystemZfs = machineSupportsFileSystemZfs config; 168 | 169 | in { 170 | imports = [ 171 | ./blackbox-exporter.nix 172 | ./snmp-exporter.nix 173 | ./smartmon-textfile.nix 174 | ./alerting.nix 175 | ]; 176 | options = { 177 | # extends base nginx.virtualHosts 178 | services.nginx.virtualHosts = mkOption { 179 | type = with types; attrsOf (submodule { 180 | options.expectedStatusCode = mkOption { 181 | type = types.int; 182 | description = mdDoc '' 183 | HTTP Status Code expected at / on the virtual Host. 
184 | ''; 185 | }; 186 | }); 187 | }; 188 | 189 | mayflower.monitoring = { 190 | containerDomains = mkOption { 191 | type = types.attrsOf types.str; 192 | description = mdDoc '' 193 | Map of bridge names - assigned to containers - 194 | to domains appended to the container name to monitor them. 195 | ''; 196 | }; 197 | 198 | datacenter = mkOption { 199 | type = types.nullOr types.str; 200 | default = null; 201 | description = mdDoc '' 202 | Domain in which this node is located. 203 | ''; 204 | }; 205 | 206 | extraScrapeConfigs = mkOption { 207 | type = types.attrsOf types.attrs; 208 | default = {}; 209 | example = literalExample '' 210 | { 211 | confluence = { 212 | hostNames = [ "confluence.foo.bar" ]; 213 | port = 8090; 214 | metrics_path = "/plugins/servlet/prometheus/metrics"; 215 | params = { 216 | token = [ "verySecretAccessToken" ]; 217 | }; 218 | }; 219 | } 220 | ''; 221 | description = mdDoc '' 222 | Custom scrape configs added to the prometheus instances in the same datacenter. 223 | ''; 224 | }; 225 | 226 | server = { 227 | enable = mkEnableOption (mdDoc "Mayflower-oriented monitoring server with prometheus"); 228 | 229 | configurePrometheusAlertmanagers = mkOption { 230 | type = types.bool; 231 | default = true; 232 | description = mdDoc '' 233 | Automatically add all alertmanagers handled by this module to prometheus. 
234 | ''; 235 | }; 236 | 237 | enableAlertmanagerMeshing = mkOption { 238 | type = types.bool; 239 | default = true; 240 | description = mdDoc "Add this host to the cluster peers of every other host"; 241 | }; 242 | 243 | alertmanagerExtraPeers = mkOption { 244 | type = types.listOf types.str; 245 | default = []; 246 | description = mdDoc "List of additional cluster peers"; 247 | }; 248 | 249 | alertmanagerPageReceiver = mkOption { 250 | type = types.attrs; 251 | default = {}; 252 | description = mdDoc "Receiver settings for alerts with severity page"; 253 | }; 254 | 255 | alertmanagerReceiver = mkOption { 256 | type = types.attrs; 257 | default = {}; 258 | description = mdDoc "Receiver settings for all alerts"; 259 | }; 260 | 261 | blackboxExporterHosts = mkOption { 262 | type = types.listOf types.str; 263 | default = []; 264 | description = mdDoc "Hostnames of blackboxExporter instances"; 265 | }; 266 | }; 267 | }; 268 | }; 269 | 270 | config = mkMerge [ 271 | { 272 | 273 | services.nginx.statusPage = true; 274 | services.prometheus.exporters = { 275 | nginx = { 276 | enable = config.services.nginx.enable; 277 | openFirewall = config.services.nginx.enable; 278 | }; 279 | postfix = { 280 | enable = config.services.postfix.enable; 281 | showqPath = "/var/lib/postfix/queue/public/showq"; 282 | systemd.enable = true; 283 | }; 284 | dovecot = { 285 | enable = config.services.dovecot2.enable; 286 | group = "dovecot2"; 287 | socketPath = "/var/run/dovecot2/old-stats"; 288 | scopes = [ "user" "global" ]; 289 | }; 290 | node = { 291 | enable = true; 292 | openFirewall = true; 293 | extraFlags = [ "--collector.disable-defaults" ]; 294 | enabledCollectors = [ 295 | "arp" 296 | "bcache" 297 | "conntrack" 298 | "filefd" 299 | "logind" 300 | "netclass" 301 | "netdev" 302 | "netstat" 303 | "sockstat" 304 | "softnet" 305 | "stat" 306 | "systemd" 307 | "textfile" 308 | "textfile.directory /run/prometheus-node-exporter" 309 | "thermal_zone" 310 | "time" 311 | "udp_queues" 312 | 
"uname" 313 | "vmstat" 314 | ] ++ optionals (!config.boot.isContainer) [ 315 | "cpu" 316 | "cpufreq" 317 | "diskstats" 318 | "edac" 319 | "entropy" 320 | "filesystem" 321 | "hwmon" 322 | "interrupts" 323 | "ksmd" 324 | "loadavg" 325 | "meminfo" 326 | "pressure" 327 | "timex" 328 | ] ++ ( 329 | optionals (config.services.nfs.server.enable) [ "nfsd" ] 330 | ) ++ ( 331 | optionals ("" != config.boot.swraid.mdadmConf) [ "mdadm" ] 332 | ) ++ ( 333 | optionals ({} != config.networking.bonds) [ "bonding" ] 334 | ) ++ ( 335 | optionals (mountsFileSystemType "nfs") [ "nfs" ] 336 | ) ++ ( 337 | optionals (mountsFileSystemType "xfs") [ "xfs" ] 338 | ) ++ ( 339 | optionals (supportsFileSystemZfs) [ "zfs" ] 340 | ); 341 | }; 342 | nextcloud = { 343 | enable = config.services.nextcloud.enable; 344 | openFirewall = config.services.nextcloud.enable; 345 | url = "https://${config.services.nextcloud.hostName}"; 346 | }; 347 | zfs = { 348 | enable = supportsFileSystemZfs; 349 | openFirewall = supportsFileSystemZfs; 350 | }; 351 | }; 352 | } 353 | (mkIf cfg.server.enable (mkMerge [ 354 | { 355 | systemd.services.alertmanager.serviceConfig.LimitNOFILE = 1024000; 356 | services.prometheus.alertmanager = { 357 | enable = true; 358 | clusterPeers = optionals cfg.server.enableAlertmanagerMeshing ( 359 | alertmanagerHostNames ++ cfg.server.alertmanagerExtraPeers 360 | ); 361 | configuration = { 362 | route = { 363 | receiver = "default"; 364 | routes = [ 365 | { group_by = [ "alertname" "alias" ]; 366 | group_wait = "5s"; 367 | group_interval = "2m"; 368 | repeat_interval = "2h"; 369 | match = { severity = "page"; }; 370 | receiver = "page"; 371 | } 372 | { group_by = [ "alertname" ]; 373 | group_wait = "5s"; 374 | group_interval = "2m"; 375 | repeat_interval = "2h"; 376 | match_re = { metric = ".+"; }; 377 | receiver = "page"; 378 | } 379 | { group_by = [ "alertname" "alias" ]; 380 | group_wait = "30s"; 381 | group_interval = "2m"; 382 | repeat_interval = "6h"; 383 | receiver = "all"; 384 
| } 385 | ]; 386 | }; 387 | receivers = [ 388 | ({ name = "page"; } // cfg.server.alertmanagerPageReceiver) 389 | ({ name = "all"; } // cfg.server.alertmanagerReceiver) 390 | { name = "default"; } 391 | ]; 392 | }; 393 | }; 394 | systemd.services.prometheus.serviceConfig.LimitNOFILE = 1024000; 395 | services.prometheus = { 396 | enable = true; 397 | scrapeConfigs = (mkScrapeConfigs ({ 398 | prometheus = { 399 | hostNames = prometheusHostNamesSameDC; 400 | port = 9090; 401 | }; 402 | alertmanager = { 403 | hostNames = prometheusHostNamesSameDC; 404 | port = 9093; 405 | }; 406 | unbound = { 407 | hostNames = unboundHostNames; 408 | port = 9167; 409 | }; 410 | node = { 411 | hostNames = allHostNamesSameDC; 412 | port = 9100; 413 | }; 414 | nginx = { 415 | hostNames = nginxExporterHostNames; 416 | port = 9113; 417 | }; 418 | fritz = { 419 | hostNames = fritzboxExporterHostNames; 420 | port = 9133; 421 | }; 422 | unpoller = { 423 | hostNames = unifiPollerExporterHostNames; 424 | port = 9130; 425 | }; 426 | postfix = { 427 | hostNames = postfixExporterHostNames; 428 | port = 9154; 429 | }; 430 | dovecot = { 431 | hostNames = dovecotExporterHostNames; 432 | port = 9166; 433 | }; 434 | docker-runner = { 435 | hostNames = dockerRunnerHostNames; 436 | port = 9055; 437 | }; 438 | grafana = { 439 | hostNames = grafanaHostNames; 440 | port = 3000; 441 | }; 442 | rspamd = { 443 | hostNames = rspamdHostNames; 444 | port = 80; 445 | # Assumes that `localhost:11334` is exposed via `https://hostname/rspamd`. 
446 | metrics_path = "/rspamd/metrics"; 447 | }; 448 | mail = { 449 | hostNames = mailExporterHostNames; 450 | port = 9225; 451 | }; 452 | synapse = { 453 | hostNames = matrixSynapseHostNames; 454 | port = 9092; 455 | }; 456 | nextcloud = { 457 | hostNames = nextcloudExporterHostNames; 458 | port = 9205; 459 | }; 460 | zfs = { 461 | hostNames = zfsExporterHostNames; 462 | port = 9134; 463 | }; 464 | } // extraScrapeConfigsSameDC)) ++ 465 | (flip concatMap cfg.server.blackboxExporterHosts (hostname: 466 | let withIpVersions = name: ["${name}_v4"] ++ optional cfg.blackboxExporter.checkIP6 "${name}_v6"; in 467 | (forEach (withIpVersions "icmp") (module: (mkBlackboxConfig 468 | { 469 | inherit hostname module; 470 | targets = (hostNames allHostsSameDC) 471 | ++ cfg.blackboxExporter.staticBlackboxIcmpTargets 472 | ++ prometheusHostNamesOtherDC; 473 | } 474 | ))) ++ 475 | (flip map (withIpVersions "tcp") (module: (mkBlackboxConfig 476 | { 477 | inherit hostname module; 478 | targets = cfg.blackboxExporter.staticBlackboxTcpTargets; 479 | } 480 | ))) ++ 481 | [(mkBlackboxConfig 482 | { 483 | inherit hostname; 484 | module = "https_2xx"; 485 | targets = (filter (n: n != "_" && n != "localhost") 486 | nginxSSLVhosts ++ cfg.blackboxExporter.staticBlackboxHttpsTargets); 487 | interval = "50s"; 488 | }) 489 | (mkBlackboxConfig 490 | { 491 | inherit hostname; 492 | module = "http_2xx"; 493 | targets = cfg.blackboxExporter.staticBlackboxHttpTargets; 494 | interval = "50s"; 495 | } 496 | )] 497 | )) ++ 498 | (forEach (attrNames cfg.snmpExporter.modules) (module: mkSNMPConfig { 499 | hostname = "localhost"; 500 | inherit module; 501 | inherit (cfg.snmpExporter.modules.${module}) targets timeout; 502 | }) 503 | ); 504 | }; 505 | } 506 | (mkIf cfg.server.configurePrometheusAlertmanagers { 507 | services.prometheus = { 508 | alertmanagers = singleton { 509 | static_configs = singleton { 510 | targets = flip map alertmanagerHostNames (n: "${n}:9093"); 511 | }; 512 | }; 513 | }; 514 | 
})
])
)];
}
--------------------------------------------------------------------------------
/modules/monitoring/smartmon-textfile.nix:
--------------------------------------------------------------------------------
{ config, lib, pkgs, ... }:

let
  # Textfile-collector helper scripts maintained by the prometheus-community
  # project, pinned to a fixed revision for reproducibility.
  collectorScripts = pkgs.fetchFromGitHub {
    owner = "prometheus-community";
    repo = "node-exporter-textfile-collector-scripts";
    rev = "71a86190cef829a8d4fdadf86bda811a5b1c04a1";
    sha256 = "0xw77yk6a3mqbwjrrwpjvcsjy5nfljbfzkiridhm4masldx1hllr";
  };

  smartmonScript = "${collectorScripts}/smartmon.py";

  # Output file picked up by the node exporter's textfile collector
  # (see the "textfile.directory /run/prometheus-node-exporter" flag
  # configured elsewhere in this module tree).
  metricsFile = "/run/prometheus-node-exporter/prometheus_smartmon.prom";

  # Group of the node-exporter unit, so the exporter can read the file.
  exporterGroup = config.systemd.services.prometheus-node-exporter.serviceConfig.Group;
in
{
  # S.M.A.R.T. data only exists on real disks; skip containers and KVM guests.
  config = lib.mkIf (!config.boot.isContainer && !config.mayflower.kvm.enable) {
    systemd.timers.prometheus-smartmon-textfile = {
      enable = true;
      after = [ "prometheus-node-exporter.service" ];
      bindsTo = [ "prometheus-node-exporter.service" ];
      wantedBy = [ "timers.target" ];
      timerConfig = {
        OnStartupSec = "60s";
        OnUnitActiveSec = "15m";
        Unit = "prometheus-smartmon-textfile.service";
      };
    };

    # Pre-create the metrics file readable by the exporter group only.
    systemd.tmpfiles.rules = [
      "f ${metricsFile} 0640 root ${exporterGroup}"
    ];

    systemd.services.prometheus-smartmon-textfile = {
      enable = true;
      after = [ "prometheus-node-exporter.service" ];
      bindsTo = [ "prometheus-node-exporter.service" ];
      path = [ pkgs.smartmontools ];
      # sponge soaks up the whole script output before writing, so the
      # collector never scrapes a half-written file.
      script = ''
        ${pkgs.python3}/bin/python ${smartmonScript} | ${pkgs.moreutils}/bin/sponge ${metricsFile}
      '';
      serviceConfig = {
        Type = "oneshot";
      };
    };
  };
}
--------------------------------------------------------------------------------
/modules/monitoring/snmp-exporter.nix:
--------------------------------------------------------------------------------
{ config, options, lib, pkgs, ... }:
let
  cfg = config.mayflower.monitoring.snmpExporter;
  inherit (lib) mkEnableOption mkOption types;

  # Free-form settings are passed straight through to the snmp_exporter
  # generator config, hence a JSON-compatible free-form type.
  freeformType = (pkgs.formats.json {}).type;

  moduleType = types.submodule {
    options = {
      targets = mkOption {
        type = types.listOf types.str;
        description = lib.mdDoc "List of targets that should be scraped with this module.";
      };
      walk = mkOption {
        type = types.listOf types.str;
        description = lib.mdDoc "List of OIDs to walk through in this module.";
      };
      timeout = mkOption {
        type = types.str;
        default = "5s";
        description = lib.mdDoc "Timeout for each individual SNMP request, defaults to 5s.";
      };
      max_repetitions = mkOption {
        type = types.int;
        default = 25;
        description = lib.mdDoc ''
          How many objects to request with GET/GETBULK, defaults to 25.
          May need to be reduced for buggy devices.
          See https://github.com/prometheus/snmp_exporter/blob/v0.24.1/generator/README.md#file-format
        '';
      };
      lookups = mkOption {
        type = freeformType;
        # FIX: documented as optional, so it needs a default — without one,
        # serialising a module that leaves it unset fails evaluation because
        # `config.modules` below forces every submodule attribute.
        default = [];
        description = lib.mdDoc ''
          Optional list of lookups to perform.
          See https://github.com/prometheus/snmp_exporter/blob/v0.24.1/generator/README.md#file-format
        '';
      };
      overrides = mkOption {
        type = freeformType;
        # FIX: same as `lookups` — optional per-module setting, default to empty.
        default = {};
        description = lib.mdDoc ''
          Allows for per-module overrides of bits of MIBs
          See https://github.com/prometheus/snmp_exporter/blob/v0.24.1/generator/README.md#file-format
        '';
      };
    };
  };
in {
  options.mayflower.monitoring.snmpExporter = {
    enable = mkEnableOption (lib.mdDoc "Mayflower SNMP exporter");
    extraMIBs = mkOption {
      type = types.listOf types.path;
      description = lib.mdDoc "List of directories to be added to the MIB search path.";
    };
    auths = mkOption {
      type = freeformType;
      default = {};
      description = lib.mdDoc "Auth configuration, see https://github.com/prometheus/snmp_exporter/blob/v0.24.1/generator/README.md#file-format";
    };
    modules = mkOption {
      type = types.attrsOf moduleType;
      default = {};
      description = lib.mdDoc "SNMP exporter modules to define and use.";
    };
  };
  config = lib.mkIf cfg.enable {
    services.prometheus.exporters.snmp = {
      enable = true;
      configurationPath = pkgs.prometheus-snmp-exporter-generator {
        inherit (cfg) extraMIBs;
        config.auths = cfg.auths;
        # Strip scrape-side settings before handing the modules to the
        # generator: `targets` is consumed by the prometheus scrape config
        # built in monitoring/default.nix.
        # NOTE(review): "interval" is not an option of moduleType — presumably
        # a leftover; removeAttrs silently ignores missing names.
        config.modules = lib.mapAttrs (_: lib.flip builtins.removeAttrs ["targets" "interval"]) cfg.modules;
      };
    };
  };
}
--------------------------------------------------------------------------------
/modules/opsdroid.nix:
--------------------------------------------------------------------------------
{ config, pkgs, lib, ...
}:

with lib;

let
  cfg = config.services.opsdroid;
  # Python environment containing opsdroid itself, all configured skill
  # packages and each skill's declared extra dependencies
  # (`passthru.extraDeps`, a function over the package set).
  env = pkgs.opsdroid.python.withPackages (ps:
    let skills = cfg.skills ps; in
    [ pkgs.opsdroid ] ++ skills ++ flatten (map (skill: (skill.extraDeps or (_: [])) ps) skills)
  );
  # YAML is a superset of JSON, so the JSON-rendered settings are a valid
  # configuration.yaml for opsdroid.
  configFile = format.generate "opsdroid-config.yaml" cfg.settings;
  format = pkgs.formats.json { };
in {
  options.services.opsdroid = {
    enable = mkEnableOption (mdDoc "opsdroid chatbot");

    settings = mkOption {
      type = types.submodule {
        freeformType = format.type;
        options = {
          web.host = mkOption {
            type = types.str;
            default = "::1";
            description = mdDoc ''
              Host opsdroid should listen to.
            '';
          };
          web.port = mkOption {
            type = types.port;
            default = 8080;
            description = mdDoc ''
              Port opsdroid should listen to.
            '';
          };
        };
      };
      description = mdDoc "configuration.yaml as json";
      example = literalExample
        ''
          {
            connectors.matrix = {
              mxid = "@opsdroid:matrix.example.org";
              password = "$password"; # to be substituted via `environmentFile`
              homeserver = "https://matrix.example.org";
              nick = "dr01d";
              rooms.main = "#notifications:matrix.example.org";
            };
            databases.sqlite.path = "/var/lib/opsdroid/opsdroid.db";
            skills.atlassian.module = "atlassian"; # see example of `skills`
          }
        '';
    };

    skills = mkOption {
      default = (ps: []);
      type = types.functionTo (types.listOf types.package);
      description = mdDoc ''
        Python packages to be used for opsdroid.
      '';
      # FIX: the example must be a function over the package set (`ps: [...]`)
      # to match `types.functionTo` and the default above; the previous
      # example showed a bare list referencing an unbound `ps`.
      example = literalExample
        ''
          ps: [
            (ps.buildPythonPackage {
              name = "opsdroid-atlassian";
              src = pkgs.fetchFromGitHub {
                owner = "mayflower";
                repo = "err-atlassian";
                rev = "94877938a0ed21fc4d851a9dd38ce6e48f757ab2";
                sha256 = "16vmxjvq8x4141abwr8lx69hhgh5hkvxx6sqcwhp866r5h8argkz";
              };
              format = "other";
              passthru.extraDeps = (ps: with ps; [ markdown jira tlslite-ng oauth2 ]);
              installPhase = '''
                install -vD *.py -t $out/''${ps.python.sitePackages}/atlassian
              ''';
            })
          ]
        '';
    };

    environmentFile = mkOption {
      type = types.nullOr types.path;
      default = null;
      description = mdDoc ''
        File containing secrets to be substituted via `envsubst`.
      '';
    };
  };

  config = lib.mkIf cfg.enable {
    systemd.services.opsdroid = {
      wantedBy = ["multi-user.target"];
      environment.HOME = "/var/lib/opsdroid";
      # Substitute secrets from EnvironmentFile into the rendered config at
      # start-up; the result lives in the private RuntimeDirectory.
      script = ''
        ${pkgs.envsubst}/bin/envsubst -o /run/opsdroid/configuration.yaml -i ${configFile}
        exec ${env}/bin/opsdroid start -f /run/opsdroid/configuration.yaml
      '';
      serviceConfig = {
        DynamicUser = true;
        EnvironmentFile = mkIf (cfg.environmentFile != null) cfg.environmentFile;
        Restart = "always";
        RuntimeDirectory = "opsdroid";
        RuntimeDirectoryMode = "0750";
        StateDirectory = "opsdroid";
      };
    };
  };
}
--------------------------------------------------------------------------------
/modules/prometheus-federation.nix:
--------------------------------------------------------------------------------
{ config, lib, pkgs, ...
}:

let
  cfg = config.mayflower.prometheusFederation;

  inherit (lib.lists)
    forEach
    singleton;

  inherit (lib.modules)
    mkIf
    mkMerge;

  inherit (lib.options)
    mkEnableOption
    mkOption;

  inherit (lib.strings)
    optionalString;

  inherit (lib)
    types mdDoc;

  inherit (config.mayflower.wireguard.lib.star)
    extractConfigForNetwork
    extractRegularHostConfigsForNetwork;

  # In-tunnel hostnames of all regular (non-server) peers of the federation
  # wireguard network; these are the targets the central instance scrapes.
  getScrapeTargets = forEach (extractRegularHostConfigsForNetwork cfg.networkName) (
    hostConfig: let
      networkConfig = extractConfigForNetwork hostConfig cfg.networkName;
    in
      networkConfig.wireguard.inTunnelHostname
  );

  # Prometheus config for the central instance: federate all non-go metrics
  # from every peer and join the peers' alertmanager cluster.
  prometheusServiceConfig = scrapeTargets: {
    scrapeConfigs = singleton {
      job_name = "federate";
      scrape_interval = "120s";
      honor_labels = true;
      metrics_path = "/federate";
      params = {
        "match[]" = [
          "{__name__!~\"go.*\",job!=\"\"}"
        ];
      };
      static_configs = singleton {
        targets = forEach scrapeTargets (hostName: hostName + ":9090");
      };
    };
    alertmanagers = singleton {
      static_configs = singleton {
        targets = forEach scrapeTargets (hostName: hostName + ":9093");
      };
    };
    alertmanager.clusterPeers = scrapeTargets;
  };
in
{
  options.mayflower.prometheusFederation = {
    enable = mkEnableOption (mdDoc "the mayflower prometheus federation module");

    networkName = mkOption {
      type = types.str;
      default = "prom";
      # FIX: option had no description.
      description = mdDoc ''
        Name of the wireguard star network (interface `wg-<name>`) used for
        federation traffic.
      '';
    };

    isServer = mkEnableOption (mdDoc "config for the central prometheus instance");
  };

  config = mkIf cfg.enable {

    mayflower.wireguard = {
      enable = true;
      star.${cfg.networkName} = mkMerge [
        {
          inherit (cfg) isServer;
          enable = true;
          centralPeerHostname = "prometheus-main";
        }
        # Regular peers expose themselves under "<host>-<datacenter>" inside
        # the tunnel.
        (mkIf (!cfg.isServer) {
          wireguard.inTunnelHostname =
            "${config.networking.hostName}-${config.mayflower.monitoring.datacenter}";
        })
      ];
    };

    # Open alertmanager (+mesh) on the tunnel interface everywhere; the
    # prometheus port only needs to be reachable on the scraped peers.
    networking.firewall = {
      extraCommands = ''
        ip46tables -A nixos-fw -i wg-${cfg.networkName} -p tcp --dport 9093 -m comment --comment "alertmanager mayflower.prometheus-federation" -j ACCEPT
        ip46tables -A nixos-fw -i wg-${cfg.networkName} -p tcp --dport 9094 -m comment --comment "alertmanager-mesh mayflower.prometheus-federation" -j ACCEPT
      '' + optionalString (!cfg.isServer) ''
        ip46tables -A nixos-fw -i wg-${cfg.networkName} -p tcp --dport 9090 -m comment --comment "prometheus mayflower.prometheus-federation" -j ACCEPT
      '';
    };

    services = mkMerge [
      (mkIf cfg.isServer {
        prometheus = prometheusServiceConfig getScrapeTargets;
      })
      # Regular peers report alerts to the central alertmanager as well.
      (mkIf (!cfg.isServer) {
        prometheus.alertmanagers = singleton {
          static_configs = singleton {
            targets = [
              "${config.mayflower.wireguard.star.${cfg.networkName}.centralPeerHostname}:9093"
            ];
          };
        };
      })
    ];

    sops.secrets = {
      "wireguard-${cfg.networkName}-privatekey" = {};
    };

    mayflower.monitoring.server = mkIf (!cfg.isServer) {
      alertmanagerExtraPeers = [
        config.mayflower.wireguard.star.${cfg.networkName}.centralPeerHostname
      ];
    };

  };
}
--------------------------------------------------------------------------------
/modules/service-overview.nix:
--------------------------------------------------------------------------------
{ pkgs, lib, config, ...
}:

with lib;

let
  cfg = config.mayflower.serviceOverview;
  # Union of the serviceOverview.services sets declared on every known
  # machine; later machines win on key collisions (mergeAttrs).
  services = fold mergeAttrs {} (mapAttrsToList (_: c:
    c.mayflower.serviceOverview.services
  ) config.mayflower.machines);
in
{
  options = {
    mayflower.serviceOverview = {
      enable = mkOption {
        type = types.bool;
        default = false;
        description = mdDoc ''
          Generate the service overview and serve it on this host.
        '';
      };
      host = mkOption {
        type = types.str;
        example = "services.example.com";
        description = mdDoc ''
          Name of the nginx vhost.
        '';
      };
      services = mkOption {
        type = types.attrsOf (types.submodule {
          options = {
            address = mkOption {
              type = types.str;
              example = "https://example.service";
              description = mdDoc ''
                Address of the provided service.
              '';
            };
            description = mkOption {
              type = types.str;
              example = "Very helpful service";
              description = mdDoc ''
                A short description of the provided service.
              '';
            };
            status = mkOption {
              type = types.enum [
                "alpha" "beta" "production" "deprecated" "obsolete"
              ];
              description = mdDoc ''
                Specify the support status for the provided service.
              '';
            };
          };
        });
        description = mdDoc "Set of services provided by this host.";
        example = literalExample ''{
          exampleService = {
            address = "https://example.service";
            description = "Very helpful service";
            status = "beta";
          };
        }'';
      };
    };
  };

  config = mkIf cfg.enable {
    services.nginx.enable = mkDefault true;
    services.nginx.virtualHosts."${cfg.host}" = {
      locations."/" = {
        index = "index.html";
        # The overview page is built at evaluation time from the merged
        # service set of all machines.
        root = pkgs.serviceOverview.override { inherit services; };
      };
    };
  };
}
--------------------------------------------------------------------------------
/modules/sops-extension.nix:
--------------------------------------------------------------------------------
{ config, lib, pkgs, inputs, ... }:

let
  globalCfg = config;

  inherit (globalCfg.sops) secretsBaseDir;

  inherit (lib) types mdDoc;
  # FIX: mkDefault lives in lib.modules, not lib.options — the previous
  # `inherit (lib.options) mkDefault` was a latent evaluation error (never
  # forced because the code below used the lib.mkDefault alias).
  inherit (lib.modules) mkDefault mkIf mkMerge;
  inherit (lib.options) mkOption;

  # Extends each sops secret with two convenience options:
  # - associatedService: inherit owner/group from a systemd unit
  # - relKeyPath: locate the sops file relative to secretsBaseDir
  associatedServiceSubModule = types.submodule (
    { config, ... }:
    let
      subCfg = config;
    in
    {
      options = {
        associatedService = mkOption {
          type = types.nullOr types.str;
          default = null;
          example = "grafana";
          description = mdDoc ''
            Specify the name of the systemd unit from which to inherit User and Group
            for file permission.
          '';
        };
        relKeyPath = mkOption {
          type = types.nullOr types.str;
          default = null;
          description = mdDoc ''
            Path to the directory containing the secret file, relative to "secrets/"
            in the deployment directory.
            By default a secret with name "example" for host "server" will
            be expected to be in "secrets/server/secrets.sops.yaml".
            If the secret is needed for a declarative container named "cont" on the
            same host, the expected path would be "secrets/server/cont/secrets.sops.yaml"
          '';
        };
      };
      config = mkMerge [
        # Group-readable by default so an associated service's group works.
        { mode = mkDefault "0440"; }
        (mkIf (subCfg.associatedService != null) {
          owner = globalCfg.systemd.services.${subCfg.associatedService}.serviceConfig.User;
          group = globalCfg.systemd.services.${subCfg.associatedService}.serviceConfig.Group;
        })
        (mkIf (subCfg.relKeyPath != null) {
          sopsFile = "${secretsBaseDir}/${subCfg.relKeyPath}/secrets.sops.yaml";
        })
      ];
    }
  );
in

{
  options.sops = {
    secrets = mkOption {
      type = types.attrsOf associatedServiceSubModule;
    };
    secretsBaseDir = mkOption {
      type = types.path;
      description = mdDoc ''
        Path to the secrets directory containing one subdirectory for each host.
      '';
    };
    defaultRelKeyPath = mkOption {
      default = config.networking.hostName;
      defaultText = "config.networking.hostName";
      type = types.str;
      description = mdDoc ''
        `relKeyPath`, but for all secrets.
      '';
    };
  };

  config.sops = {
    defaultSopsFile = mkDefault (
      "${secretsBaseDir}/${config.sops.defaultRelKeyPath}/secrets.sops.yaml"
    );
    # Decrypt with the host's SSH key converted to an age key.
    age.sshKeyPaths = [ "/etc/ssh/ssh_host_ed25519_key" ];
  };
}
--------------------------------------------------------------------------------
/modules/wireguard.nix:
--------------------------------------------------------------------------------
{ config, lib, pkgs, nodes, ...
}: 2 | 3 | let 4 | cfg = config.mayflower.wireguard; 5 | 6 | inherit (lib.attrsets) 7 | mapAttrs' 8 | mapAttrsToList; 9 | 10 | inherit (lib.options) 11 | mkOption 12 | mkEnableOption; 13 | 14 | inherit (lib.modules) 15 | mkMerge 16 | mkIf; 17 | 18 | inherit (lib.strings) 19 | concatStrings; 20 | 21 | inherit (lib.lists) 22 | concatMap 23 | flatten 24 | head 25 | length 26 | optional 27 | optionals; 28 | 29 | inherit (lib) 30 | types 31 | mdDoc; 32 | 33 | # TODO assertion for network name -> wg-... (max 15B, case sensitive, utf-8, no /, no WS) 34 | # TODO maybe support wg PSKs 35 | # TODO maybe make opening UDP port configurable 36 | 37 | wireguardEndpointOptions = with types; { 38 | hostname = mkOption { 39 | type = nullOr str; 40 | default = null; 41 | }; 42 | ip4 = mkOption { 43 | type = nullOr str; 44 | default = null; 45 | }; 46 | ip6 = mkOption { 47 | type = nullOr str; 48 | default = null; 49 | }; 50 | listenPort = mkOption { 51 | type = nullOr port; 52 | }; 53 | }; 54 | 55 | assertionsForEndpoints = networks: flatten (mapAttrsToList (networkName: networkConf: 56 | let 57 | endpointConf = networkConf.wireguard.endpoint; 58 | in 59 | optionals (endpointConf !=null) [ { 60 | assertion = ((endpointConf.ip4 != null && endpointConf.ip6 != null) -> endpointConf.hostname != null); 61 | message = '' 62 | Error in config for network '${networkName}': 63 | Both IPv4 and IPv6 endpoint addresses are defined, but no hostname for /etc/hosts 64 | ''; 65 | } { 66 | assertion = (endpointConf.ip4 != null || endpointConf.ip6 != null || endpointConf.hostname != null); 67 | message = '' 68 | Error in config for network '${networkName}': 69 | None of IPv4, IPv6 endpoint addresses or hostname are defined. 
70 | ''; 71 | } 72 | ]) networks); 73 | 74 | /* construct host port combination from non-null endpointConf */ 75 | endpointStringFromConf = endpointConf: 76 | if endpointConf.hostname != null 77 | then "${endpointConf.hostname}:${toString endpointConf.listenPort}" 78 | else concatStrings [ 79 | "${if endpointConf.ip4 != null then endpointConf.ip4 else endpointConf.ip6}" 80 | ":${toString endpointConf.port}" 81 | ]; 82 | 83 | /* construct legacy peer config from module-specific wireguard config */ 84 | legacyPeerConfigFromConf = wireguardConf: 85 | let 86 | endpointConf = wireguardConf.endpoint; 87 | in mkMerge [ 88 | { 89 | inherit (wireguardConf) persistentKeepalive publicKey; 90 | allowedIPs = [ 91 | "${wireguardConf.tunnelIPv4Address}/32" 92 | "${wireguardConf.tunnelIPv6Address}/128" 93 | ]; 94 | } 95 | (mkIf (endpointConf != null) { 96 | endpoint = endpointStringFromConf endpointConf; 97 | }) 98 | ]; 99 | 100 | /* construct networkd peer config from module-specific wireguard config */ 101 | peerConfigFromConf = wireguardConf: 102 | let 103 | endpointConf = wireguardConf.endpoint; 104 | in { 105 | wireguardPeerConfig = { 106 | PersistentKeepalive = wireguardConf.persistentKeepalive; 107 | PublicKey = wireguardConf.publicKey; 108 | AllowedIPs = [ 109 | "${wireguardConf.tunnelIPv4Address}/32" 110 | "${wireguardConf.tunnelIPv6Address}/128" 111 | ]; 112 | Endpoint = mkIf (endpointConf != null) (endpointStringFromConf endpointConf); 113 | }; 114 | }; 115 | 116 | topologies = { 117 | /* definitions for the star network topology */ 118 | star = { 119 | /* helper functions for star network topology */ 120 | util = rec { 121 | /* extract module-specific config for host in network */ 122 | extractConfigForNetwork = hostConfig: networkName: 123 | hostConfig.mayflower.wireguard.star.${networkName}; 124 | 125 | /* Filter hosts & declarative containers from a set of nodes if the node's 126 | config matches `cond` (i.e. `cond config == true`). 
*/ 127 | _extractHostConfigs = cond: 128 | concatMap 129 | ({ config, ... }: optional (cond config) config 130 | ++ concatMap 131 | ({ config, ... }: optional (cond config) config) 132 | (builtins.attrValues config.containers)) 133 | (builtins.attrValues nodes); 134 | 135 | /* extract list of all configs for hosts in this network */ 136 | extractRegularHostConfigsForNetwork = networkName: _extractHostConfigs 137 | (host: 138 | host.mayflower.wireguard.star.${networkName}.enable or false 139 | && ! (host.mayflower.wireguard.star.${networkName}.isServer or false)); 140 | 141 | /* helper function for centralHostConfigFornetwork */ 142 | _extractCentralHostConfigsForNetwork = networkName: _extractHostConfigs 143 | (host: 144 | host.mayflower.wireguard.star.${networkName}.enable or false 145 | && host.mayflower.wireguard.star.${networkName}.isServer or false); 146 | 147 | /* extract config of central host in this network */ 148 | centralHostConfigForNetwork = networkName: 149 | let 150 | centralHosts = (_extractCentralHostConfigsForNetwork networkName); 151 | in 152 | # there is exactly one central host with star topology 153 | assert (length centralHosts == 1); 154 | (head centralHosts); 155 | 156 | /* generate list of all peer attrsets for this host and this network 157 | helper function for _legacyWireguardConfigForNetwork */ 158 | _legacyWireguardPeersForNetwork = networkName: networkConfig: if networkConfig.isServer 159 | then (map (peerHostConfig: 160 | legacyPeerConfigFromConf (extractConfigForNetwork peerHostConfig networkName).wireguard 161 | ) (extractRegularHostConfigsForNetwork networkName)) 162 | else let 163 | centralHostConfig = centralHostConfigForNetwork networkName; 164 | in 165 | [ 166 | (legacyPeerConfigFromConf (extractConfigForNetwork centralHostConfig networkName).wireguard) 167 | ]; 168 | 169 | /* generate wireguard config for this host and this network 170 | helper function for legacyWireguardConfigs */ 171 | _legacyWireguardConfigForNetwork = 
networkName: networkConfig: mkMerge [ 172 | { 173 | inherit (networkConfig.wireguard) privateKeyFile; 174 | ips = [ 175 | "${networkConfig.wireguard.tunnelIPv4Address}/32" 176 | "${networkConfig.wireguard.tunnelIPv6Address}/128" 177 | ]; 178 | peers = (_legacyWireguardPeersForNetwork networkName networkConfig); 179 | } 180 | (mkIf (networkConfig.wireguard.endpoint != null && networkConfig.wireguard.endpoint.listenPort != null) { 181 | inherit (networkConfig.wireguard.endpoint) listenPort; 182 | }) 183 | (mkIf (networkConfig.wireguard.fwMark != null) { 184 | postSetup = '' 185 | wg set "$DEVICE" fwmark ${toString networkConfig.wireguard.fwMark} 186 | ''; 187 | }) 188 | ]; 189 | 190 | /* generate legacy config for all wireguard interfaces on this host */ 191 | legacyWireguardConfigs = networks: mapAttrs' (networkName: networkConfig: { 192 | name = "wg-${networkName}"; 193 | value = _legacyWireguardConfigForNetwork networkName networkConfig; 194 | }) networks; 195 | 196 | /* generate list of all peer attrsets for this host and this network 197 | helper function for wireguardConfigs */ 198 | _wireguardPeersForNetwork = networkName: networkConfig: if networkConfig.isServer 199 | then (map (peerHostConfig: 200 | peerConfigFromConf (extractConfigForNetwork peerHostConfig networkName).wireguard 201 | ) (extractRegularHostConfigsForNetwork networkName)) 202 | else let 203 | centralHostConfig = centralHostConfigForNetwork networkName; 204 | in [ 205 | (peerConfigFromConf (extractConfigForNetwork centralHostConfig networkName).wireguard) 206 | ]; 207 | 208 | /* generate list of address configs for wireguard interfaces for this host and this network 209 | helper function for wireguardConfigs */ 210 | _wireguardAddressConfigForNetwork = networkName: networkConfig: if networkConfig.isServer 211 | then [ 212 | { addressConfig = { Address = "${networkConfig.wireguard.tunnelIPv4Address}/32"; }; } 213 | { addressConfig = { Address = 
"${networkConfig.wireguard.tunnelIPv6Address}/128"; }; } 214 | ] 215 | else let 216 | centralHostConfig = centralHostConfigForNetwork networkName; 217 | centralWGConfig = (extractConfigForNetwork centralHostConfig networkName).wireguard; 218 | in [ 219 | { addressConfig = { Address = "${networkConfig.wireguard.tunnelIPv4Address}/32"; Peer = "${centralWGConfig.tunnelIPv4Address}/32"; }; } 220 | { addressConfig = { Address = "${networkConfig.wireguard.tunnelIPv6Address}/128"; Peer = "${centralWGConfig.tunnelIPv6Address}/128"; }; } 221 | ]; 222 | 223 | /* generate list of route configs for wireguard interfaces for this host and this network 224 | (only used for the 'server' host in star topology) 225 | helper function for wireguardConfigs */ 226 | _wireguardRouteConfigForNetwork = networkName: networkConfig: flatten (map (peerHostConfig: 227 | let 228 | peerWGConfig = (extractConfigForNetwork peerHostConfig networkName).wireguard; 229 | in [ 230 | { routeConfig = { Destination = peerWGConfig.tunnelIPv4Address; Scope = "link"; }; } 231 | { routeConfig = { Destination = peerWGConfig.tunnelIPv6Address; }; } 232 | ] 233 | ) (extractRegularHostConfigsForNetwork networkName)); 234 | 235 | 236 | /* generate systemd-networkd config for all wireguard interfaces on this host */ 237 | wireguardConfigs = networks: { 238 | netdevs = mapAttrs' (networkName: networkConfig: { 239 | name = "30-wg-${networkName}"; 240 | value = { 241 | netdevConfig = { 242 | Description = "WireGuard interface for ${networkName} star network"; 243 | Name = "wg-${networkName}"; 244 | Kind = "wireguard"; 245 | }; 246 | wireguardConfig = mkMerge [ 247 | { 248 | # FIXME: instead of hard-coding /run/credentials/ we should use ${CREDENTIALS_DIRECTORY} 249 | # but this can at this time not be used in networkd config files 250 | # and the variable is also not available during ExecStartPre 251 | PrivateKeyFile = "/run/credentials/systemd-networkd.service/wireguard-${networkName}-privatekey"; 252 | } 253 | (mkIf 
(networkConfig.wireguard.endpoint != null && networkConfig.wireguard.endpoint.listenPort != null) { 254 | ListenPort = networkConfig.wireguard.endpoint.listenPort; 255 | }) 256 | (mkIf (networkConfig.wireguard.fwMark != null) { 257 | FirewallMark = networkConfig.wireguard.fwMark; 258 | }) 259 | ]; 260 | wireguardPeers = _wireguardPeersForNetwork networkName networkConfig; 261 | }; 262 | }) networks; 263 | networks = mapAttrs' (networkName: networkConfig: { 264 | name = "30-wg-${networkName}"; 265 | value = mkMerge [ 266 | { 267 | matchConfig.Name = "wg-${networkName}"; 268 | addresses = _wireguardAddressConfigForNetwork networkName networkConfig; 269 | } 270 | (mkIf networkConfig.isServer { 271 | routes = _wireguardRouteConfigForNetwork networkName networkConfig; 272 | }) 273 | ]; 274 | }) networks; 275 | }; 276 | 277 | /* generate list of mappings from ID to the path of the private key file of each network 278 | for this host. See systemd.exec(5) for LoadCredentials */ 279 | wireguardLoadCredentials = networks: mapAttrsToList (networkName: networkConfig: 280 | "wireguard-${networkName}-privatekey:${networkConfig.wireguard.privateKeyFile}" 281 | ) networks; 282 | 283 | extractPortsToOpenInFirewall = networks: flatten (mapAttrsToList (_: networkConfig: 284 | if networkConfig.enable && networkConfig.wireguard.endpoint != null 285 | then networkConfig.wireguard.endpoint.listenPort 286 | else [] 287 | ) networks); 288 | 289 | /* helper function for extraHostsForEndpoints */ 290 | _extraHostsFromConfigsForNetwork = hostConfigs: networkName: flatten (map (hostConfig: let 291 | hostEndpoint = (extractConfigForNetwork hostConfig networkName).wireguard.endpoint; 292 | in 293 | optionals (hostEndpoint != null && hostEndpoint.hostname != null) ( 294 | (optional (hostEndpoint.ip4 != null) { 295 | name = hostEndpoint.ip4; 296 | value = [ hostEndpoint.hostname ]; 297 | }) 298 | ++ 299 | (optional (hostEndpoint.ip6 != null) { 300 | name = hostEndpoint.ip6; 301 | value = [ 
hostEndpoint.hostname ]; 302 | }) 303 | ) 304 | ) hostConfigs); 305 | 306 | /* generate entries for /etc/hosts for peers which specified both hostname and addresses */ 307 | extraHostsForEndpoints = networks: mkMerge (builtins.attrValues (builtins.mapAttrs (networkName: networkConfig: ( 308 | builtins.listToAttrs (_extraHostsFromConfigsForNetwork ( 309 | if networkConfig.isServer 310 | then (extractRegularHostConfigsForNetwork networkName) 311 | else (_extractCentralHostConfigsForNetwork networkName) 312 | ) networkName) 313 | )) networks )); 314 | 315 | 316 | /* generate entries for /etc/hosts for tunnel addresses of central peer on all hosts per network */ 317 | centralHostEntries = networks: builtins.listToAttrs (flatten (mapAttrsToList (networkName: networkConfig: let 318 | centralHostConfig = centralHostConfigForNetwork networkName; 319 | centralHostNetworkConfig = extractConfigForNetwork centralHostConfig networkName; 320 | in [ 321 | { name = centralHostNetworkConfig.wireguard.tunnelIPv4Address; value = [ networkConfig.centralPeerHostname ];} 322 | { name = centralHostNetworkConfig.wireguard.tunnelIPv6Address; value = [ networkConfig.centralPeerHostname ];} 323 | ]) networks) 324 | ); 325 | 326 | /* generate entries for /etc/hosts for tunnel addresses of regular peers on central host per network */ 327 | regularHostEntries = networks: builtins.listToAttrs (flatten (mapAttrsToList (networkName: networkConfig: 328 | let 329 | regularHostConfigs = extractRegularHostConfigsForNetwork networkName; 330 | generateHostEntries = (regularHostConfig: 331 | let 332 | regularHostNetworkConfig = extractConfigForNetwork regularHostConfig networkName; 333 | inTunnelHostname = regularHostNetworkConfig.wireguard.inTunnelHostname; 334 | in 335 | optionals (inTunnelHostname != null) [ 336 | { name = regularHostNetworkConfig.wireguard.tunnelIPv4Address; value = [ inTunnelHostname ];} 337 | { name = regularHostNetworkConfig.wireguard.tunnelIPv6Address; value = [ inTunnelHostname 
];} 338 | ]); 339 | in 340 | if networkConfig.isServer then map generateHostEntries regularHostConfigs else [] 341 | ) networks )); 342 | 343 | assertionsForNetworks = networks: flatten (mapAttrsToList (networkName: networkConfig: [ 344 | { 345 | assertion = let 346 | centralHostHasEndpoint = (extractConfigForNetwork (centralHostConfigForNetwork networkName) networkName).wireguard.endpoint != null; 347 | allRegularHostsHaveEndpoints = builtins.all (hostConfig: 348 | ((extractConfigForNetwork hostConfig networkName).wireguard.endpoint != null) 349 | ) (extractRegularHostConfigsForNetwork networkName); 350 | in (centralHostHasEndpoint || allRegularHostsHaveEndpoints); 351 | message = '' 352 | Error in config for network '${networkName}': 353 | mayflower.wireguard.star.${networkName}.wireguard.endpoint needs to be defined 354 | for all regular hosts in this network if it is not defined for the central host. 355 | ''; 356 | } 357 | { 358 | assertion = networkConfig.isServer -> ( 359 | networkConfig.wireguard.inTunnelHostname == null 360 | || networkConfig.wireguard.inTunnelHostname == networkConfig.centralPeerHostname 361 | ); 362 | message = '' 363 | Error in config for network '${networkName}': 364 | If mayflower.wireguard.star.${networkName}.isServer is true 365 | mayflower.wireguard.star.${networkName}.wireguard.inTunnelHostname must either be null 366 | or equal to mayflower.wireguard.star.${networkName}.centralPeerHostname. 367 | ''; 368 | } 369 | ]) networks); 370 | }; 371 | 372 | /* submodule options for star network topology */ 373 | options = name: { 374 | enable = mkEnableOption (mdDoc "this network with star topology"); 375 | isServer = mkEnableOption (mdDoc "central host in this network"); 376 | centralPeerHostname = mkOption { 377 | type = types.str; 378 | default = "${name}-server"; 379 | description = mdDoc '' 380 | Hostname that should resolve to the in-tunnel addresses of the central host of this network. 
381 | Entries in /etc/hosts are created on each regular host in this network. 382 | ''; 383 | }; 384 | wireguard = { 385 | endpoint = mkOption { 386 | type = types.nullOr (types.submodule { 387 | options = wireguardEndpointOptions; 388 | }); 389 | default = null; 390 | }; 391 | privateKeyFile = mkOption { 392 | type = types.str; 393 | default = config.sops.secrets."wireguard-${name}-privatekey".path; 394 | }; 395 | publicKey = mkOption { 396 | type = types.str; 397 | }; 398 | tunnelIPv4Address = mkOption { 399 | type = types.str; 400 | }; 401 | tunnelIPv6Address = mkOption { 402 | type = types.str; 403 | }; 404 | persistentKeepalive = mkOption { 405 | type = types.nullOr types.int; 406 | default = 10; 407 | }; 408 | inTunnelHostname = mkOption { 409 | type = types.nullOr types.str; 410 | default = null; 411 | description = mdDoc '' 412 | Hostname that should resolve to the in-tunnel addresses of this host in this network. 413 | Entries in /etc/hosts are created on the central host of this network. 414 | Note that configuring this option for the central host has no effect on regular hosts. 415 | ''; 416 | }; 417 | fwMark = mkOption { 418 | type = types.nullOr (types.ints.between 1 4294967295); 419 | default = null; 420 | description = mdDoc '' 421 | Sets a firewall mark on outgoing WireGuard packets from this interface. 
422 | ''; 423 | }; 424 | }; 425 | }; 426 | }; 427 | }; 428 | in 429 | { 430 | options.mayflower.wireguard = { 431 | enable = mkEnableOption (mdDoc "mayflower wireguard network module"); 432 | 433 | lib = { 434 | star = { 435 | extractConfigForNetwork = mkOption { 436 | type = types.functionTo types.unspecified; 437 | default = topologies.star.util.extractConfigForNetwork; 438 | readOnly = true; 439 | }; 440 | extractRegularHostConfigsForNetwork = mkOption { 441 | type = types.functionTo (types.listOf types.attrs); 442 | default = topologies.star.util.extractRegularHostConfigsForNetwork; 443 | readOnly = true; 444 | }; 445 | }; 446 | }; 447 | 448 | star = mkOption { 449 | type = types.attrsOf (types.submodule ({ name, ... }: { 450 | options = topologies.star.options name; 451 | })); 452 | default = {}; 453 | }; 454 | /* TODO add more topologies 455 | fullMesh = mkOption {}; 456 | pointToPoint ... 457 | */ 458 | }; 459 | 460 | config = mkIf cfg.enable { 461 | assertions = (assertionsForEndpoints cfg.star) 462 | ++ (topologies.star.util.assertionsForNetworks cfg.star); 463 | 464 | networking.hosts = mkMerge [ 465 | (topologies.star.util.extraHostsForEndpoints cfg.star) 466 | (topologies.star.util.centralHostEntries cfg.star) 467 | (topologies.star.util.regularHostEntries cfg.star) 468 | ]; 469 | 470 | systemd.network = mkIf config.networking.useNetworkd ( 471 | topologies.star.util.wireguardConfigs cfg.star 472 | ); 473 | 474 | systemd.services.systemd-networkd.serviceConfig = mkIf config.networking.useNetworkd { 475 | LoadCredential = topologies.star.util.wireguardLoadCredentials cfg.star; 476 | }; 477 | 478 | networking.wireguard.interfaces = mkIf (!config.networking.useNetworkd) ( 479 | topologies.star.util.legacyWireguardConfigs cfg.star 480 | ); 481 | 482 | networking.firewall.allowedUDPPorts = ( 483 | topologies.star.util.extractPortsToOpenInFirewall cfg.star 484 | ); 485 | 486 | environment.systemPackages = optional config.networking.useNetworkd 
pkgs.wireguard-tools; 487 | }; 488 | } 489 | -------------------------------------------------------------------------------- /overlay.nix: -------------------------------------------------------------------------------- 1 | self: super: 2 | 3 | { 4 | python3 = super.python3.override { packageOverrides = import ./pkgs/python-packages.nix; }; 5 | 6 | mailmanPackages = super.mailmanPackages.extend (_: mailmanSuper: { 7 | postorius = mailmanSuper.postorius.overrideAttrs ({ patches ? [], ... }: { 8 | patches = patches ++ [ 9 | ./pkgs/postorius_users_can_create_lists.patch 10 | ]; 11 | }); 12 | 13 | python3 = mailmanSuper.python3.override { 14 | overlay = pythonSelf: pythonSuper: { 15 | django-allauth = pythonSuper.django-allauth.overridePythonAttrs ({ patches ? [], ... }: { 16 | # patch is minimally-invasive on purpose, so tests aren't touched in there. 17 | doCheck = false; 18 | patches = patches ++ [ 19 | ./pkgs/python/django-allauth/0001-Automatically-link-social-login-users-against-existi.patch 20 | ./pkgs/python/django-allauth/0002-Implement-superuser-to-oidc-mapping-similar-to-the-a.patch 21 | ./pkgs/python/django-allauth/0003-Prohibit-authentication-against-local-users.patch 22 | ]; 23 | }); 24 | }; 25 | }; 26 | }); 27 | 28 | hagrid = super.callPackage pkgs/hagrid {}; 29 | matrix-alertmanager = super.callPackage pkgs/matrix-alertmanager { }; 30 | serviceOverview = super.callPackage pkgs/service-overview { }; 31 | 32 | # `libxcrypt` is a dependency pretty high up in the tree. So it's hard to determine 33 | # from where the version comes that dovecot gets linked against (i.e. if you add `libxcrypt-legacy` 34 | # to `buildInputs` it's not sufficient for instance). 35 | # Also, patching out every possible occurrence is pretty error-prone if internal structures 36 | # of nixpkgs change. So instead, it's way simpler (implementation-wise, not for our Hydra) 37 | # to just instantiate a new nixpkgs with `libxcrypt` supporting weak hashes. 
38 | # 39 | # Rather than investing more energy into a potentially nicer workaround, we should fix 40 | # the unterlying problem instead anyways. 41 | dovecot = (import self.path { 42 | inherit (self.stdenv) system; 43 | overlays = [ 44 | (_: _: { libxcrypt = self.libxcrypt-legacy; }) 45 | ]; 46 | }).dovecot.override { 47 | withPgSQL = true; 48 | }; 49 | postfix = super.postfix.override { withPgSQL = true; }; 50 | 51 | bitwarden_rs = super.bitwarden_rs.overrideAttrs (oldAttrs: { 52 | postPatch = (oldAttrs.postPatch or "") + '' 53 | substituteInPlace src/api/admin.rs --replace \ 54 | 'let org_name = "bitwarden_rs";' \ 55 | 'let org_name = "Mayflower GmbH";' 56 | ''; 57 | }); 58 | 59 | # https://github.com/prometheus/node_exporter/issues/2849 60 | prometheus-node-exporter = let 61 | version = "unstable-20240201"; 62 | src = super.fetchFromGitHub { 63 | rev = "57de74a5f63feb222d4506afd2e8f384247fc51a"; 64 | owner = "prometheus"; 65 | repo = "node_exporter"; 66 | sha256 = "sha256-4Zed9joc2JfMwkQoxk32hWuPa6L6OzQfx8IcyUKh+dE="; 67 | }; 68 | in (super.prometheus-node-exporter.override { 69 | buildGoModule = args: super.buildGoModule.override {} (args // { 70 | inherit src version; 71 | vendorHash = "sha256-HIDfRaDoI2lrY7ru43mSipCTabLCasS77l0P6d5ltko="; 72 | }); 73 | }); 74 | 75 | prometheus-snmp-exporter = let 76 | version = "0.24.1"; 77 | src = super.fetchFromGitHub { 78 | owner = "prometheus"; 79 | repo = "snmp_exporter"; 80 | rev = "v${version}"; 81 | sha256 = "sha256-DFphnztS3JX5xmoKB3VVzAP26d9PeCFWyDHHs2Yi+gA="; 82 | }; 83 | in (super.prometheus-snmp-exporter.override { 84 | buildGoModule = args: super.buildGoModule.override {} (args // { 85 | inherit src version; 86 | vendorHash = "sha256-tf+FIc39a5J05LHCEHs17rkwPdc0SujNm8OV7hsfK3U="; 87 | }); 88 | }); 89 | 90 | prometheus-snmp-exporter-generator = super.callPackage ./pkgs/prometheus-snmp-exporter-generator.nix { 91 | prometheus-snmp-exporter = self.prometheus-snmp-exporter; 92 | }; 93 | 94 | defaultGemConfig = 
super.defaultGemConfig // { 95 | oxidized = (attrs: rec { 96 | tplinkPatch = (super.fetchpatch { 97 | url = "https://patch-diff.githubusercontent.com/raw/ytti/oxidized/pull/1443.diff"; 98 | sha256 = "09dyf1hnxgdxfkh9l6y63qmm1ds5wgb2d52vvrwwc0s4gl0b1yad"; 99 | }); 100 | postInstall = '' 101 | patch -p1 -d $(cat $out/nix-support/gem-meta/install-path) -i ${tplinkPatch} 102 | ''; 103 | }); 104 | }; 105 | 106 | mxisd = super.runCommandNoCC "override-mxisd" { 107 | preferLocalBuild = true; 108 | } '' 109 | mkdir -p $out/bin 110 | ln -vs ${super.ma1sd}/bin/ma1sd $out/bin/mxisd 111 | ''; 112 | 113 | nixosTests = super.nixosTests // { 114 | wireguard-star = self.callPackage ./tests/wireguard-star.nix { }; 115 | }; 116 | } 117 | -------------------------------------------------------------------------------- /pkgs/hagrid/default.nix: -------------------------------------------------------------------------------- 1 | { lib 2 | , fetchFromGitLab 3 | , gettext 4 | , llvmPackages 5 | , nettle 6 | , pkg-config 7 | , rustPlatform 8 | , zsh 9 | , brand ? 
"keys.mayflower.de" 10 | }: 11 | 12 | rustPlatform.buildRustPackage rec { 13 | pname = "hagrid"; 14 | version = "2023-12-28"; 15 | 16 | src = fetchFromGitLab { 17 | owner = "keys.openpgp.org"; 18 | repo = pname; 19 | rev = "da4665306e501e35f349f9d6c84148b9ff3a0da5"; 20 | sha256 = "sha256-mZyRNOJuAf5I9ybzEZcZZ0smGBKHZidU57fv+kGgYLc="; 21 | }; 22 | 23 | cargoLock = { 24 | lockFile = ./Cargo.lock; 25 | outputHashes = { 26 | "rocket_i18n-0.5.0" = "sha256-EbUE8Z3TQBnDnptl9qWK6JvsACCgP7EXTxcA7pouYbc="; 27 | }; 28 | }; 29 | 30 | cargoBuildFlags = [ "--workspace" ]; 31 | 32 | LIBCLANG_PATH = "${llvmPackages.libclang.lib}/lib"; 33 | 34 | buildInputs = [ nettle ]; 35 | nativeBuildInputs = [ pkg-config rustPlatform.bindgenHook gettext zsh ]; 36 | 37 | patches = [ 38 | ./remove-news-and-stats.patch 39 | ]; 40 | 41 | postPatch = '' 42 | while read -r file ; do 43 | substituteInPlace "$file" --replace ">keys.openpgp.org<" ">${brand}<" 44 | done < <(grep -r '>keys.openpgp.org<' -l) 45 | 46 | zsh ./make-translated-templates 47 | 48 | substituteInPlace src/mail.rs \ 49 | --replace 'SendmailTransport::new()' \ 50 | 'SendmailTransport::new_with_command("/run/wrappers/bin/sendmail")' 51 | ''; 52 | 53 | postInstall = '' 54 | cp -r dist $out/ 55 | cp nginx.conf hagrid-routes.conf Rocket.toml.dist $out/ 56 | ''; 57 | 58 | meta = with lib; { 59 | description = "Hagrid as in, \"keeper of keys\". 
Verifying OpenPGP keyserver, written in Rust, running on https://keys.openpgp.org"; 60 | homepage = "https://keys.openpgp.org"; 61 | license = licenses.agpl3Only; 62 | maintainers = [ ]; 63 | }; 64 | } 65 | -------------------------------------------------------------------------------- /pkgs/hagrid/remove-news-and-stats.patch: -------------------------------------------------------------------------------- 1 | diff --git a/dist/templates/about/about.html.hbs b/dist/templates/about/about.html.hbs 2 | index 104a821..0b2f30c 100644 3 | --- a/dist/templates/about/about.html.hbs 4 | +++ b/dist/templates/about/about.html.hbs 5 | @@ -1,6 +1,6 @@ 6 | {{#> layout }} 7 |
8 | -

About | News | Usage | FAQ | Stats | Privacy

9 | +

About | Usage | FAQ | Privacy

10 | 11 |

12 | The keys.openpgp.org server is a public service for the 13 | diff --git a/dist/templates/about/api.html.hbs b/dist/templates/about/api.html.hbs 14 | index cc6b7e3..b639626 100644 15 | --- a/dist/templates/about/api.html.hbs 16 | +++ b/dist/templates/about/api.html.hbs 17 | @@ -1,6 +1,6 @@ 18 | {{#> layout }} 19 |

20 | -

About | News | Usage | FAQ | Stats | Privacy

21 | +

About | Usage | FAQ | Privacy

22 | 23 |

24 | Hagrid implements both the legacy HKP interface, as well as our 25 | diff --git a/dist/templates/about/faq.html.hbs b/dist/templates/about/faq.html.hbs 26 | index 411fd8b..642695f 100644 27 | --- a/dist/templates/about/faq.html.hbs 28 | +++ b/dist/templates/about/faq.html.hbs 29 | @@ -1,6 +1,6 @@ 30 | {{#> layout }} 31 |

32 | -

About | News | Usage | FAQ | Stats | Privacy

33 | +

About | Usage | FAQ | Privacy

34 | 35 |

36 | For instructions, see our usage guide. 37 | diff --git a/dist/templates/about/news.html.hbs b/dist/templates/about/news.html.hbs 38 | index 34f996c..d0b319c 100644 39 | --- a/dist/templates/about/news.html.hbs 40 | +++ b/dist/templates/about/news.html.hbs 41 | @@ -1,6 +1,6 @@ 42 | {{#> layout }} 43 |

44 | -

About | News | Usage | FAQ | Stats | Privacy

45 | +

About | News | Usage | FAQ | Privacy

46 | 47 |

48 |
2023-04-28 📅
49 | diff --git a/dist/templates/about/privacy.html.hbs b/dist/templates/about/privacy.html.hbs 50 | index 56158cf..c2376d4 100644 51 | --- a/dist/templates/about/privacy.html.hbs 52 | +++ b/dist/templates/about/privacy.html.hbs 53 | @@ -1,6 +1,6 @@ 54 | {{#> layout }} 55 |
56 | -

About | News | Usage | FAQ | Stats | Privacy

57 | +

About | Usage | FAQ | Privacy

58 | 59 |

Name and contact details

60 |

61 | diff --git a/dist/templates/about/stats.html.hbs b/dist/templates/about/stats.html.hbs 62 | index df3c728..3a12d27 100644 63 | --- a/dist/templates/about/stats.html.hbs 64 | +++ b/dist/templates/about/stats.html.hbs 65 | @@ -1,6 +1,6 @@ 66 | {{#> layout }} 67 |

68 | -

About | News | Usage | FAQ | Stats | Privacy

69 | +

About | Usage | FAQ | Stats | Privacy

70 | 71 |

Verified email addresses

72 | 73 | diff --git a/dist/templates/about/usage.html.hbs b/dist/templates/about/usage.html.hbs 74 | index 2c395f1..9011f24 100644 75 | --- a/dist/templates/about/usage.html.hbs 76 | +++ b/dist/templates/about/usage.html.hbs 77 | @@ -1,6 +1,6 @@ 78 | {{#> layout }} 79 |
80 | -

About | News | Usage | FAQ | Stats | Privacy

81 | +

About | Usage | FAQ | Privacy

82 | 83 |

84 | On this page, we collect information on how to use 85 | diff --git a/templates-translated/de/about/about.html.hbs b/templates-translated/de/about/about.html.hbs 86 | index 184a93d..fbb026d 100644 87 | --- a/templates-translated/de/about/about.html.hbs 88 | +++ b/templates-translated/de/about/about.html.hbs 89 | @@ -1,5 +1,5 @@ 90 |

91 | -

Übersicht | News | Nutzung | FAQ | Statistik | Privacy Policy 92 | +

Übersicht | Nutzung | FAQ | Statistik | Privacy Policy 93 |

94 | 95 |

Der keys.openpgp.org Server ist ein öffentlicher Service für die Verteilung von OpenPGP-Schlüsseln, üblicherweise als "Keyserver" bezeichnet.

96 | diff --git a/templates-translated/de/about/faq.html.hbs b/templates-translated/de/about/faq.html.hbs 97 | index 83eb0b7..4323e25 100644 98 | --- a/templates-translated/de/about/faq.html.hbs 99 | +++ b/templates-translated/de/about/faq.html.hbs 100 | @@ -1,6 +1,6 @@ 101 |
102 |

103 | -Übersicht | News | Nutzung | FAQ | Statistik | Privacy Policy 104 | +Übersicht | Nutzung | FAQ | Statistik | Privacy Policy 105 |

106 | 107 |

Für Details zur Nutzung, siehe Nutzungshinweise.

108 | diff --git a/templates-translated/de/about/stats.html.hbs b/templates-translated/de/about/stats.html.hbs 109 | index eb3e5ae..2cb6189 100644 110 | --- a/templates-translated/de/about/stats.html.hbs 111 | +++ b/templates-translated/de/about/stats.html.hbs 112 | @@ -1,6 +1,6 @@ 113 |
114 |

115 | -Übersicht | News | Nutzung | FAQ | Statistik | Privacy Policy 116 | +Übersicht | Nutzung | FAQ | Statistik | Privacy Policy 117 |

118 | 119 |

Bestätigte Email-Adressen

120 | diff --git a/templates-translated/de/about/usage.html.hbs b/templates-translated/de/about/usage.html.hbs 121 | index 198f61d..5276595 100644 122 | --- a/templates-translated/de/about/usage.html.hbs 123 | +++ b/templates-translated/de/about/usage.html.hbs 124 | @@ -1,6 +1,6 @@ 125 |
126 |

127 | -Übersicht | News | Nutzung | FAQ | Statistik | Privacy Policy 128 | +Übersicht | Nutzung | FAQ | Statistik | Privacy Policy 129 |

130 | 131 |

Auf dieser Seite sammeln wir Anleitungen zur Nutzung von keys.openpgp.org mit unterschiedlichen OpenPGP-Anwendungen. Wir sind noch dabei, weitere Anleitungen hinzuzufügen - falls du eine bestimmte vermisst, lass es uns einfach wissen.

132 | diff --git a/templates-untranslated/about/about.html.hbs b/templates-untranslated/about/about.html.hbs 133 | index d2b8fb0..9a86029 100644 134 | --- a/templates-untranslated/about/about.html.hbs 135 | +++ b/templates-untranslated/about/about.html.hbs 136 | @@ -1,5 +1,5 @@ 137 |
138 | -

About | News | Usage | FAQ | Stats | Privacy

139 | +

About | Usage | FAQ | Privacy

140 | 141 |

142 | The keys.openpgp.org server is a public service for the 143 | diff --git a/templates-untranslated/about/faq.html.hbs b/templates-untranslated/about/faq.html.hbs 144 | index 76e99f9..b1f2294 100644 145 | --- a/templates-untranslated/about/faq.html.hbs 146 | +++ b/templates-untranslated/about/faq.html.hbs 147 | @@ -1,5 +1,5 @@ 148 |

149 | -

About | News | Usage | FAQ | Stats | Privacy

150 | +

About | Usage | FAQ | Privacy

151 | 152 |

153 | For instructions, see our usage guide. 154 | diff --git a/templates-untranslated/about/news.html.hbs b/templates-untranslated/about/news.html.hbs 155 | index c9c10e5..cd5e87e 100644 156 | --- a/templates-untranslated/about/news.html.hbs 157 | +++ b/templates-untranslated/about/news.html.hbs 158 | @@ -1,5 +1,5 @@ 159 |

160 | -

About | News | Usage | FAQ | Stats | Privacy

161 | +

About | News | Usage | FAQ | Privacy

162 | 163 |

164 |
2023-04-28 📅
165 | diff --git a/templates-untranslated/about/privacy.html.hbs b/templates-untranslated/about/privacy.html.hbs 166 | index 415f1b5..4dd2d78 100644 167 | --- a/templates-untranslated/about/privacy.html.hbs 168 | +++ b/templates-untranslated/about/privacy.html.hbs 169 | @@ -1,5 +1,5 @@ 170 |
171 | -

About | News | Usage | FAQ | Stats | Privacy

172 | +

About | Usage | FAQ | Privacy

173 | 174 |

Name and contact details

175 |

176 | diff --git a/templates-untranslated/about/stats.html.hbs b/templates-untranslated/about/stats.html.hbs 177 | index 89bd873..c71d3ed 100644 178 | --- a/templates-untranslated/about/stats.html.hbs 179 | +++ b/templates-untranslated/about/stats.html.hbs 180 | @@ -1,5 +1,5 @@ 181 |

182 | -

About | News | Usage | FAQ | Stats | Privacy

183 | +

About | Usage | FAQ | Stats | Privacy

184 | 185 |

Verified email addresses

186 | 187 | diff --git a/templates-untranslated/about/usage.html.hbs b/templates-untranslated/about/usage.html.hbs 188 | index 8225180..337d5c4 100644 189 | --- a/templates-untranslated/about/usage.html.hbs 190 | +++ b/templates-untranslated/about/usage.html.hbs 191 | @@ -1,5 +1,5 @@ 192 |
193 | -

About | News | Usage | FAQ | Stats | Privacy

194 | +

About | Usage | FAQ | Privacy

195 | 196 |

197 | On this page, we collect information on how to use 198 | -------------------------------------------------------------------------------- /pkgs/matrix-alertmanager/default.nix: -------------------------------------------------------------------------------- 1 | { stdenv, lib, fetchFromGitHub, makeWrapper, nodejs, yarn2nix-moretea }: 2 | 3 | yarn2nix-moretea.mkYarnPackage rec { 4 | pname = "matrix-alertmanager"; 5 | version = "0.1.0"; 6 | 7 | src = fetchFromGitHub { 8 | owner = "jaywink"; 9 | repo = pname; 10 | rev = "v${version}"; 11 | sha256 = "1kik0246zxp4cvznyl9hs142glhspxl9vx548z0ajbbnfrpm3kjn"; 12 | }; 13 | 14 | yarnNix = ./yarn.nix; 15 | yarnLock = ./yarn.lock; 16 | packageJSON = ./package.json; 17 | 18 | dontBuild = true; 19 | dontInstall = true; 20 | 21 | nativeBuildInputs = [ makeWrapper ]; 22 | 23 | distPhase = '' 24 | runHook preDist 25 | 26 | cd deps/matrix-alertmanager 27 | 28 | mkdir -p $out/{bin,lib} 29 | cp -R src/*.js $out 30 | cp -R "$node_modules" $out/lib/node_modules 31 | 32 | cat > $out/bin/matrix-alertmanager < 7 |

8 | {% endif %} 9 | - {% if user.is_superuser %} 10 | + {% if user.is_authenticated %} 11 |
12 | {% comment %} 13 | The span below exists only because d-flex will strech the child elements completely. So, we wrap 14 | them in span so that the width the span increases but the button remains original height. 15 | {% endcomment %} 16 | 17 | - {% if domain_count < 1 %} 18 | + {% if domain_count < 1 and user.is_superuser %} 19 | {% trans 'Create New Domain' %} 20 | {% else %} 21 | {% trans 'Create New List' %} 22 | diff --git a/src/postorius/tests/mailman_api_tests/test_list_new.py b/src/postorius/tests/mailman_api_tests/test_list_new.py 23 | index 4581412..be39a4b 100644 24 | --- a/src/postorius/tests/mailman_api_tests/test_list_new.py 25 | +++ b/src/postorius/tests/mailman_api_tests/test_list_new.py 26 | @@ -40,7 +40,6 @@ class ListCreationTest(ViewTestCase): 27 | self.domain = self.mm_client.create_domain('example.com') 28 | 29 | def test_permission_denied(self): 30 | - self.client.login(username='user', password='pwd') 31 | response = self.client.get(reverse('list_new')) 32 | self.assertEqual(response.status_code, 403) 33 | 34 | diff --git a/src/postorius/views/list.py b/src/postorius/views/list.py 35 | index 0fd0ee0..e99a7ae 100644 36 | --- a/src/postorius/views/list.py 37 | +++ b/src/postorius/views/list.py 38 | @@ -54,7 +54,6 @@ from django_mailman3.signals import ( 39 | from postorius.auth.decorators import ( 40 | list_moderator_required, 41 | list_owner_required, 42 | - superuser_required, 43 | ) 44 | from postorius.auth.mixins import UserShowListMembersMixin 45 | from postorius.forms import ( 46 | @@ -973,7 +972,6 @@ def _get_default_style(): 47 | 48 | 49 | @login_required 50 | -@superuser_required 51 | def list_new(request, template='postorius/lists/new.html'): 52 | """ 53 | Add a new mailing list. 
54 | -------------------------------------------------------------------------------- /pkgs/privacyidea/0001-remove-subscription-check.patch: -------------------------------------------------------------------------------- 1 | From a000ec22ebce1e49949375ed446a9169fd5f6a7e Mon Sep 17 00:00:00 2001 2 | From: Maximilian Bosch 3 | Date: Mon, 5 Jun 2023 19:26:09 +0200 4 | Subject: [PATCH] remove subscription check 5 | 6 | --- 7 | privacyidea/lib/subscriptions.py | 42 -------------------------------- 8 | 1 file changed, 42 deletions(-) 9 | 10 | diff --git a/privacyidea/lib/subscriptions.py b/privacyidea/lib/subscriptions.py 11 | index 0003ea68..ceb8309c 100644 12 | --- a/privacyidea/lib/subscriptions.py 13 | +++ b/privacyidea/lib/subscriptions.py 14 | @@ -104,19 +104,6 @@ def subscription_status(component="privacyidea", tokentype=None): 15 | 16 | :return: subscription state 17 | """ 18 | - token_count = get_tokens(assigned=True, active=True, count=True, tokentype=tokentype) 19 | - if token_count <= APPLICATIONS.get(component, 50): 20 | - return 0 21 | - 22 | - subscriptions = get_subscription(component) 23 | - if len(subscriptions) == 0: 24 | - return 1 25 | - 26 | - try: 27 | - check_subscription(component) 28 | - except SubscriptionError as exx: 29 | - log.warning(u"{0}".format(exx)) 30 | - return 2 31 | 32 | return 3 33 | 34 | @@ -259,35 +246,6 @@ def check_subscription(application, max_free_subscriptions=None): 35 | without a subscription file. If not given, the default is used. 
36 | :return: bool 37 | """ 38 | - if application.lower() in APPLICATIONS: 39 | - subscriptions = get_subscription(application) or get_subscription( 40 | - application.lower()) 41 | - # get the number of users with active tokens 42 | - token_users = get_users_with_active_tokens() 43 | - free_subscriptions = max_free_subscriptions or APPLICATIONS.get(application.lower()) 44 | - if len(subscriptions) == 0: 45 | - if subscription_exceeded_probability(token_users, free_subscriptions): 46 | - raise SubscriptionError(description="No subscription for your client.", 47 | - application=application) 48 | - else: 49 | - subscription = subscriptions[0] 50 | - expire_date = subscription.get("date_till") 51 | - if expire_date < datetime.datetime.now(): 52 | - # subscription has expired 53 | - if raise_exception_probability(subscription): 54 | - raise SubscriptionError(description="Your subscription " 55 | - "expired.", 56 | - application=application) 57 | - else: 58 | - # subscription is still valid, so check the signature. 59 | - check_signature(subscription) 60 | - allowed_tokennums = subscription.get("num_tokens") 61 | - if subscription_exceeded_probability(token_users, allowed_tokennums): 62 | - # subscription is exceeded 63 | - raise SubscriptionError(description="Too many users " 64 | - "with assigned tokens. 
" 65 | - "Subscription exceeded.", 66 | - application=application) 67 | 68 | return True 69 | 70 | -- 71 | 2.40.1 72 | 73 | -------------------------------------------------------------------------------- /pkgs/privacyidea/add-description.patch: -------------------------------------------------------------------------------- 1 | diff --git a/privacyidea/static/components/token/views/token.enroll.spass.html b/privacyidea/static/components/token/views/token.enroll.spass.html 2 | index aea3fe15..83735ade 100644 3 | --- a/privacyidea/static/components/token/views/token.enroll.spass.html 4 | +++ b/privacyidea/static/components/token/views/token.enroll.spass.html 5 | @@ -2,3 +2,9 @@ 6 | The Simple Pass Token does not take additional arguments. You only need to 7 | specify a OTP PIN. 8 |

9 | +
10 | + 11 | + 14 | +
15 | diff --git a/privacyidea/static/components/token/views/token.enroll.u2f.html b/privacyidea/static/components/token/views/token.enroll.u2f.html 16 | index 525a49dd..c167bb86 100644 17 | --- a/privacyidea/static/components/token/views/token.enroll.u2f.html 18 | +++ b/privacyidea/static/components/token/views/token.enroll.u2f.html 19 | @@ -3,3 +3,9 @@ 20 | You can register this token with any webservice and with as many web 21 | services you wish to. 22 |

23 | +
24 | + 25 | + 28 | +
29 | -------------------------------------------------------------------------------- /pkgs/prometheus-snmp-exporter-generator.nix: -------------------------------------------------------------------------------- 1 | /* 2 | Use the snmp_exporter's `generator` tool[1] to fill in an snmp.yml 3 | using MIB definitions from librenms. 4 | 5 | Pass an attribute set containing: 6 | - config, an attrset representing `generator.yml` 7 | - (optionally) extraMIBs, a list of paths to include in the Net-SNMP search path 8 | to this function, and it will produce a derivation which in turn 9 | produces `snmp.yml`. 10 | 11 | [1]: https://github.com/prometheus/snmp_exporter/tree/master/generator 12 | */ 13 | { stdenvNoCC, lib, prometheus-snmp-exporter, net-snmp, fetchFromGitHub }: 14 | { config, extraMIBs ? [] }: 15 | 16 | let 17 | librenms_source = fetchFromGitHub { 18 | owner = "librenms"; 19 | repo = "librenms"; 20 | rev = "23.10.0"; 21 | sha256 = "sha256-IssTPxDv1AsIDtxiNGcC2AvTsq8pDHlYNR2c+fCgXlI="; 22 | }; 23 | in stdenvNoCC.mkDerivation { 24 | name = "snmp.yml"; 25 | nativeBuildInputs = [ prometheus-snmp-exporter ]; 26 | configJSON = builtins.toJSON config; 27 | MIBDIRS = lib.concatStringsSep ":" extraMIBs; 28 | buildCommand = '' 29 | echo "$configJSON" > generator.yml 30 | export MIBDIRS="${net-snmp.out}/share/snmp/mibs:${librenms_source}/mibs:$MIBDIRS" 31 | generator generate 32 | mv snmp.yml $out 33 | ''; 34 | } 35 | -------------------------------------------------------------------------------- /pkgs/python-packages.nix: -------------------------------------------------------------------------------- 1 | self: super: { 2 | automx = self.callPackage ./python/automx { }; 3 | colorhash = self.callPackage ./python/colorhash { }; 4 | duckling = self.callPackage ./python/duckling { }; 5 | fbmessenger = self.callPackage ./python/fbmessenger { }; 6 | mattermostwrapper = self.callPackage ./python/mattermostwrapper { }; 7 | rasa-core = self.callPackage ./python/rasa-core { }; 8 | 
rasa-nlu = self.callPackage ./python/rasa-nlu { }; 9 | sklearn-crfsuite = self.callPackage ./python/sklearn-crfsuite { }; 10 | } 11 | -------------------------------------------------------------------------------- /pkgs/python/automx/default.nix: -------------------------------------------------------------------------------- 1 | { stdenv, lib, buildPythonPackage, fetchFromGitHub, lxml, dateutil }: 2 | 3 | buildPythonPackage rec { 4 | pname = "automx"; 5 | version = "1.1.2"; 6 | 7 | src = fetchFromGitHub { 8 | owner = "sys4"; 9 | repo = "automx"; 10 | rev = "v${version}"; 11 | sha256 = "1wmmsmfkrfxxxsjknj2bd80abfq5agrgfby8k3mfk4j2d6hizxj9"; 12 | }; 13 | 14 | propagatedBuildInputs = [ lxml dateutil ]; 15 | 16 | # no tests 17 | doCheck = false; 18 | 19 | postInstall = '' 20 | mkdir -p $out/share 21 | cp -Rv doc/man $out/share 22 | install -vD src/automx_wsgi.py -t $out/share/automx 23 | cp -Rv src/html $out/share/automx 24 | install -v src/html/index.html.en $out/share/automx/html/index.html 25 | install -vD src/conf/automx.conf* -t $out/etc 26 | ''; 27 | 28 | meta = with lib; { 29 | description = "A mail client account configuration service, combining various autoconfiguration techniques in one webservice"; 30 | homepage = "https://automx.org"; 31 | license = licenses.gpl3Plus; 32 | maintainers = with maintainers; [ globin ]; 33 | }; 34 | } 35 | -------------------------------------------------------------------------------- /pkgs/python/colorhash/default.nix: -------------------------------------------------------------------------------- 1 | { buildPythonPackage, fetchPypi }: 2 | 3 | buildPythonPackage rec { 4 | pname = "colorhash"; 5 | version = "1.0.2"; 6 | 7 | src = fetchPypi { 8 | inherit pname version; 9 | sha256 = "0r777ry4c8d24j7g7p8b95smnfr64liljfc0zvjxcpidv31jbyg0"; 10 | extension = "tar.bz2"; 11 | }; 12 | 13 | #propagatedBuildInputs = [ humanfriendly verboselogs ]; 14 | 15 | doCheck = false; 16 | } 17 | 
-------------------------------------------------------------------------------- /pkgs/python/django-allauth/0001-Automatically-link-social-login-users-against-existi.patch: -------------------------------------------------------------------------------- 1 | From 323caf1e53bb5d029a02289ba4081ed7600196ad Mon Sep 17 00:00:00 2001 2 | From: Maximilian Bosch 3 | Date: Fri, 11 Aug 2023 17:23:12 +0000 4 | Subject: [PATCH 1/2] Automatically link social login users against existing 5 | users by email 6 | 7 | This patch is tailored for how authentication works against mailman in 8 | our case. Specifically we have two kinds of users that should be allowed 9 | to authenticate: 10 | 11 | * new users that only ever authenticated with OIDC 12 | * existing users that authenticated in the past via LDAP 13 | 14 | The first case is trivial: auto-signup kicks in and the user is set up 15 | correctly. 16 | 17 | The second isn't: by default, a signup form would be opened, but 18 | this one can't be completed because the username isn't unique. To work 19 | around this, this case is caught early on (by checking if the user 20 | already exists in the database) and then linking the social account 21 | to the existing user and also doing a login rather than processing 22 | with the signup. 23 | 24 | In our case this is OK because the logic will only be used in with 25 | our OIDC provider where email addresses are guaranteed to be unique 26 | company-wide. 27 | 28 | Both signup and auto-signup immediately return True because that's 29 | the desired behavior and at least the signup check would otherwise 30 | always return false because our mailman has arbitrary signups 31 | disabled. 32 | 33 | While it's OK to use emails as identifiers here from a security 34 | perspective because we also control the source, it should be noted 35 | that when a user request to change their name (e.g. 
after marriage), 36 | the entries in the DB must be fixed up manually (though it's worth 37 | noting that mailman's DB had to be touched in this case already). 38 | --- 39 | allauth/socialaccount/adapter.py | 3 ++- 40 | allauth/socialaccount/helpers.py | 25 +++++++++++++++++++++++++ 41 | 2 files changed, 27 insertions(+), 1 deletion(-) 42 | 43 | diff --git a/allauth/socialaccount/adapter.py b/allauth/socialaccount/adapter.py 44 | index c6f6dbd..06e1f35 100644 45 | --- a/allauth/socialaccount/adapter.py 46 | +++ b/allauth/socialaccount/adapter.py 47 | @@ -156,6 +156,7 @@ class DefaultSocialAccountAdapter(object): 48 | ) 49 | 50 | def is_auto_signup_allowed(self, request, sociallogin): 51 | + return True 52 | # If email is specified, check for duplicate and if so, no auto signup. 53 | auto_signup = app_settings.AUTO_SIGNUP 54 | return auto_signup 55 | @@ -167,7 +168,7 @@ class DefaultSocialAccountAdapter(object): 56 | Next to simply returning True/False you can also intervene the 57 | regular flow by raising an ImmediateHttpResponse 58 | """ 59 | - return get_account_adapter(request).is_open_for_signup(request) 60 | + return True 61 | 62 | def get_signup_form_initial_data(self, sociallogin): 63 | user = sociallogin.user 64 | diff --git a/allauth/socialaccount/helpers.py b/allauth/socialaccount/helpers.py 65 | index a2749b2..587ad6a 100644 66 | --- a/allauth/socialaccount/helpers.py 67 | +++ b/allauth/socialaccount/helpers.py 68 | @@ -1,4 +1,5 @@ 69 | from django.contrib import messages 70 | +from django.contrib.auth import get_user_model 71 | from django.forms import ValidationError 72 | from django.http import HttpResponseRedirect 73 | from django.shortcuts import render 74 | @@ -22,6 +23,18 @@ from allauth.socialaccount.models import SocialLogin 75 | from allauth.socialaccount.providers.base import AuthError, AuthProcess 76 | 77 | 78 | +def _find_existing_user_by_email(email): 79 | + """ 80 | + Searches for an existing user by the email, but in contrast to 
`..utils.email_address_exists` 81 | + this actually returns the user model. 82 | + """ 83 | + email_field = account_settings.USER_MODEL_EMAIL_FIELD 84 | + assert email_field 85 | + users = get_user_model().objects.filter(**{email_field + "__iexact": email}) 86 | + assert len(users) < 2 87 | + return users.first() 88 | + 89 | + 90 | def _process_auto_signup(request, sociallogin): 91 | auto_signup = get_adapter().is_auto_signup_allowed(request, sociallogin) 92 | if not auto_signup: 93 | @@ -29,6 +42,18 @@ def _process_auto_signup(request, sociallogin): 94 | email = user_email(sociallogin.user) 95 | # Let's check if auto_signup is really possible... 96 | if email: 97 | + # Does the user already exist in database (i.e. was created via LDAP in the past?) 98 | + if (existing_user := _find_existing_user_by_email(email)) is not None: 99 | + # when a user (givenname.surname@mayflower.de) exists in the database, it 100 | + # means that the user was authenticated via LDAP in the past. 101 | + # That means we link the existing account with this one. 102 | + sociallogin.connect(request, existing_user) 103 | + signals.social_account_added.send( 104 | + sender=SocialLogin, request=request, sociallogin=sociallogin 105 | + ) 106 | + resp = complete_social_signup(request, sociallogin) 107 | + return True, resp 108 | + 109 | assessment = assess_unique_email(email) 110 | if assessment is True: 111 | # Auto signup is fine. 
112 | -------------------------------------------------------------------------------- /pkgs/python/django-allauth/0002-Implement-superuser-to-oidc-mapping-similar-to-the-a.patch: -------------------------------------------------------------------------------- 1 | From 1d1b16066fe3f75a178c9aad324b7200f0ceafa2 Mon Sep 17 00:00:00 2001 2 | From: Maximilian Bosch 3 | Date: Sat, 12 Aug 2023 09:42:55 +0000 4 | Subject: [PATCH 2/2] Implement superuser-to-oidc mapping similar to the 5 | approach from LDAP auth 6 | 7 | With LDAP auth it was possible to define a `superuser group`, i.e. 8 | each member of that group became a superuser in mailman. This patch 9 | replicates this behavior with OIDC authentication: 10 | 11 | * If no `groups`-output is part of the response (i.e. `extra_data`), 12 | nothing happens - as before. 13 | * Otherwise it's checked if the user is member of a hardcoded `admin` 14 | group. And depending on whether or not, the superuser flag in `auth_user` 15 | is true or false. 16 | --- 17 | allauth/socialaccount/helpers.py | 9 +++++++++ 18 | 1 file changed, 9 insertions(+) 19 | 20 | diff --git a/allauth/socialaccount/helpers.py b/allauth/socialaccount/helpers.py 21 | index 587ad6a..00f2e18 100644 22 | --- a/allauth/socialaccount/helpers.py 23 | +++ b/allauth/socialaccount/helpers.py 24 | @@ -252,6 +252,13 @@ def _social_login_redirect(request, sociallogin): 25 | return HttpResponseRedirect(next_url) 26 | 27 | 28 | +def _update_superuser_status(sociallogin): 29 | + user = _find_existing_user_by_email(user_email(sociallogin.user)) 30 | + is_superuser = 'admin' in sociallogin.account.extra_data.get('groups', []) 31 | + user.is_superuser = is_superuser 32 | + user.save() 33 | + 34 | + 35 | def _complete_social_login(request, sociallogin): 36 | if request.user.is_authenticated: 37 | get_account_adapter(request).logout(request) 38 | @@ -259,9 +266,11 @@ def _complete_social_login(request, sociallogin): 39 | record_authentication(request, sociallogin) 40 | # 
Login existing user 41 | ret = _login_social_account(request, sociallogin) 42 | + _update_superuser_status(sociallogin) 43 | else: 44 | # New social user 45 | ret = _process_signup(request, sociallogin) 46 | + _update_superuser_status(sociallogin) 47 | return ret 48 | 49 | 50 | -------------------------------------------------------------------------------- /pkgs/python/django-allauth/0003-Prohibit-authentication-against-local-users.patch: -------------------------------------------------------------------------------- 1 | From cc2f03583a3feb42467ad0fea92bd731d7efde17 Mon Sep 17 00:00:00 2001 2 | From: Maximilian Bosch 3 | Date: Sat, 12 Aug 2023 14:56:59 +0200 4 | Subject: [PATCH 3/3] Prohibit authentication against local users 5 | 6 | To avoid confusion: users are only supposed to authenticate themselves 7 | via OIDC. 8 | --- 9 | allauth/account/forms.py | 2 ++ 10 | 1 file changed, 2 insertions(+) 11 | 12 | diff --git a/allauth/account/forms.py b/allauth/account/forms.py 13 | index 5bfe86b..b9fb343 100644 14 | --- a/allauth/account/forms.py 15 | +++ b/allauth/account/forms.py 16 | @@ -179,6 +179,7 @@ class LoginForm(forms.Form): 17 | return ret 18 | 19 | def clean(self): 20 | + raise exceptions.PermissionDenied() 21 | super(LoginForm, self).clean() 22 | if self._errors: 23 | return 24 | @@ -200,6 +201,7 @@ class LoginForm(forms.Form): 25 | return self.cleaned_data 26 | 27 | def login(self, request, redirect_url=None): 28 | + raise exceptions.PermissionDenied() 29 | credentials = self.user_credentials() 30 | extra_data = { 31 | field: credentials.get(field) 32 | 33 | -- 34 | 2.40.1 35 | 36 | -------------------------------------------------------------------------------- /pkgs/python/duckling/default.nix: -------------------------------------------------------------------------------- 1 | { stdenv, lib, fetchFromGitHub, buildPythonPackage 2 | , pytestrunner, dateutil, JPype1, pytestcov, pytest 3 | , autoPatchelfHook 4 | }: 5 | 6 | buildPythonPackage rec { 7 | pname 
= "duckling"; 8 | version = "1.8.0"; 9 | 10 | src = fetchFromGitHub { 11 | owner = "FraBle"; 12 | repo = "python-duckling"; 13 | rev = "v${version}"; 14 | sha256 = "1w1ckjs3i6hmq6d6pc8mzckvqckp69a69rmj9j105n042f9rz1z0"; 15 | }; 16 | 17 | propagatedBuildInputs = [ dateutil JPype1 pytestrunner ]; 18 | checkInputs = [ pytestcov pytest ]; 19 | doCheck = false; 20 | 21 | meta = with lib; { 22 | description = "Python wrapper for wit.ai's Duckling Clojure library"; 23 | license = licenses.asl20; 24 | maintainers = with maintainers; [ globin ]; 25 | }; 26 | } 27 | -------------------------------------------------------------------------------- /pkgs/python/fbmessenger/default.nix: -------------------------------------------------------------------------------- 1 | { buildPythonPackage, fetchPypi, requests, responses, pytestcov, pytest 2 | }: 3 | 4 | buildPythonPackage rec { 5 | pname = "fbmessenger"; 6 | version = "5.1.0"; 7 | 8 | src = fetchPypi { 9 | inherit pname version; 10 | sha256 = "1rs5kplli85y23ji9b2kdjl1ws8478p4jx73niy9b2sgjid2srs4"; 11 | }; 12 | 13 | propagatedBuildInputs = [ requests ]; 14 | checkInputs = [ responses pytestcov pytest ]; 15 | 16 | postPatch = '' 17 | sed -i /pytest-catchlog/d setup.py 18 | ''; 19 | 20 | doCheck = false; 21 | } 22 | -------------------------------------------------------------------------------- /pkgs/python/mailmanclient/default.nix: -------------------------------------------------------------------------------- 1 | { stdenv, lib, buildPythonPackage, fetchFromGitLab, httplib2, six }: 2 | 3 | buildPythonPackage rec { 4 | name = "mailmanclient-${version}"; 5 | version = "3.2.1"; 6 | 7 | src = fetchFromGitLab { 8 | owner = "mailman"; 9 | repo = "mailmanclient"; 10 | rev = version; 11 | sha256 = "13in4lf7vsp9cdbdy0ld5dw4sj3aqn3jjc1fs8hngh7rb12i15j8"; 12 | }; 13 | 14 | propagatedBuildInputs = [ httplib2 six ]; 15 | 16 | # needs access to mailman REST API 17 | doCheck = false; 18 | 19 | meta = with lib; { 20 | homepage = 
"http://www.gnu.org/software/mailman/"; 21 | description = "REST client for driving Mailman 3"; 22 | license = licenses.lgpl3; 23 | platforms = platforms.linux; 24 | maintainers = [ maintainers.globin ]; 25 | }; 26 | } 27 | -------------------------------------------------------------------------------- /pkgs/python/mattermostwrapper/default.nix: -------------------------------------------------------------------------------- 1 | { buildPythonPackage, fetchPypi, requests }: 2 | 3 | buildPythonPackage rec { 4 | pname = "mattermostwrapper"; 5 | version = "2.1"; 6 | 7 | src = fetchPypi { 8 | inherit pname version; 9 | sha256 = "0w0d8fnv70w57x740bsr217pylzb7jfmadrkqs3ci9c2d8z6r7va"; 10 | }; 11 | 12 | propagatedBuildInputs = [ requests ]; 13 | } 14 | -------------------------------------------------------------------------------- /pkgs/python/rasa-core/default.nix: -------------------------------------------------------------------------------- 1 | { buildPythonPackage, fetchFromGitHub, coloredlogs, graphviz, Keras 2 | , ConfigArgParse, requests, networkx, fakeredis, typing, flask-cors 3 | , flask, rasa-nlu, slackclient, APScheduler, pykwalify, jsonpickle 4 | , twilio, fbmessenger, colorhash, python-telegram-bot, scikitlearn 5 | , h5py, mattermostwrapper, ruamel_yaml, tensorflow, grpcio, termcolor 6 | , astor, gast, pygraphviz 7 | }: 8 | 9 | buildPythonPackage rec { 10 | pname = "rasa-core"; 11 | version = "0.9.6"; 12 | 13 | src = fetchFromGitHub { 14 | owner = "RasaHQ"; 15 | repo = "rasa_core"; 16 | rev = version; 17 | sha256 = "06mncqagnh7iysx74f9033gffbax661zbx4x7r18g24q5182xpib"; 18 | }; 19 | 20 | propagatedBuildInputs = [ 21 | coloredlogs graphviz Keras ConfigArgParse requests networkx 22 | fakeredis typing flask-cors flask rasa-nlu slackclient APScheduler 23 | pykwalify jsonpickle twilio fbmessenger colorhash python-telegram-bot 24 | scikitlearn h5py mattermostwrapper ruamel_yaml tensorflow grpcio 25 | termcolor astor gast 26 | ]; 27 | checkInputs = [ pygraphviz 
]; 28 | doCheck = false; 29 | 30 | # postPatch = '' 31 | # substituteInPlace setup.py \ 32 | # --replace 'pykwalify<=1.6.0' pykwalify 33 | # ''; 34 | } 35 | -------------------------------------------------------------------------------- /pkgs/python/rasa-nlu/default.nix: -------------------------------------------------------------------------------- 1 | { buildPythonPackage, fetchFromGitHub, numpy, gevent, matplotlib 2 | , future, typing, boto3, tqdm, jsonschema, pyyaml, cloudpickle 3 | , klein, requests, pathlib2, treq, duckling, sklearn-crfsuite 4 | }: 5 | 6 | buildPythonPackage rec { 7 | pname = "rasa-nlu"; 8 | version = "0.12.3"; 9 | 10 | src = fetchFromGitHub { 11 | owner = "RasaHQ"; 12 | repo = "rasa_nlu"; 13 | rev = version; 14 | sha256 = "1pck78x208178j1hf6v7i4djc161w7rzzy64w9kydz3875pflbxy"; 15 | }; 16 | 17 | patchPhase = '' 18 | sed -i /pathlib/d setup.py 19 | ''; 20 | 21 | propagatedBuildInputs = [ 22 | numpy gevent matplotlib future typing boto3 tqdm jsonschema 23 | pyyaml cloudpickle klein requests pathlib2 duckling sklearn-crfsuite 24 | ]; 25 | 26 | doCheck = false; 27 | checkInputs = [ 28 | treq 29 | ]; 30 | } 31 | -------------------------------------------------------------------------------- /pkgs/python/sklearn-crfsuite/default.nix: -------------------------------------------------------------------------------- 1 | { stdenv, lib, fetchPypi, buildPythonPackage, tabulate, python-crfsuite, tqdm, six, scikitlearn, pytest }: 2 | 3 | buildPythonPackage rec { 4 | pname = "sklearn-crfsuite"; 5 | version = "0.3.6"; 6 | 7 | src = fetchPypi { 8 | inherit pname version; 9 | sha256 = "07l62xvvw4cbpjjqls2jif77h15c3j4m4qwslxwaf0ay0p9sln9g"; 10 | }; 11 | 12 | propagatedBuildInputs = [ tabulate python-crfsuite tqdm six ]; 13 | checkInputs = [ scikitlearn pytest ]; 14 | 15 | checkPhase = '' 16 | py.test 17 | ''; 18 | 19 | meta = with lib; { 20 | description = "A thin CRFsuite wrapper which provides interface simlar to scikit-learn"; 21 | license = licenses.mit; 
22 | maintainers = with maintainers; [ globin ]; 23 | }; 24 | } 25 | -------------------------------------------------------------------------------- /pkgs/service-overview/assets/css/custom.css: -------------------------------------------------------------------------------- 1 | @font-face { 2 | font-family: 'Roboto'; 3 | font-style: normal; 4 | font-weight: 400; 5 | src: local('Roboto'), local('Roboto-Regular'), url(../fonts/jfcsbkflji4mp59nc2v5vgajb51bjxdf-Roboto-Regular.ttf) format('truetype'); 6 | } 7 | body { 8 | font-family: 'Roboto'; 9 | } 10 | .logo { 11 | margin: 5px; 12 | height: 40px; 13 | } 14 | .logo-text { 15 | margin-top: 20px; 16 | maring-bottom: 20px; 17 | } 18 | a, a:visited { 19 | color: #302ecd; 20 | } 21 | a.entry-address { 22 | color: #50596c; 23 | } 24 | .btn, .btn:hover, .btn:focus, .btn:active { 25 | color: #fff; 26 | background: #ec7404; 27 | border-color: #d46905; 28 | } 29 | .btn-primary, .btn-primary:hover, .btn-primary:focus, .btn-primary:active { 30 | color: #fff; 31 | background: #ec7404; 32 | border-color: #d46905; 33 | } 34 | .btn.btn-primary, .btn.btn-primary:hover, .btn.btn-primary:focus, .btn.btn-primary:active { 35 | color: #fff; 36 | background: #ec7404; 37 | border-color: #d46905; 38 | } 39 | . 
40 | -------------------------------------------------------------------------------- /pkgs/service-overview/assets/img/favicons/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mayflower/nixexprs/96d17d5aab0276b07d655ef8caaa5de96d5249df/pkgs/service-overview/assets/img/favicons/favicon.ico -------------------------------------------------------------------------------- /pkgs/service-overview/assets/img/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mayflower/nixexprs/96d17d5aab0276b07d655ef8caaa5de96d5249df/pkgs/service-overview/assets/img/logo.png -------------------------------------------------------------------------------- /pkgs/service-overview/default.nix: -------------------------------------------------------------------------------- 1 | { stdenv, lib, fetchFromGitHub, fetchurl, runCommand, writeText, services ? {} }: 2 | 3 | let 4 | header = import templates/header.nix { inherit lib; }; 5 | footer = import templates/footer.nix { inherit lib; }; 6 | 7 | entry = name: val: import templates/entry.nix ({ inherit lib name; } // val); 8 | 9 | order = [ 10 | "production" 11 | "external" 12 | "beta" 13 | "alpha" 14 | "deprecated" 15 | "obsolete" 16 | ]; 17 | 18 | generateEntries = srvAttrs: lib.concatStrings ( 19 | lib.flatten (map (stat: lib.mapAttrsToList (n: v: ( 20 | if v.status == stat then entry n v else "" 21 | )) srvAttrs) order)); 22 | 23 | #genHtml = services: minifyHTML '' 24 | genHtml = services: writeText "index.html" '' 25 | ${header} 26 | ${generateEntries services} 27 | ${footer} 28 | ''; 29 | 30 | spectreCSS = fetchFromGitHub { 31 | owner = "picturepan2"; 32 | repo = "spectre"; 33 | rev = "v0.5.3"; 34 | sha256 = "09sdgjpcai2l1ydmkin1rgac83q4k0h8xbrkknqnx9nhc0v58a7v"; 35 | }; 36 | 37 | robotoFont = fetchurl { 38 | url = 
"https://github.com/google/fonts/raw/08b5f47811dfdea8be45ca44836c4229fc306953/apache/roboto/Roboto-Regular.ttf"; 39 | sha256 = "15bdh52jl469fbdqwib5ayd4m0j7dljss8ixdc8c5njp8r053s3r"; 40 | }; 41 | in 42 | 43 | stdenv.mkDerivation rec { 44 | name = "service-overview-${version}"; 45 | version = "0.1.0"; 46 | src = ./.; 47 | 48 | dontConfigure = true; 49 | dontBuild = true; 50 | 51 | installPhase = '' 52 | mkdir -p $out/assets/{fonts,css} 53 | cp -R $src/assets/* $out/assets 54 | cp ${robotoFont} $out/assets/fonts 55 | cp ${spectreCSS}/dist/* $out/assets/css 56 | cp ${genHtml services} $out/index.html 57 | ''; 58 | 59 | passthru = { inherit spectreCSS robotoFont; }; 60 | } 61 | -------------------------------------------------------------------------------- /pkgs/service-overview/demo.nix: -------------------------------------------------------------------------------- 1 | # build with `nix-build demo.nix` 2 | # then open `result/index.html` in your browser 3 | 4 | with import {}; 5 | 6 | callPackage ./default.nix { 7 | services = { 8 | "Example Service" = { 9 | description = "very fancy service"; 10 | status="alpha"; 11 | address="http://localhost:2342"; 12 | }; 13 | OtherService = { 14 | description = "some other fancy service"; 15 | status="beta"; 16 | address="https://domain.tld"; 17 | }; 18 | }; 19 | } 20 | -------------------------------------------------------------------------------- /pkgs/service-overview/templates/entry.nix: -------------------------------------------------------------------------------- 1 | { lib 2 | , name ? "" 3 | , address ? "" 4 | , status ? "" 5 | , description ? 
"" 6 | , ...}: 7 | 8 | let 9 | labelFor = s: { 10 | "alpha" = "warning"; 11 | "beta" = "warning"; 12 | "production" = "success"; 13 | "deprecated" = "error"; 14 | "obsolete" = "error"; 15 | "external" = "primary"; 16 | "" = ""; 17 | }.${s}; 18 | 19 | toKebabCase = s: lib.toLower (lib.replaceStrings [" "] ["-"] s); 20 | in 21 | 22 | '' 23 | 24 | 25 | ${name} 26 | Info 27 | 51 | 52 | 53 | ${address} 54 | 55 | ${description} 56 | 57 | 58 | ${status} 59 | 60 | 61 | 62 | '' 63 | -------------------------------------------------------------------------------- /pkgs/service-overview/templates/footer.nix: -------------------------------------------------------------------------------- 1 | { lib, extraHtml ? "" }: 2 | 3 | lib.optionalString (extraHtml != "") extraHtml + '' 4 | 5 | 6 |
7 |
8 | 9 | 10 | '' 11 | -------------------------------------------------------------------------------- /pkgs/service-overview/templates/header.nix: -------------------------------------------------------------------------------- 1 | { lib 2 | , lang ? "en" 3 | , title ? "Service Overview" 4 | , robots ? "noindex, nofollow" 5 | , author ? "" }: 6 | 7 | '' 8 | 9 | 10 | 11 | ${title} 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | '' + lib.optionalString (author != "") '' 23 | 24 | '' + '' 25 | 26 | 27 | 37 |
38 |
39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | '' 50 | -------------------------------------------------------------------------------- /tests/wireguard-star.nix: -------------------------------------------------------------------------------- 1 | { testers, ... }: 2 | 3 | let 4 | inherit (testers) nixosTest; 5 | keypairs = { 6 | peer0 = { 7 | private = "QBgNpLmX0D6mFItuXb5HiD76fCAYGO19gee925xYhXs="; 8 | public = "ulMFud03eE41dGmK7c7tU90LFQ9zv5e0ebLLGzX+7xA="; 9 | }; 10 | peer1 = { 11 | private = "gFWHdy/GegWlN0OoAa/v46g1C2DnR3WBcYlMmaapOVY="; 12 | public = "jbXTjY7xhchg7pW1QM2lOUywxTWEyZzfooCQCUmas10="; 13 | }; 14 | container0 = { 15 | private = "ECmlWBOsunu1xHfDTh55hjhrtEK1VAuenOGmgjYkEFw="; 16 | public = "qVJWZiRm+IWjicx+uxVxUsQq8FwgG39NKrXXEG7G6iA="; 17 | }; 18 | }; 19 | base = { 20 | imports = [ ../modules/wireguard.nix ]; 21 | networking.useNetworkd = true; 22 | mayflower.wireguard = { 23 | enable = true; 24 | star.test.enable = true; 25 | }; 26 | }; 27 | in nixosTest { 28 | name = "wireguard-star"; 29 | nodes = { 30 | peer0 = { nodes, pkgs, ... }: { 31 | virtualisation.vlans = [ 1 ]; 32 | imports = [ base ]; 33 | networking.useNetworkd = true; 34 | mayflower.wireguard.star.test = { 35 | isServer = true; 36 | wireguard = { 37 | tunnelIPv4Address = "10.99.0.1"; 38 | tunnelIPv6Address = "fc00::99:1"; 39 | publicKey = keypairs.peer0.public; 40 | privateKeyFile = "${pkgs.writeText "supersecret" keypairs.peer0.private}"; 41 | endpoint = { 42 | hostname = "192.168.1.1"; 43 | listenPort = 23542; 44 | }; 45 | }; 46 | }; 47 | }; 48 | peer1 = { nodes, pkgs, inputs, ... 
}: { 49 | virtualisation.vlans = [ 1 ]; 50 | imports = [ base ]; 51 | mayflower.wireguard.star.test.wireguard = { 52 | tunnelIPv4Address = "10.99.0.2"; 53 | tunnelIPv6Address = "fc00::99:2"; 54 | publicKey = keypairs.peer1.public; 55 | privateKeyFile = "${pkgs.writeText "supersecret" keypairs.peer1.private}"; 56 | }; 57 | networking.firewall.extraCommands = '' 58 | ip46tables -P FORWARD DROP 59 | ip46tables -F FORWARD 60 | ip46tables -A FORWARD -i ve-+ -m comment --comment "outgoing traffic from container0" -j ACCEPT 61 | ip46tables -A FORWARD -o ve-+ -m conntrack --ctstate RELATED,ESTABLISHED -m comment --comment "return traffic to container0" -j ACCEPT 62 | ip46tables -t nat -F POSTROUTING 63 | ip46tables -t nat -A POSTROUTING -o eth1 -j MASQUERADE 64 | ''; 65 | containers.container0 = { 66 | autoStart = true; 67 | privateNetwork = true; 68 | hostAddress = "172.29.23.1"; 69 | hostAddress6 = "fc23::23:1"; 70 | localAddress = "172.29.23.10"; 71 | localAddress6 = "fc23::23:10"; 72 | config = { config, lib, ... 
}: { 73 | _module.args = { nodes = with lib; mapAttrs (const (config: { inherit config; })) nodes; }; 74 | imports = [ base ]; 75 | services.resolved.enable = false; 76 | mayflower.wireguard.star.test.wireguard = { 77 | tunnelIPv4Address = "10.99.0.3"; 78 | tunnelIPv6Address = "fc00::99:3"; 79 | publicKey = keypairs.container0.public; 80 | privateKeyFile = "${pkgs.writeText "supersecret" keypairs.container0.private}"; 81 | }; 82 | }; 83 | }; 84 | }; 85 | }; 86 | testScript = let 87 | runInContainer = container: command: "systemd-run -M ${container} --pty -- /bin/sh --login -c '${command}' >&2"; 88 | in '' 89 | start_all() 90 | 91 | peer0.wait_for_unit("multi-user.target") 92 | peer1.wait_for_unit("multi-user.target") 93 | 94 | with subtest("Basic connectivity"): 95 | peer0.wait_until_succeeds("ping -c4 >&2 192.168.1.1") 96 | peer0.wait_until_succeeds("ping -c4 >&2 192.168.1.2") 97 | peer1.wait_until_succeeds("ping -c4 >&2 192.168.1.1") 98 | peer1.wait_until_succeeds("ping -c4 >&2 192.168.1.2") 99 | 100 | with subtest("Container connectivity"): 101 | peer1.succeed("${runInContainer "container0" "ping -c4 172.29.23.10"}") 102 | peer1.succeed("${runInContainer "container0" "ping -c4 172.29.23.1"}") 103 | peer1.succeed("${runInContainer "container0" "ping -c4 192.168.1.2"}") 104 | peer1.succeed("${runInContainer "container0" "ping -c4 192.168.1.1"}") 105 | 106 | with subtest("WireGuard traffic between peers"): 107 | peer0.wait_until_succeeds("ping -c4 >&2 10.99.0.1") 108 | peer0.wait_until_succeeds("ping -c4 >&2 10.99.0.2") 109 | peer1.wait_until_succeeds("ping -c4 >&2 10.99.0.2") 110 | peer1.wait_until_succeeds("ping -c4 >&2 10.99.0.1") 111 | peer1.wait_until_succeeds("ping -c4 >&2 fc00::99:2") 112 | peer1.wait_until_succeeds("ping -c4 >&2 fc00::99:1") 113 | peer0.wait_until_succeeds("ping -c4 >&2 fc00::99:1") 114 | peer0.wait_until_succeeds("ping -c4 >&2 fc00::99:2") 115 | 116 | with subtest("WireGuard traffic from/to container"): 117 | 
peer1.succeed("${runInContainer "container0" "ping -c4 >&2 10.99.0.1"}") 118 | peer1.succeed("${runInContainer "container0" "ping -c4 >&2 fc00::99:1"}") 119 | 120 | with subtest("/etc/hosts is configured correctly"): 121 | peer1.succeed("ping -c4 test-server >&2") 122 | peer1.succeed("${runInContainer "container0" "ping -c4 test-server >&2"}") 123 | 124 | peer0.shutdown() 125 | peer1.shutdown() 126 | ''; 127 | } 128 | --------------------------------------------------------------------------------
Service
Address
Info
Status