├── .gitignore ├── CODEOWNERS ├── LICENSE ├── README.md ├── default.nix ├── deployments └── deployers.nix ├── example ├── .gitignore ├── clusters │ └── example.nix ├── create-aws.sh ├── create-libvirtd.sh ├── default.nix ├── deployments │ ├── example-aws.nix │ └── example-libvirtd.nix ├── globals.nix ├── nix │ ├── default.nix │ ├── sources.json │ └── sources.nix ├── overlays │ ├── default.nix │ └── overlay-list.nix ├── release.nix ├── shell.nix └── static │ ├── dead-mans-snitch.nix │ ├── default.nix │ ├── grafana-creds.nix │ ├── oauth.nix │ └── pager-duty.nix ├── flake.lock ├── flake.nix ├── globals-deployers.nix ├── modules ├── aws.nix ├── clickhouse-custom.nix ├── common.nix ├── default.nix ├── deployer.nix ├── gen-graylog-creds.nix ├── grafana │ └── generic │ │ ├── nginx-basic.json │ │ ├── nginx-vts.json │ │ ├── node-system-dashboard.json │ │ └── varnish.json ├── graylog │ ├── graylogConfig.json │ └── graylogPreload.sh ├── monitoring-exporters.nix ├── monitoring-services.nix ├── nginx │ ├── cardano-large.svg │ ├── cardano-small.svg │ └── monitoring-index-template.html ├── oauth.nix ├── sentry │ ├── default.nix │ └── services │ │ ├── sentry.nix │ │ ├── snuba.nix │ │ └── symbolicator.nix ├── vim-michael-bishop.nix ├── vim-michael-fellinger.nix └── vims.nix ├── nix ├── default.nix ├── sources.json └── sources.nix ├── overlays ├── aws-instances.json ├── default.nix ├── nginx-monitoring.nix ├── nixops.nix ├── overlay-list.nix ├── packages.nix ├── rust.nix └── ssh-keys.nix ├── physical ├── aws │ ├── c4.large.nix │ ├── c5.2xlarge.nix │ ├── c5.4xlarge.nix │ ├── c5.9xlarge.nix │ ├── c5a.4xlarge.nix │ ├── common.nix │ ├── default.nix │ ├── m5.12xlarge.nix │ ├── m5.2xlarge.nix │ ├── m5.4xlarge.nix │ ├── m5.8xlarge.nix │ ├── m5.xlarge.nix │ ├── m5ad.4xlarge.nix │ ├── m5ad.xlarge.nix │ ├── r5.2xlarge.nix │ ├── r5.large.nix │ ├── r5.xlarge.nix │ ├── r5a.2xlarge.nix │ ├── r5a.xlarge.nix │ ├── security-groups │ │ ├── allow-all-to-tcp-port.nix │ │ ├── allow-all-to-udp-port.nix │ 
│ ├── allow-all.nix │ │ ├── allow-deployer-ssh.nix │ │ ├── allow-graylog.nix │ │ ├── allow-monitoring-collection.nix │ │ ├── allow-public-www-https.nix │ │ ├── allow-to-tcp-port.nix │ │ └── default.nix │ ├── t2.large.nix │ ├── t2.nano.nix │ ├── t2.xlarge.nix │ ├── t3.2xlarge-monitor.nix │ ├── t3.2xlarge.nix │ ├── t3.xlarge.nix │ ├── t3a.2xlarge.nix │ ├── t3a.large.nix │ ├── t3a.medium.nix │ ├── t3a.nano.nix │ ├── t3a.small.nix │ ├── t3a.xlarge-monitor.nix │ └── t3a.xlarge.nix ├── default.nix ├── libvirtd │ ├── common.nix │ ├── default.nix │ ├── large.nix │ ├── medium.nix │ └── tiny.nix └── packet │ ├── c1.small.nix │ ├── default.nix │ ├── s1.large.nix │ └── t1.small.nix ├── pkgs ├── sentry │ ├── default.nix │ ├── executable.patch │ ├── requirements.nix │ ├── requirements_frozen.txt │ ├── requirements_override.nix │ ├── semaphore │ │ ├── default.nix │ │ ├── python.nix │ │ ├── rust.nix │ │ ├── semaphore.patch │ │ ├── setup-py.patch │ │ └── source.nix │ ├── symbolic │ │ ├── Cargo.lock │ │ ├── default.nix │ │ ├── python.nix │ │ ├── python.patch │ │ ├── rust.nix │ │ ├── rust.patch │ │ └── source.nix │ └── xmlsec │ │ ├── lxml-workaround.patch │ │ └── no-black-format.patch ├── snuba │ ├── configurable-host.patch │ ├── dashboard.patch │ ├── default.nix │ ├── requirements.nix │ ├── requirements_frozen.txt │ └── requirements_override.nix └── symbolicator │ └── default.nix ├── release.nix ├── roles ├── default.nix └── monitor.nix ├── scripts ├── gen-graylog-creds.nix └── gen-sentry-secret-key.nix └── shell.nix /.gitignore: -------------------------------------------------------------------------------- 1 | result* 2 | secret* 3 | keys 4 | *.swp 5 | -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @devops 2 | -------------------------------------------------------------------------------- /LICENSE: 
-------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 
39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ops-lib 2 | NixOps deployment configuration library for IOHK devops 3 | -------------------------------------------------------------------------------- /default.nix: -------------------------------------------------------------------------------- 1 | { system ? builtins.currentSystem 2 | , crossSystem ? null 3 | , config ? {} 4 | , sourcesOverride ? {} 5 | , pkgs ? import ./nix { inherit system crossSystem config sourcesOverride; } 6 | , withRustOverlays ? 
false 7 | }: 8 | with pkgs; { 9 | inherit nixops nginxStable nginxMainline; 10 | overlays = import ./overlays sourcePaths withRustOverlays; 11 | shell = mkShell { 12 | buildInputs = [ niv nixops nix dnsutils ]; 13 | NIX_PATH = "nixpkgs=${path}"; 14 | NIXOPS_DEPLOYMENT = "${globals.deploymentName}"; 15 | }; 16 | } 17 | -------------------------------------------------------------------------------- /deployments/deployers.nix: -------------------------------------------------------------------------------- 1 | let 2 | region = "eu-central-1"; 3 | org = "IOHK"; 4 | accessKeyId = "root-account"; 5 | pkgs = import ../nix { config.allowUnfree = true; }; 6 | in { 7 | defaults = { resources, name, config, lib, nodes, ... }: { 8 | options = { 9 | local.username = lib.mkOption { 10 | type = lib.types.str; 11 | }; 12 | }; 13 | imports = [ 14 | ../modules/deployer.nix 15 | ../modules/vims.nix 16 | ]; 17 | config = { 18 | deployment.targetEnv = "ec2"; 19 | deployment.ec2 = { 20 | inherit region accessKeyId; 21 | keyPair = resources.ec2KeyPairs.deployers; 22 | instanceType = lib.mkDefault "r5a.xlarge"; 23 | ebsInitialRootDiskSize = 200; 24 | elasticIPv4 = resources.elasticIPs."${name}-ip"; 25 | securityGroups = with resources; [ 26 | ec2SecurityGroups.allow-ssh 27 | ec2SecurityGroups.allow-wireguard 28 | ]; 29 | }; 30 | boot.loader.grub.device = lib.mkForce "/dev/nvme0n1"; 31 | programs = { 32 | bash.interactiveShellInit = '' 33 | eval "$(direnv hook bash)" 34 | ''; 35 | fzf = { 36 | fuzzyCompletion = true; 37 | keybindings = true; 38 | }; 39 | starship = { 40 | enable = true; 41 | settings = { 42 | git_commit = { 43 | tag_disabled = false; 44 | only_detached = false; 45 | }; 46 | git_metrics = { 47 | disabled = false; 48 | }; 49 | memory_usage = { 50 | disabled = false; 51 | format = "via $symbol[\${ram_pct}]($style) "; 52 | threshold = -1; 53 | }; 54 | shlvl = { 55 | disabled = false; 56 | symbol = "↕"; 57 | threshold = -1; 58 | }; 59 | status = { 60 | disabled = false; 61 | 
map_symbol = true; 62 | pipestatus = true; 63 | }; 64 | time = { 65 | disabled = false; 66 | format = "[\\[ $time \\]]($style) "; 67 | }; 68 | }; 69 | }; 70 | }; 71 | # Used by starship for fonts 72 | fonts.packages = with pkgs; [ 73 | (nerdfonts.override {fonts = ["FiraCode"];}) 74 | ]; 75 | environment = { 76 | systemPackages = with pkgs; [ 77 | direnv 78 | ]; 79 | variables.DEPLOYER_IP = toString config.networking.publicIPv4; 80 | }; 81 | nixpkgs = { inherit pkgs; }; 82 | users.users = { 83 | ${config.local.username} = { 84 | isNormalUser = true; 85 | openssh.authorizedKeys.keys = with pkgs.iohk-ops-lib.ssh-keys; allKeysFrom devOps; 86 | }; 87 | }; 88 | networking.firewall.allowedUDPPorts = [ 17777 ]; 89 | }; 90 | }; 91 | 92 | mainnet-deployer = { pkgs, lib, nodes, ... }: { 93 | local.username = "mainnet"; 94 | users.users.ci = { 95 | openssh.authorizedKeys.keys = with pkgs.iohk-ops-lib.ssh-keys; allKeysFrom devOps; 96 | isNormalUser = true; 97 | }; 98 | users.users.exchanges = { 99 | openssh.authorizedKeys.keys = with pkgs.iohk-ops-lib.ssh-keys; allKeysFrom devOps; 100 | isNormalUser = true; 101 | }; 102 | nix.trustedUsers = [ "root" "ci" ]; 103 | environment.etc."client_ssh_sample".text = lib.concatStringsSep "\n" (map 104 | (name: '' 105 | Host ${name} 106 | User ${nodes.${name}.config.local.username} 107 | HostName ${toString nodes.${name}.config.networking.publicIPv4} 108 | '') [ "mainnet-deployer" "staging-deployer" "testnet-deployer" "dev-deployer" ]); 109 | 110 | deployment.keys."mainnet-deployer.wgprivate" = { 111 | destDir = "/etc/wireguard"; 112 | keyFile = ../secrets/mainnet-deployer.wgprivate; 113 | }; 114 | 115 | networking.wireguard.interfaces.wg0 = { 116 | ips = [ "10.90.1.1/32" ]; 117 | listenPort = 17777; 118 | privateKeyFile = "/etc/wireguard/mainnet-deployer.wgprivate"; 119 | peers = [ 120 | { # mac-mini-1 121 | publicKey = "nvKCarVUXdO0WtoDsEjTzU+bX0bwWYHJAM2Y3XhO0Ao="; 122 | allowedIPs = [ "192.168.20.21/32" ]; 123 | persistentKeepalive = 
25; 124 | } 125 | { # mac-mini-2 126 | publicKey = "VcOEVp/0EG4luwL2bMmvGvlDNDbCzk7Vkazd3RRl51w="; 127 | allowedIPs = [ "192.168.20.22/32" ]; 128 | persistentKeepalive = 25; 129 | } 130 | ]; 131 | }; 132 | services.tarsnap = { 133 | enable = true; 134 | keyfile = "/run/keys/tarsnap"; 135 | archives = { inherit (import ../secrets/tarsnap-archives.nix) mainnet-deployer; }; 136 | }; 137 | deployment.keys.tarsnap = { 138 | destDir = "/run/keys"; 139 | keyFile = ../secrets/tarsnap-mainnet-deployer-readwrite.secret; 140 | }; 141 | }; 142 | 143 | staging-deployer = { 144 | local.username = "staging"; 145 | services.tarsnap = { 146 | enable = true; 147 | keyfile = "/run/keys/tarsnap"; 148 | archives = { inherit (import ../secrets/tarsnap-archives.nix) staging-deployer; }; 149 | }; 150 | deployment.keys.tarsnap = { 151 | destDir = "/run/keys"; 152 | keyFile = ../secrets/tarsnap-staging-deployer-readwrite.secret; 153 | }; 154 | }; 155 | 156 | testnet-deployer = { 157 | local.username = "testnet"; 158 | deployment.ec2.instanceType = "r5a.xlarge"; 159 | services.tarsnap = { 160 | enable = true; 161 | keyfile = "/run/keys/tarsnap"; 162 | archives = { inherit (import ../secrets/tarsnap-archives.nix) testnet-deployer; }; 163 | }; 164 | deployment.keys.tarsnap = { 165 | destDir = "/run/keys"; 166 | keyFile = ../secrets/tarsnap-testnet-deployer-readwrite.secret; 167 | }; 168 | }; 169 | 170 | dev-deployer = { pkgs, ... }: { 171 | local.username = "dev"; 172 | users.users.dev = { 173 | openssh.authorizedKeys.keys = with pkgs.iohk-ops-lib.ssh-keys; (allKeysFrom csl-developers ++ allKeysFrom plutus-developers); 174 | }; 175 | services.tarsnap = { 176 | enable = true; 177 | keyfile = "/run/keys/tarsnap"; 178 | archives = { inherit (import ../secrets/tarsnap-archives.nix) dev-deployer; }; 179 | }; 180 | deployment.keys.tarsnap = { 181 | destDir = "/run/keys"; 182 | keyFile = ../secrets/tarsnap-dev-deployer-readwrite.secret; 183 | }; 184 | }; 185 | 186 | bench-deployer = { pkgs, ... 
}: { 187 | local.username = "dev"; 188 | fileSystems."/home" = 189 | { device = "/dev/disk/by-label/home"; 190 | fsType = "ext4"; 191 | }; 192 | deployment.ec2.instanceType = "c5.9xlarge"; 193 | deployment.ec2.ebsInitialRootDiskSize = pkgs.lib.mkForce 2000; 194 | users.users.dev = { 195 | openssh.authorizedKeys.keys = with pkgs.iohk-ops-lib.ssh-keys; allKeysFrom csl-developers; 196 | }; 197 | nix.nrBuildUsers = pkgs.lib.mkForce 36; 198 | }; 199 | 200 | resources = { 201 | elasticIPs = let 202 | ip = { 203 | inherit region accessKeyId; 204 | }; 205 | in { 206 | mainnet-deployer-ip = ip; 207 | staging-deployer-ip = ip; 208 | testnet-deployer-ip = ip; 209 | dev-deployer-ip = ip; 210 | bench-deployer-ip = ip; 211 | }; 212 | ec2KeyPairs.deployers = { 213 | inherit region accessKeyId; 214 | }; 215 | ec2SecurityGroups = let 216 | fn = x: builtins.head (builtins.attrValues x); 217 | in with (import ../physical/aws/security-groups); { 218 | allow-ssh = fn (allow-ssh { inherit pkgs region org accessKeyId; }); 219 | allow-wireguard = fn (allow-wireguard { inherit pkgs region org accessKeyId; }); 220 | }; 221 | }; 222 | } 223 | -------------------------------------------------------------------------------- /example/.gitignore: -------------------------------------------------------------------------------- 1 | globals.nix 2 | static 3 | -------------------------------------------------------------------------------- /example/clusters/example.nix: -------------------------------------------------------------------------------- 1 | pkgs: 2 | { targetEnv 3 | , tiny 4 | , medium 5 | , large 6 | }: 7 | let 8 | 9 | inherit (pkgs) sourcePaths lib iohk-ops-lib; 10 | inherit (lib) recursiveUpdate mapAttrs; 11 | inherit (iohk-ops-lib) roles modules; 12 | 13 | nodes = { 14 | monitoring = mkNode { 15 | imports = [ tiny roles.monitor ]; 16 | deployment.ec2.region = "eu-central-1"; 17 | deployment.packet.facility = "ams1"; 18 | node = { 19 | org = "default"; 20 | roles.isMonitor = true; 21 
| }; 22 | }; 23 | }; 24 | 25 | mkNode = args: 26 | recursiveUpdate { 27 | deployment.targetEnv = targetEnv; 28 | nixpkgs.overlays = import ../overlays sourcePaths; 29 | } args; 30 | 31 | in { 32 | network.description = "example-cluster"; 33 | network.enableRollback = true; 34 | } // nodes 35 | -------------------------------------------------------------------------------- /example/create-aws.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euxo pipefail 4 | 5 | # Credential setup 6 | if [ ! -f ./static/graylog-creds.nix ]; then 7 | nix-shell -A gen-graylog-creds 8 | fi 9 | 10 | nixops destroy || true 11 | nixops delete || true 12 | nixops create ./deployments/example-aws.nix -I nixpkgs=./nix 13 | nixops deploy --show-trace 14 | -------------------------------------------------------------------------------- /example/create-libvirtd.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euxo pipefail 4 | 5 | # https://nixos.org/nixops/manual/#idm140737322394336 6 | # Needed for libvirtd: 7 | # 8 | # virtualisation.libvirtd.enable = true; 9 | # networking.firewall.checkReversePath = false; 10 | 11 | # See also: https://github.com/simon3z/virt-deploy/issues/8#issuecomment-73111541 12 | 13 | if [ ! -d /var/lib/libvirt/images ]; then 14 | sudo mkdir -p /var/lib/libvirt/images 15 | sudo chgrp libvirtd /var/lib/libvirt/images 16 | sudo chmod g+w /var/lib/libvirt/images 17 | fi 18 | 19 | # Credential setup 20 | if [ ! 
-f ./static/graylog-creds.nix ]; then 21 | nix-shell -A gen-graylog-creds 22 | fi 23 | 24 | nixops destroy || true 25 | nixops delete || true 26 | nixops create ./deployments/example-libvirtd.nix -I nixpkgs=./nix 27 | nixops deploy --show-trace 28 | -------------------------------------------------------------------------------- /example/default.nix: -------------------------------------------------------------------------------- 1 | { sourcePaths ? import ./nix/sources.nix 2 | , system ? builtins.currentSystem 3 | , crossSystem ? null 4 | , config ? {} 5 | }@args: with import ./nix args; { 6 | shell = mkShell { 7 | buildInputs = [ niv nixops nix ]; 8 | NIX_PATH = "nixpkgs=${path}"; 9 | NIXOPS_DEPLOYMENT = "${globals.deploymentName}"; 10 | passthru = { 11 | gen-graylog-creds = iohk-ops-lib.scripts.gen-graylog-creds { staticPath = ./static; }; 12 | }; 13 | }; 14 | } 15 | -------------------------------------------------------------------------------- /example/deployments/example-aws.nix: -------------------------------------------------------------------------------- 1 | with import ../nix {}; 2 | let 3 | inherit (pkgs.lib) 4 | attrValues attrNames filter filterAttrs flatten foldl' hasAttrByPath listToAttrs 5 | mapAttrs' mapAttrs nameValuePair recursiveUpdate unique optional any concatMap; 6 | 7 | inherit (globals.ec2.credentials) accessKeyIds; 8 | inherit (iohk-ops-lib.physical) aws; 9 | 10 | cluster = import ../clusters/example.nix pkgs { 11 | inherit (aws) targetEnv; 12 | tiny = aws.t2nano; 13 | medium = aws.t2xlarge; 14 | large = aws.t3xlarge; 15 | }; 16 | 17 | nodes = filterAttrs (name: node: 18 | ((node.deployment.targetEnv or null) == "ec2") 19 | && ((node.deployment.ec2.region or null) != null)) cluster; 20 | 21 | regions = 22 | unique (map (node: node.deployment.ec2.region) (attrValues nodes)); 23 | 24 | orgs = 25 | unique (map (node: node.node.org) (attrValues nodes)); 26 | 27 | securityGroups = with aws.security-groups; [ 28 | { 29 | nodes = filterAttrs 
(_: n: n.node.roles.isMonitor) nodes; 30 | groups = [ 31 | allow-public-www-https 32 | allow-graylog 33 | ]; 34 | } 35 | { 36 | inherit nodes; 37 | groups = [ 38 | allow-deployer-ssh 39 | ] 40 | ++ optional (any (n: n.node.roles.isMonitor) (attrValues nodes)) 41 | allow-monitoring-collection; 42 | } 43 | ]; 44 | 45 | importSecurityGroup = node: securityGroup: 46 | securityGroup { 47 | inherit pkgs lib nodes; 48 | region = node.deployment.ec2.region; 49 | org = node.node.org; 50 | accessKeyId = pkgs.globals.ec2.credentials.accessKeyIds.${node.node.org}; 51 | }; 52 | 53 | 54 | importSecurityGroups = {nodes, groups}: 55 | mapAttrs 56 | (_: n: foldl' recursiveUpdate {} (map (importSecurityGroup n) groups)) 57 | nodes; 58 | 59 | securityGroupsByNode = 60 | foldl' recursiveUpdate {} (map importSecurityGroups securityGroups); 61 | 62 | settings = { 63 | resources = { 64 | ec2SecurityGroups = 65 | foldl' recursiveUpdate {} (attrValues securityGroupsByNode); 66 | 67 | elasticIPs = mapAttrs' (name: node: 68 | nameValuePair "${name}-ip" { 69 | accessKeyId = accessKeyIds.${node.node.org}; 70 | inherit (node.deployment.ec2) region; 71 | }) nodes; 72 | 73 | ec2KeyPairs = listToAttrs (concatMap (region: 74 | map (org: 75 | nameValuePair "example-keypair-${org}-${region}" { 76 | inherit region; 77 | accessKeyId = accessKeyIds.${org}; 78 | } 79 | ) orgs) 80 | regions); 81 | }; 82 | defaults = { name, resources, config, ... 
}: { 83 | _file = ./example-aws.nix; 84 | deployment.ec2 = { 85 | keyPair = resources.ec2KeyPairs."example-keypair-${config.node.org}-${config.deployment.ec2.region}"; 86 | securityGroups = map (sgName: resources.ec2SecurityGroups.${sgName}) 87 | (attrNames (securityGroupsByNode.${name} or {})); 88 | }; 89 | }; 90 | }; 91 | in 92 | cluster // settings 93 | -------------------------------------------------------------------------------- /example/deployments/example-libvirtd.nix: -------------------------------------------------------------------------------- 1 | with import ../nix {}; 2 | import ../clusters/example.nix pkgs iohk-ops-lib.physical.libvirtd 3 | -------------------------------------------------------------------------------- /example/globals.nix: -------------------------------------------------------------------------------- 1 | self: super: { 2 | globals = rec { 3 | 4 | deployerIp = "127.0.0.1"; 5 | 6 | static = import ./static; 7 | 8 | deploymentName = "example"; 9 | 10 | domain = "${deploymentName}.aws.iohkdev.io"; 11 | 12 | extraPrometheusExportersPorts = []; 13 | 14 | ec2 = { 15 | credentials = { 16 | accessKeyIds = { 17 | default = "default"; 18 | }; 19 | }; 20 | }; 21 | }; 22 | } 23 | -------------------------------------------------------------------------------- /example/nix/default.nix: -------------------------------------------------------------------------------- 1 | { sourcePaths ? import ./sources.nix 2 | , system ? builtins.currentSystem 3 | , crossSystem ? null 4 | , config ? 
{} }: 5 | import sourcePaths.nixpkgs { 6 | overlays = import ../overlays sourcePaths; 7 | inherit system crossSystem config; 8 | } 9 | -------------------------------------------------------------------------------- /example/nix/sources.json: -------------------------------------------------------------------------------- 1 | { 2 | "gitignore": { 3 | "branch": "master", 4 | "description": "Nix function for filtering local git sources", 5 | "homepage": "", 6 | "owner": "hercules-ci", 7 | "repo": "gitignore", 8 | "rev": "f9e996052b5af4032fe6150bba4a6fe4f7b9d698", 9 | "sha256": "0jrh5ghisaqdd0vldbywags20m2cxpkbbk5jjjmwaw0gr8nhsafv", 10 | "type": "tarball", 11 | "url": "https://github.com/hercules-ci/gitignore/archive/f9e996052b5af4032fe6150bba4a6fe4f7b9d698.tar.gz", 12 | "url_template": "https://github.com///archive/.tar.gz" 13 | }, 14 | "nixpkgs": { 15 | "branch": "nixos-unstable", 16 | "description": "A read-only mirror of NixOS/nixpkgs tracking the released channels. Send issues and PRs to", 17 | "homepage": "https://github.com/NixOS/nixpkgs", 18 | "owner": "NixOS", 19 | "repo": "nixpkgs-channels", 20 | "rev": "94500c93dc239761bd144128a1684abcd08df6b7", 21 | "sha256": "0p6bd16mb1k5c3zihl1c3d62m419rii102xfb340pccgbd8j34bc", 22 | "type": "tarball", 23 | "url": "https://github.com/NixOS/nixpkgs-channels/archive/94500c93dc239761bd144128a1684abcd08df6b7.tar.gz", 24 | "url_template": "https://github.com///archive/.tar.gz" 25 | }, 26 | "ops-lib": { 27 | "branch": "master", 28 | "description": "NixOps deployment configuration library for IOHK devops ", 29 | "homepage": null, 30 | "owner": "input-output-hk", 31 | "repo": "ops-lib", 32 | "rev": "eb084ca24d050d49f0aec5f648c5fc54ba786d1a", 33 | "sha256": "1a3njs95xmjcl52qy2rbv16ai87lpsf4snfdfmxlpprp315mfh2h", 34 | "type": "tarball", 35 | "url": "https://github.com/input-output-hk/ops-lib/archive/eb084ca24d050d49f0aec5f648c5fc54ba786d1a.tar.gz", 36 | "url_template": "https://github.com///archive/.tar.gz" 37 | }, 38 | "naersk": 
{ 39 | "branch": "master", 40 | "description": "Build rust crates in Nix. No configuration, no code generation, no IFD. Sandbox friendly.", 41 | "homepage": "", 42 | "owner": "nmattia", 43 | "repo": "naersk", 44 | "rev": "254fa90956d3e0b6747fbd2b6bea71169c740e6f", 45 | "sha256": "04z5wibrmsad2yhx09hxkyvgn2kmdjd40fmxa9g7yla0vdxldg0m", 46 | "type": "tarball", 47 | "url": "https://github.com/nmattia/naersk/archive/254fa90956d3e0b6747fbd2b6bea71169c740e6f.tar.gz", 48 | "url_template": "https://github.com///archive/.tar.gz" 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /example/nix/sources.nix: -------------------------------------------------------------------------------- 1 | # This file has been generated by Niv. 2 | 3 | # A record, from name to path, of the third-party packages 4 | with rec 5 | { 6 | pkgs = 7 | if hasNixpkgsPath 8 | then 9 | if hasThisAsNixpkgsPath 10 | then import (builtins_fetchTarball { inherit (sources_nixpkgs) url sha256; }) {} 11 | else import {} 12 | else 13 | import (builtins_fetchTarball { inherit (sources_nixpkgs) url sha256; }) {}; 14 | 15 | sources_nixpkgs = 16 | if builtins.hasAttr "nixpkgs" sources 17 | then sources.nixpkgs 18 | else abort 19 | '' 20 | Please specify either (through -I or NIX_PATH=nixpkgs=...) or 21 | add a package called "nixpkgs" to your sources.json. 22 | ''; 23 | 24 | sources_gitignore = 25 | if builtins.hasAttr "gitignore" sources 26 | then sources.gitignore 27 | else abort 28 | '' 29 | Please add "gitignore" to your sources.json: 30 | niv add hercules-ci/gitignore 31 | ''; 32 | 33 | inherit (import (builtins_fetchTarball { inherit (sources_gitignore) url sha256; }) { 34 | inherit (pkgs) lib; 35 | }) gitignoreSource; 36 | 37 | # fetchTarball version that is compatible between all the versions of Nix 38 | builtins_fetchTarball = 39 | { url, sha256 ? 
null }@attrs: 40 | let 41 | inherit (builtins) lessThan nixVersion fetchTarball; 42 | in 43 | if sha256 == null || lessThan nixVersion "1.12" then 44 | fetchTarball { inherit url; } 45 | else 46 | fetchTarball attrs; 47 | 48 | # fetchurl version that is compatible between all the versions of Nix 49 | builtins_fetchurl = 50 | { url, sha256 ? null }@attrs: 51 | let 52 | inherit (builtins) lessThan nixVersion fetchurl; 53 | in 54 | if sha256 == null || lessThan nixVersion "1.12" then 55 | fetchurl { inherit url; } 56 | else 57 | fetchurl attrs; 58 | 59 | # A wrapper around pkgs.fetchzip that has inspectable arguments, 60 | # annoyingly this means we have to specify them 61 | fetchzip = { url, sha256 ? null }@attrs: if sha256 == null 62 | then builtins.fetchTarball { inherit url; } 63 | else pkgs.fetchzip attrs; 64 | 65 | # A wrapper around pkgs.fetchurl that has inspectable arguments, 66 | # annoyingly this means we have to specify them 67 | fetchurl = { url, sha256 }@attrs: pkgs.fetchurl attrs; 68 | 69 | hasNixpkgsPath = (builtins.tryEval ).success; 70 | hasThisAsNixpkgsPath = 71 | (builtins.tryEval ).success && == ./.; 72 | 73 | sources = builtins.fromJSON (builtins.readFile ./sources.json); 74 | 75 | mapAttrs = builtins.mapAttrs or 76 | (f: set: with builtins; 77 | listToAttrs (map (attr: { name = attr; value = f attr set.${attr}; }) (attrNames set))); 78 | 79 | # borrowed from nixpkgs 80 | functionArgs = f: f.__functionArgs or (builtins.functionArgs f); 81 | callFunctionWith = autoArgs: f: args: 82 | let auto = builtins.intersectAttrs (functionArgs f) autoArgs; 83 | in f (auto // args); 84 | 85 | getFetcher = spec: 86 | let fetcherName = 87 | if builtins.hasAttr "type" spec 88 | then builtins.getAttr "type" spec 89 | else "builtin-tarball"; 90 | in builtins.getAttr fetcherName { 91 | "tarball" = fetchzip; 92 | "builtin-tarball" = builtins_fetchTarball; 93 | "file" = fetchurl; 94 | "builtin-url" = builtins_fetchurl; 95 | }; 96 | }; 97 | # NOTE: spec must _not_ have 
an "outPath" attribute 98 | mapAttrs (name: spec: 99 | if builtins.hasAttr "outPath" spec 100 | then abort 101 | "The values in sources.json should not have an 'outPath' attribute" 102 | else 103 | let 104 | host = if (name == "nixpkgs") then "custom_nixpkgs" else name; 105 | tryFromPath = builtins.tryEval (builtins.findFile builtins.nixPath host); 106 | defaultSpec = (if builtins.hasAttr "url" spec && builtins.hasAttr "sha256" spec 107 | then spec // 108 | { outPath = callFunctionWith spec (getFetcher spec) { }; } 109 | else spec) // (if tryFromPath.success 110 | then let path = tryFromPath.value; 111 | in { 112 | outPath = builtins.trace "using search host <${host}>" ( 113 | if pkgs.lib.hasPrefix "/nix/store" (builtins.toString path) 114 | then path else gitignoreSource path); 115 | } 116 | else {}); 117 | in if builtins.hasAttr "rev" spec && builtins.hasAttr "url" spec then 118 | defaultSpec // 119 | { revOverride = rev: if (rev == null) then defaultSpec else 120 | let 121 | spec' = removeAttrs (spec // { 122 | rev = rev; 123 | url = builtins.replaceStrings [defaultSpec.rev] [rev] defaultSpec.url; 124 | }) [ "sha256" ]; 125 | in 126 | spec' // 127 | { outPath = callFunctionWith spec' (getFetcher spec') { }; }; 128 | } 129 | else defaultSpec 130 | ) sources 131 | -------------------------------------------------------------------------------- /example/overlays/default.nix: -------------------------------------------------------------------------------- 1 | sourcePaths: 2 | # overlays from ops-lib (include ops-lib sourcePaths): 3 | (import sourcePaths.ops-lib {}).overlays 4 | # our own overlays: 5 | ++ map import (import ./overlay-list.nix) 6 | # merge upstream sourcePaths with our own: 7 | ++ [( _: super: { sourcePaths = if (super ? 
sourcePaths) then super.sourcePaths // sourcePaths else sourcePaths ;})] 8 | -------------------------------------------------------------------------------- /example/overlays/overlay-list.nix: -------------------------------------------------------------------------------- 1 | [ 2 | ../globals.nix 3 | ] 4 | -------------------------------------------------------------------------------- /example/release.nix: -------------------------------------------------------------------------------- 1 | ############################################################################ 2 | # 3 | # Hydra release jobset. 4 | # 5 | # The purpose of this file is to select jobs defined in default.nix and map 6 | # them to all supported build platforms. 7 | # 8 | ############################################################################ 9 | 10 | # The project sources 11 | { ops-lib-example ? { outPath = ./.; rev = "abcdef"; } 12 | 13 | # Function arguments to pass to the project 14 | , projectArgs ? { config = { allowUnfree = false; inHydra = true; }; } 15 | 16 | # The systems that the jobset will be built for. 17 | , supportedSystems ? [ "x86_64-linux" "x86_64-darwin" ] 18 | 19 | # The systems used for cross-compiling 20 | , supportedCrossSystems ? [ "x86_64-linux" ] 21 | 22 | # A Hydra option 23 | , scrubJobs ? true 24 | 25 | # Import pkgs 26 | , pkgs ? import ./nix {} 27 | }: 28 | 29 | with import ((import pkgs.sourcePaths.iohk-nix) {}).release-lib { 30 | inherit pkgs; 31 | 32 | inherit supportedSystems supportedCrossSystems scrubJobs projectArgs; 33 | packageSet = import ops-lib; 34 | gitrev = ops-lib.rev; 35 | }; 36 | 37 | with pkgs.lib; 38 | 39 | let 40 | jobs = { 41 | native = mapTestOn (packagePlatforms project); 42 | } 43 | // { 44 | # This aggregate job is what IOHK Hydra uses to update 45 | # the CI status in GitHub. 
46 | required = mkRequiredJob ( 47 | # project executables: 48 | [ jobs.native.nixops.x86_64-linux 49 | jobs.native.nginxStable.x86_64-linux 50 | jobs.native.nginxMainline.x86_64-linux 51 | ] 52 | ); 53 | } 54 | # Build the shell derivation in Hydra so that all its dependencies 55 | # are cached. 56 | // mapTestOn (packagePlatforms { inherit (project) shell; }); 57 | 58 | in jobs 59 | -------------------------------------------------------------------------------- /example/shell.nix: -------------------------------------------------------------------------------- 1 | (import ./. {}).shell 2 | -------------------------------------------------------------------------------- /example/static/dead-mans-snitch.nix: -------------------------------------------------------------------------------- 1 | { 2 | pingUrl = ""; 3 | } 4 | -------------------------------------------------------------------------------- /example/static/default.nix: -------------------------------------------------------------------------------- 1 | { 2 | deadMansSnitch = import ./dead-mans-snitch.nix; 3 | grafanaCreds = import ./grafana-creds.nix; 4 | graylogCreds = import ./graylog-creds.nix; 5 | oauth = import ./oauth.nix; 6 | pagerDuty = import ./pager-duty.nix; 7 | } 8 | -------------------------------------------------------------------------------- /example/static/grafana-creds.nix: -------------------------------------------------------------------------------- 1 | { 2 | user = "root"; 3 | password = ""; 4 | } 5 | -------------------------------------------------------------------------------- /example/static/oauth.nix: -------------------------------------------------------------------------------- 1 | { 2 | clientID = ""; 3 | clientSecret = ""; 4 | cookie.secret = ""; 5 | emailDomain = "iohk.io"; 6 | } 7 | -------------------------------------------------------------------------------- /example/static/pager-duty.nix: 
-------------------------------------------------------------------------------- 1 | { 2 | serviceKey = null; 3 | } 4 | -------------------------------------------------------------------------------- /flake.lock: -------------------------------------------------------------------------------- 1 | { 2 | "nodes": { 3 | "nixpkgs": { 4 | "locked": { 5 | "lastModified": 1611878570, 6 | "narHash": "sha256-BnAaAtUZJp2mvBawi/Rulp74cDo46JfalaB1Svg87ik=", 7 | "owner": "NixOS", 8 | "repo": "nixpkgs", 9 | "rev": "5a1c008bae84cba12d75662d1154077fa311cd5e", 10 | "type": "github" 11 | }, 12 | "original": { 13 | "id": "nixpkgs", 14 | "type": "indirect" 15 | } 16 | }, 17 | "root": { 18 | "inputs": { 19 | "nixpkgs": "nixpkgs" 20 | } 21 | } 22 | }, 23 | "root": "root", 24 | "version": 7 25 | } 26 | -------------------------------------------------------------------------------- /flake.nix: -------------------------------------------------------------------------------- 1 | { 2 | description = "Utilities for ops and packaging nix"; 3 | 4 | outputs = { self, nixpkgs }: let 5 | 6 | # This is meant to make it easy to create hydraJobs with flakes 7 | # the two issues with numtide-flakeutils, is that it will create the packages 8 | # at the wrong attr path. E.g. hydraJobs.x86_64-linux.package. And the 9 | # other issue is that it also makes it hard to scope the jobs to 10 | # only as subset of platforms. 
11 | # 12 | # Example usage: 13 | # 14 | # let 15 | # pkgsForSystem = system: (import nixpkgs) { inherit system; overlays = [ 16 | # (import ./nix/overlay.nix) 17 | # ];}; 18 | # hydraUtils = ops-lib.lib.mkHydraUtils pkgsForSystem; 19 | # inherit (hydraUtils) collectHydraSets mkHydraSet; 20 | # in (collectHydraSets [ 21 | # (mkHydraSet [ "mantis-docker" ] [ "x86_64-linux" ]) 22 | # (mkHydraSet [ "mantis" "mantis-explorer" ] [ "x86_64-linux" "x86_64-darwin" ]) 23 | # ]) 24 | # 25 | # This flake will evaluate to: 26 | # { hydraJobs = { 27 | # mantis.x86_64-linux = ; 28 | # mantis.x86_64-darwin = ; 29 | # mantis-explorer.x86_64-linux = ; 30 | # mantis-explorer.x86_64-darwin = ; 31 | # mantis-docker.x86_64-linux = ; 32 | # };} 33 | mkHydraUtils = mkPkgs: let 34 | inherit (nixpkgs) lib; 35 | # [attrset] -> attrset 36 | recursiveMerge = lib.foldr lib.recursiveUpdate {}; 37 | mkHydraJobsForSystem = attrs: system: 38 | recursiveMerge (map (n: { "${n}"."${system}" = (mkPkgs system)."${n}"; }) attrs); 39 | in { 40 | collectHydraSets = jobSets: { hydraJobs = recursiveMerge jobSets; }; 41 | mkHydraSet = attrs: systems: recursiveMerge (map (mkHydraJobsForSystem attrs) systems); 42 | }; 43 | in { 44 | lib = { inherit mkHydraUtils; }; 45 | }; 46 | } 47 | -------------------------------------------------------------------------------- /globals-deployers.nix: -------------------------------------------------------------------------------- 1 | self: super: { 2 | globals = rec { 3 | deploymentName = "deployers"; 4 | ec2 = { 5 | credentials = { 6 | accessKeyIds = { 7 | default = "root-account"; 8 | "IOHK" = "root-account"; 9 | }; 10 | }; 11 | }; 12 | }; 13 | } 14 | -------------------------------------------------------------------------------- /modules/aws.nix: -------------------------------------------------------------------------------- 1 | { lib, config, pkgs, ... 
}: 2 | with lib; 3 | with types; { 4 | options = { 5 | node = { 6 | ## 7 | ## Mandatory configuration: 8 | region = mkOption { 9 | type = enum [ 10 | "us-east-1" # US East (N. Virginia) 11 | "us-east-2" # US East (Ohio) 12 | "us-west-1" # US West (N. California) 13 | "us-west-2" # US West (Oregon) 14 | "ca-central-1" # Canada (Central) 15 | "eu-central-1" # EU (Frankfurt) 16 | "eu-west-1" # EU (Ireland) 17 | "eu-west-2" # EU (London) 18 | "eu-west-3" # EU (Paris) 19 | "eu-north-1" # EU (Stockholm) 20 | "ap-east-1" # Asia Pacific (Hong Kong) 21 | "ap-northeast-1" # Asia Pacific (Tokyo) 22 | "ap-northeast-2" # Asia Pacific (Seoul) 23 | "ap-northeast-3" # Asia Pacific (Osaka-Local) 24 | "ap-southeast-1" # Asia Pacific (Singapore) 25 | "ap-southeast-2" # Asia Pacific (Sydney) 26 | "ap-south-1" # Asia Pacific (Mumbai) 27 | "me-south-1" # Middle East (Bahrain) 28 | "sa-east-1" # South America (São Paulo) 29 | ]; 30 | description = "Region. Must be set (use deployments/config.nix)."; 31 | default = "eu-central-1"; 32 | }; 33 | 34 | org = lib.mkOption { 35 | type = lib.types.enum (lib.attrNames pkgs.globals.ec2.credentials.accessKeyIds); 36 | }; 37 | 38 | roles.isMonitor = lib.mkOption { 39 | type = lib.types.bool; 40 | default = false; 41 | }; 42 | 43 | roles.class = lib.mkOption { 44 | type = lib.types.str; 45 | }; 46 | 47 | accessKeyId = mkOption { 48 | type = str; 49 | description = "Access key ID."; 50 | default = null; 51 | }; 52 | 53 | fqdn = mkOption { 54 | type = nullOr str; 55 | description = "Node's FQDN."; 56 | default = null; 57 | }; 58 | 59 | cpus = mkOption { 60 | type = int; 61 | description = 62 | "Number of CPUs available (not counting hyper-threading)"; 63 | }; 64 | 65 | memory = mkOption { 66 | type = int; 67 | description = 68 | "main memory size in GB"; 69 | }; 70 | 71 | ## 72 | ## Non-mandatory configuration: 73 | instanceType = mkOption { 74 | type = str; 75 | description = "Instance type."; 76 | default = "t3a.medium"; 77 | }; 78 | 79 | 
spotInstancePrice = mkOption { 80 | type = int; 81 | description = 82 | "Price (in dollar cents per hour) to use for spot instances request for the machine. If the value is equal to 0 (default), then spot instances are not used."; 83 | default = config.cluster.spotInstancePrice; 84 | }; 85 | 86 | allocateElasticIP = mkOption { 87 | type = bool; 88 | description = 89 | "Node-specific EIP allocation override. You must provide -ip."; 90 | default = config.cluster.allocateElasticIP; 91 | }; 92 | }; 93 | 94 | cluster = { 95 | ## 96 | ## Mandatory configuration: 97 | name = mkOption { 98 | type = str; 99 | description = "Name of the cluster instance"; 100 | default = null; 101 | }; 102 | 103 | deployerIP = mkOption { 104 | type = str; 105 | description = 106 | "Deployer machine IP. Must be set (use deployments/config.nix)."; 107 | default = null; 108 | }; 109 | 110 | generateLetsEncryptCert = mkOption { 111 | type = bool; 112 | description = 113 | "Use let's encrypt to generate a proper TLS certificate."; 114 | default = false; 115 | }; 116 | 117 | tlsCert = mkOption { 118 | type = nullOr path; 119 | description = 120 | "Custom TLS cert. Will use ACME to generate one if null"; 121 | default = null; 122 | }; 123 | 124 | tlsCertKey = mkOption { 125 | type = nullOr path; 126 | description = 127 | "Custom TLS cert dir key. Will use ACME to generate one if null"; 128 | default = null; 129 | }; 130 | 131 | ## 132 | ## Non-mandatory configuration: 133 | toplevelDomain = mkOption { 134 | type = nullOr str; 135 | description = "Top level domain. 
'null' for no DNS record."; 136 | default = if config.cluster.hostedZone != null then 137 | config.cluster.hostedZone 138 | else 139 | "iohk"; 140 | }; 141 | 142 | hostedZone = mkOption { 143 | type = nullOr str; 144 | description = "Hosted zone"; 145 | default = null; 146 | }; 147 | 148 | allocateElasticIP = mkOption { 149 | type = bool; 150 | description = "Cluster-wide EIP allocation policy."; 151 | default = false; 152 | }; 153 | 154 | spotInstancePrice = mkOption { 155 | type = int; 156 | description = 157 | "Price (in dollar cents per hour) to use for spot instances request for the machine. If the value is equal to 0 (default), then spot instances are not used."; 158 | default = 0; 159 | }; 160 | 161 | subDomain = mkOption { 162 | type = nullOr str; 163 | description = "Subdomain to use. Defaults to `cluster.name`."; 164 | default = config.cluster.name; 165 | }; 166 | 167 | oauthEnable = mkOption { 168 | type = bool; 169 | description = "Configure oauth proxy."; 170 | default = true; 171 | }; 172 | }; 173 | 174 | hydra.s3Bucket = mkOption { 175 | type = nullOr str; 176 | description = 177 | "Specify a bucket name to use an existing bucket to upload docker images to. If set to null (default) a bucket will be created."; 178 | default = null; 179 | }; 180 | }; 181 | } 182 | -------------------------------------------------------------------------------- /modules/common.nix: -------------------------------------------------------------------------------- 1 | { pkgs, lib, name, config, ... 
}: 2 | let 3 | inherit (pkgs.iohk-ops-lib.ssh-keys) allKeysFrom devOps; 4 | devOpsKeys = allKeysFrom devOps; 5 | in { 6 | 7 | imports = [ ./aws.nix ./monitoring-exporters.nix ./oauth.nix ]; 8 | options = { 9 | local.commonGivesVim = lib.mkOption { 10 | default = true; 11 | type = lib.types.bool; 12 | description = "allows making common.nix not install a vim"; 13 | }; 14 | }; 15 | config = { 16 | networking.hostName = name; 17 | 18 | environment.systemPackages = with pkgs; [ 19 | bat 20 | git 21 | graphviz 22 | htop 23 | iptables 24 | jq 25 | lsof 26 | mosh 27 | ncdu 28 | sysstat 29 | tcpdump 30 | tig 31 | tree 32 | di 33 | fd 34 | file 35 | ripgrep 36 | ] ++ (lib.optional config.local.commonGivesVim vim); 37 | 38 | users.mutableUsers = false; 39 | users.users.root.openssh.authorizedKeys.keys = devOpsKeys; 40 | 41 | services = { 42 | monitoring-exporters.graylogHost = 43 | if config.deployment.targetEnv == "ec2" 44 | then "monitoring-ip:5044" 45 | else "monitoring:5044"; 46 | 47 | openssh = { 48 | authorizedKeysFiles = lib.mkForce [ "/etc/ssh/authorized_keys.d/%u" ]; 49 | extraConfig = lib.mkOrder 9999 '' 50 | Match User root 51 | AuthorizedKeysFile .ssh/authorized_keys .ssh/authorized_keys2 /etc/ssh/authorized_keys.d/%u 52 | ''; 53 | settings.PasswordAuthentication = false; 54 | }; 55 | 56 | timesyncd.enable = true; 57 | cron.enable = true; 58 | }; 59 | 60 | nix = { 61 | # 2.19 is the latest version that works with recursive submodules of haskellNix 62 | # https://github.com/NixOS/nix/issues/10022 63 | package = pkgs.nixVersions.nix_2_19; 64 | 65 | # make sure we have enough build users 66 | nrBuildUsers = 32; 67 | 68 | # use nix sandboxing for greater determinism 69 | settings = { 70 | sandbox = true; 71 | 72 | # use all cores 73 | cores = 0; 74 | 75 | # use our hydra builds 76 | substituters = [ "https://cache.nixos.org" "https://cache.iog.io" "https://iohk.cachix.org" ]; 77 | trusted-public-keys = [ 78 | 
"cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=" 79 | "hydra.iohk.io:f/Ea+s+dFdN+3Y/G+FDgSq+a5NEWhJGzdjvKNGv0/EQ=" 80 | "iohk.cachix.org-1:DpRUyj7h7V830dp/i6Nti+NEO2/nhblbov/8MW7Rqoo=" 81 | ]; 82 | }; 83 | 84 | # if our hydra is down, don't wait forever 85 | extraOptions = '' 86 | connect-timeout = 10 87 | http2 = true 88 | show-trace = true 89 | narinfo-cache-negative-ttl = 0 90 | 91 | # Fetch-closure required by capkgs 92 | experimental-features = nix-command flakes fetch-closure auto-allocate-uids configurable-impure-env 93 | allow-import-from-derivation = true 94 | 95 | # To disable warnings on newer nix versions ~ >= 2.19 96 | auto-allocate-uids = false 97 | impure-env = 98 | ''; 99 | 100 | # nixpkgs path is created by 'extraSystemBuilderCmds' below: 101 | nixPath = [ "nixpkgs=/run/current-system/nixpkgs" ]; 102 | }; 103 | 104 | system.extraSystemBuilderCmds = '' 105 | ln -sv ${pkgs.path} $out/nixpkgs 106 | ''; 107 | 108 | # Mosh 109 | networking.firewall.allowedUDPPortRanges = [{ 110 | from = 60000; 111 | to = 61000; 112 | }]; 113 | 114 | programs = { 115 | mosh.enable = true; 116 | ssh.package = ( 117 | builtins.getFlake "github:nixos/nixpkgs/b9014df496d5b68bf7c0145d0e9b0f529ce4f2a8" 118 | ).legacyPackages.${pkgs.system}.openssh; 119 | }; 120 | }; 121 | } 122 | -------------------------------------------------------------------------------- /modules/default.nix: -------------------------------------------------------------------------------- 1 | { 2 | common = import ./common.nix; 3 | 4 | oauth = import ./oauth.nix; 5 | 6 | sentry = import ./sentry; 7 | 8 | clickhouse-custom = import ./clickhouse-custom.nix; 9 | } 10 | -------------------------------------------------------------------------------- /modules/deployer.nix: -------------------------------------------------------------------------------- 1 | { pkgs, lib, name, config, ... 
}: 2 | let 3 | inherit (pkgs.iohk-ops-lib.ssh-keys) allKeysFrom devOps; 4 | devOpsKeys = allKeysFrom devOps; 5 | in { 6 | 7 | imports = [ ./common.nix ]; 8 | 9 | environment.systemPackages = with pkgs; [ 10 | (ruby.withPackages (ps: with ps; [ sequel pry sqlite3 nokogiri ])) 11 | screen 12 | sqlite-interactive 13 | tmux 14 | gnupg 15 | pinentry 16 | ]; 17 | 18 | programs.gnupg.agent = { 19 | enable = true; 20 | enableSSHSupport = true; 21 | }; 22 | 23 | programs.screen.screenrc = '' 24 | defscrollback 10000 25 | #caption always 26 | maptimeout 5 27 | escape ^aa # default 28 | autodetach on # default: on 29 | crlf off # default: off 30 | hardcopy_append on # default: off 31 | startup_message off # default: on 32 | vbell off # default: ??? 33 | defmonitor on 34 | defscrollback 1000 # default: 100 35 | silencewait 15 # default: 30 36 | shelltitle "Shell" 37 | hardstatus alwayslastline "%{b}[ %{B}%H %{b}][ %{w}%?%-Lw%?%{b}(%{W}%n*%f %t%?(%u)%?%{b})%{w}%?%+Lw%?%?%= %{b}][%{B} %Y-%m-%d %{W}%c %{b}]" 38 | sorendition gk #red on white 39 | bell "%C -> %n%f %t Bell!~" 40 | pow_detach_msg "BYE" 41 | vbell_msg " *beep* " 42 | bind . 43 | bind ^\ 44 | bind \\ 45 | bind e mapdefault 46 | msgwait 2 47 | ''; 48 | } 49 | -------------------------------------------------------------------------------- /modules/gen-graylog-creds.nix: -------------------------------------------------------------------------------- 1 | { user ? null, password ? 
null }: 2 | 3 | let pkgs = import (import ../fetch-nixpkgs.nix) { }; 4 | in pkgs.stdenv.mkDerivation { 5 | name = "gen-graylog-creds"; 6 | buildInputs = with pkgs; [ pwgen gnused ]; 7 | shellHook = '' 8 | clusterChar="96" # Default graylog cluster secret length 9 | clusterSecret="" # Var for the clusterSecret 10 | credsFilename="graylog-creds.nix" # Default graylog static filename 11 | defaultUser="root" # Default administrative user 12 | password="${toString password}" # password supplied by cli arg 13 | passwordChar="32" # Default graylog password length 14 | passwordHash="" # Sha256 hash of the plaintext password 15 | staticPath=${toString ../static} # Absolute path to the static dir 16 | user="${toString user}" # user supplied by cli arg 17 | 18 | if [[ -e "$staticPath/$credsFilename" ]]; then 19 | echo "File already exists: $staticPath/$credsFilename, aborting!" 20 | exit 1 21 | elif [[ -z $user ]]; then 22 | echo "User is empty -- setting to a default administrative user of $defaultUser" 23 | user=$defaultUser 24 | fi 25 | echo "Writing graylog creds for user $user..." 
26 | if [[ -z $password ]]; then 27 | echo "Password is empty -- setting to a random alphanumeric password of length $passwordChar" 28 | password=$(pwgen $passwordChar 1) 29 | fi 30 | 31 | passwordHash=$(echo -n $password | sha256sum | sed -z 's/ -\n//g') 32 | clusterSecret=$(pwgen $clusterChar 1) 33 | 34 | umask 077 35 | cd $path 36 | cat << EOF > $staticPath/$credsFilename 37 | { 38 | user = "$user"; 39 | password = "$password"; 40 | passwordHash = "$passwordHash"; 41 | clusterSecret = "$clusterSecret"; 42 | } 43 | EOF 44 | exit 0 45 | ''; 46 | } 47 | -------------------------------------------------------------------------------- /modules/grafana/generic/nginx-basic.json: -------------------------------------------------------------------------------- 1 | { 2 | "annotations": { 3 | "list": [ 4 | { 5 | "builtIn": 1, 6 | "datasource": "-- Grafana --", 7 | "enable": true, 8 | "hide": true, 9 | "iconColor": "rgba(0, 211, 255, 1)", 10 | "name": "Annotations & Alerts", 11 | "type": "dashboard" 12 | } 13 | ] 14 | }, 15 | "editable": true, 16 | "gnetId": null, 17 | "graphTooltip": 0, 18 | "id": 6, 19 | "iteration": 1575394279348, 20 | "links": [], 21 | "panels": [ 22 | { 23 | "aliasColors": {}, 24 | "bars": false, 25 | "dashLength": 10, 26 | "dashes": false, 27 | "datasource": "prometheus", 28 | "fill": 0, 29 | "fillGradient": 0, 30 | "gridPos": { 31 | "h": 6, 32 | "w": 12, 33 | "x": 0, 34 | "y": 0 35 | }, 36 | "id": 2, 37 | "legend": { 38 | "avg": false, 39 | "current": false, 40 | "max": false, 41 | "min": false, 42 | "show": true, 43 | "total": false, 44 | "values": false 45 | }, 46 | "lines": true, 47 | "linewidth": 1, 48 | "links": [], 49 | "nullPointMode": "null", 50 | "options": { 51 | "dataLinks": [] 52 | }, 53 | "percentage": false, 54 | "pointradius": 2, 55 | "points": false, 56 | "renderer": "flot", 57 | "seriesOverrides": [], 58 | "spaceLength": 10, 59 | "stack": false, 60 | "steppedLine": false, 61 | "targets": [ 62 | { 63 | "expr": 
"rate(nginx_vts_server_requests_total{code=\"2xx\",instance=~\"$node:9113\",host=\"$nginx_host\"}[1m])", 64 | "format": "time_series", 65 | "intervalFactor": 1, 66 | "refId": "B" 67 | } 68 | ], 69 | "thresholds": [], 70 | "timeFrom": null, 71 | "timeRegions": [], 72 | "timeShift": null, 73 | "title": "2xx", 74 | "tooltip": { 75 | "shared": true, 76 | "sort": 0, 77 | "value_type": "individual" 78 | }, 79 | "type": "graph", 80 | "xaxis": { 81 | "buckets": null, 82 | "mode": "time", 83 | "name": null, 84 | "show": true, 85 | "values": [] 86 | }, 87 | "yaxes": [ 88 | { 89 | "format": "short", 90 | "label": null, 91 | "logBase": 1, 92 | "max": null, 93 | "min": null, 94 | "show": true 95 | }, 96 | { 97 | "format": "short", 98 | "label": null, 99 | "logBase": 1, 100 | "max": null, 101 | "min": null, 102 | "show": true 103 | } 104 | ], 105 | "yaxis": { 106 | "align": false, 107 | "alignLevel": null 108 | } 109 | }, 110 | { 111 | "aliasColors": {}, 112 | "bars": false, 113 | "dashLength": 10, 114 | "dashes": false, 115 | "datasource": "prometheus", 116 | "fill": 0, 117 | "fillGradient": 0, 118 | "gridPos": { 119 | "h": 6, 120 | "w": 12, 121 | "x": 12, 122 | "y": 0 123 | }, 124 | "id": 4, 125 | "legend": { 126 | "avg": false, 127 | "current": false, 128 | "max": false, 129 | "min": false, 130 | "show": true, 131 | "total": false, 132 | "values": false 133 | }, 134 | "lines": true, 135 | "linewidth": 1, 136 | "links": [], 137 | "nullPointMode": "null", 138 | "options": { 139 | "dataLinks": [] 140 | }, 141 | "percentage": false, 142 | "pointradius": 2, 143 | "points": false, 144 | "renderer": "flot", 145 | "seriesOverrides": [], 146 | "spaceLength": 10, 147 | "stack": false, 148 | "steppedLine": false, 149 | "targets": [ 150 | { 151 | "expr": "rate(nginx_vts_server_requests_total{code=\"4xx\",instance=~\"$node:9113\",host=\"$nginx_host\"}[1m])", 152 | "format": "time_series", 153 | "intervalFactor": 1, 154 | "refId": "A" 155 | } 156 | ], 157 | "thresholds": [], 158 | 
"timeFrom": null, 159 | "timeRegions": [], 160 | "timeShift": null, 161 | "title": "4xx", 162 | "tooltip": { 163 | "shared": true, 164 | "sort": 0, 165 | "value_type": "individual" 166 | }, 167 | "type": "graph", 168 | "xaxis": { 169 | "buckets": null, 170 | "mode": "time", 171 | "name": null, 172 | "show": true, 173 | "values": [] 174 | }, 175 | "yaxes": [ 176 | { 177 | "format": "short", 178 | "label": null, 179 | "logBase": 1, 180 | "max": null, 181 | "min": null, 182 | "show": true 183 | }, 184 | { 185 | "format": "short", 186 | "label": null, 187 | "logBase": 1, 188 | "max": null, 189 | "min": null, 190 | "show": true 191 | } 192 | ], 193 | "yaxis": { 194 | "align": false, 195 | "alignLevel": null 196 | } 197 | }, 198 | { 199 | "aliasColors": {}, 200 | "bars": false, 201 | "dashLength": 10, 202 | "dashes": false, 203 | "datasource": "prometheus", 204 | "fill": 0, 205 | "fillGradient": 0, 206 | "gridPos": { 207 | "h": 6, 208 | "w": 12, 209 | "x": 0, 210 | "y": 6 211 | }, 212 | "id": 6, 213 | "legend": { 214 | "avg": false, 215 | "current": false, 216 | "max": false, 217 | "min": false, 218 | "show": true, 219 | "total": false, 220 | "values": false 221 | }, 222 | "lines": true, 223 | "linewidth": 1, 224 | "links": [], 225 | "nullPointMode": "null", 226 | "options": { 227 | "dataLinks": [] 228 | }, 229 | "percentage": false, 230 | "pointradius": 2, 231 | "points": false, 232 | "renderer": "flot", 233 | "seriesOverrides": [], 234 | "spaceLength": 10, 235 | "stack": false, 236 | "steppedLine": false, 237 | "targets": [ 238 | { 239 | "expr": "rate(nginx_vts_server_requests_total{code=\"5xx\",instance=~\"$node:9113\",host=\"$nginx_host\"}[1m])", 240 | "format": "time_series", 241 | "intervalFactor": 1, 242 | "refId": "C" 243 | } 244 | ], 245 | "thresholds": [], 246 | "timeFrom": null, 247 | "timeRegions": [], 248 | "timeShift": null, 249 | "title": "5xx", 250 | "tooltip": { 251 | "shared": true, 252 | "sort": 0, 253 | "value_type": "individual" 254 | }, 255 | 
"type": "graph", 256 | "xaxis": { 257 | "buckets": null, 258 | "mode": "time", 259 | "name": null, 260 | "show": true, 261 | "values": [] 262 | }, 263 | "yaxes": [ 264 | { 265 | "format": "short", 266 | "label": null, 267 | "logBase": 1, 268 | "max": null, 269 | "min": null, 270 | "show": true 271 | }, 272 | { 273 | "format": "short", 274 | "label": null, 275 | "logBase": 1, 276 | "max": null, 277 | "min": null, 278 | "show": true 279 | } 280 | ], 281 | "yaxis": { 282 | "align": false, 283 | "alignLevel": null 284 | } 285 | }, 286 | { 287 | "aliasColors": {}, 288 | "bars": false, 289 | "dashLength": 10, 290 | "dashes": false, 291 | "datasource": "prometheus", 292 | "fill": 0, 293 | "fillGradient": 0, 294 | "gridPos": { 295 | "h": 6, 296 | "w": 12, 297 | "x": 12, 298 | "y": 6 299 | }, 300 | "id": 8, 301 | "legend": { 302 | "avg": false, 303 | "current": false, 304 | "max": false, 305 | "min": false, 306 | "show": true, 307 | "total": false, 308 | "values": false 309 | }, 310 | "lines": true, 311 | "linewidth": 1, 312 | "links": [], 313 | "nullPointMode": "null", 314 | "options": { 315 | "dataLinks": [] 316 | }, 317 | "percentage": false, 318 | "pointradius": 2, 319 | "points": false, 320 | "renderer": "flot", 321 | "seriesOverrides": [], 322 | "spaceLength": 10, 323 | "stack": false, 324 | "steppedLine": false, 325 | "targets": [ 326 | { 327 | "expr": "nginx_vts_main_connections{status=\"active\",alias=\"$node\"}", 328 | "format": "time_series", 329 | "intervalFactor": 1, 330 | "refId": "A" 331 | } 332 | ], 333 | "thresholds": [], 334 | "timeFrom": null, 335 | "timeRegions": [], 336 | "timeShift": null, 337 | "title": "active connections", 338 | "tooltip": { 339 | "shared": true, 340 | "sort": 0, 341 | "value_type": "individual" 342 | }, 343 | "type": "graph", 344 | "xaxis": { 345 | "buckets": null, 346 | "mode": "time", 347 | "name": null, 348 | "show": true, 349 | "values": [] 350 | }, 351 | "yaxes": [ 352 | { 353 | "format": "short", 354 | "label": null, 355 | 
"logBase": 1, 356 | "max": null, 357 | "min": null, 358 | "show": true 359 | }, 360 | { 361 | "format": "short", 362 | "label": null, 363 | "logBase": 1, 364 | "max": null, 365 | "min": null, 366 | "show": true 367 | } 368 | ], 369 | "yaxis": { 370 | "align": false, 371 | "alignLevel": null 372 | } 373 | }, 374 | { 375 | "aliasColors": {}, 376 | "bars": false, 377 | "dashLength": 10, 378 | "dashes": false, 379 | "datasource": "prometheus", 380 | "fill": 0, 381 | "fillGradient": 0, 382 | "gridPos": { 383 | "h": 8, 384 | "w": 12, 385 | "x": 0, 386 | "y": 12 387 | }, 388 | "id": 10, 389 | "legend": { 390 | "avg": false, 391 | "current": false, 392 | "max": false, 393 | "min": false, 394 | "show": true, 395 | "total": false, 396 | "values": false 397 | }, 398 | "lines": true, 399 | "linewidth": 1, 400 | "links": [], 401 | "nullPointMode": "null", 402 | "options": { 403 | "dataLinks": [] 404 | }, 405 | "percentage": false, 406 | "pointradius": 2, 407 | "points": false, 408 | "renderer": "flot", 409 | "seriesOverrides": [], 410 | "spaceLength": 10, 411 | "stack": false, 412 | "steppedLine": false, 413 | "targets": [ 414 | { 415 | "expr": "cardano_current_epoch{alias=\"c-a-1.mainnet\"}", 416 | "format": "time_series", 417 | "intervalFactor": 1, 418 | "refId": "A" 419 | } 420 | ], 421 | "thresholds": [], 422 | "timeFrom": null, 423 | "timeRegions": [], 424 | "timeShift": null, 425 | "title": "current epoch", 426 | "tooltip": { 427 | "shared": true, 428 | "sort": 0, 429 | "value_type": "individual" 430 | }, 431 | "type": "graph", 432 | "xaxis": { 433 | "buckets": null, 434 | "mode": "time", 435 | "name": null, 436 | "show": true, 437 | "values": [] 438 | }, 439 | "yaxes": [ 440 | { 441 | "format": "short", 442 | "label": null, 443 | "logBase": 1, 444 | "max": null, 445 | "min": null, 446 | "show": true 447 | }, 448 | { 449 | "format": "short", 450 | "label": null, 451 | "logBase": 1, 452 | "max": null, 453 | "min": null, 454 | "show": true 455 | } 456 | ], 457 | "yaxis": { 
458 | "align": false, 459 | "alignLevel": null 460 | } 461 | } 462 | ], 463 | "refresh": false, 464 | "schemaVersion": 20, 465 | "style": "dark", 466 | "tags": [], 467 | "templating": { 468 | "list": [ 469 | { 470 | "allValue": null, 471 | "current": { 472 | "text": "node", 473 | "value": "node" 474 | }, 475 | "datasource": "prometheus", 476 | "definition": "label_values(node_exporter_build_info, job)", 477 | "hide": 0, 478 | "includeAll": false, 479 | "label": "Job", 480 | "multi": false, 481 | "name": "job", 482 | "options": [], 483 | "query": "label_values(node_exporter_build_info, job)", 484 | "refresh": 1, 485 | "regex": "", 486 | "skipUrlSync": false, 487 | "sort": 0, 488 | "tagValuesQuery": "", 489 | "tags": [], 490 | "tagsQuery": "", 491 | "type": "query", 492 | "useTags": false 493 | }, 494 | { 495 | "allValue": null, 496 | "current": { 497 | "tags": [], 498 | "text": "explorer.mainnet", 499 | "value": "explorer.mainnet" 500 | }, 501 | "datasource": "prometheus", 502 | "definition": "label_values(node_exporter_build_info{job=~\"$job\"}, instance)", 503 | "hide": 0, 504 | "includeAll": false, 505 | "label": "Host:", 506 | "multi": false, 507 | "name": "node", 508 | "options": [], 509 | "query": "label_values(node_exporter_build_info{job=~\"$job\"}, instance)", 510 | "refresh": 1, 511 | "regex": "/([^:]+):.*/", 512 | "skipUrlSync": false, 513 | "sort": 0, 514 | "tagValuesQuery": "", 515 | "tags": [], 516 | "tagsQuery": "", 517 | "type": "query", 518 | "useTags": false 519 | }, 520 | { 521 | "allValue": null, 522 | "current": { 523 | "text": "cardano-explorer.cardano-mainnet.iohk.io", 524 | "value": "cardano-explorer.cardano-mainnet.iohk.io" 525 | }, 526 | "datasource": "prometheus", 527 | "definition": "label_values(nginx_vts_server_requests_total{instance=~\"$node:9113\"},host)", 528 | "hide": 0, 529 | "includeAll": false, 530 | "label": "Nginx host:", 531 | "multi": false, 532 | "name": "nginx_host", 533 | "options": [], 534 | "query": 
"label_values(nginx_vts_server_requests_total{instance=~\"$node:9113\"},host)", 535 | "refresh": 1, 536 | "regex": "", 537 | "skipUrlSync": false, 538 | "sort": 0, 539 | "tagValuesQuery": "", 540 | "tags": [], 541 | "tagsQuery": "", 542 | "type": "query", 543 | "useTags": false 544 | } 545 | ] 546 | }, 547 | "time": { 548 | "from": "now-6h", 549 | "to": "now" 550 | }, 551 | "timepicker": { 552 | "refresh_intervals": [ 553 | "5s", 554 | "10s", 555 | "30s", 556 | "1m", 557 | "5m", 558 | "15m", 559 | "30m", 560 | "1h", 561 | "2h", 562 | "1d" 563 | ], 564 | "time_options": [ 565 | "5m", 566 | "15m", 567 | "1h", 568 | "6h", 569 | "12h", 570 | "24h", 571 | "2d", 572 | "7d", 573 | "30d" 574 | ] 575 | }, 576 | "timezone": "", 577 | "title": "Nginx basic", 578 | "uid": "LoC4-GnWk", 579 | "version": 14 580 | } 581 | -------------------------------------------------------------------------------- /modules/graylog/graylogConfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "v": 1, 3 | "id": "51ad8f2f-36e6-4cf0-bb0d-0bef13271347", 4 | "rev": 1, 5 | "name": "monitorContentPack", 6 | "summary": "Cardano Graylog Monitoring Content Pack", 7 | "description": "", 8 | "vendor": "IOHK", 9 | "url": "", 10 | "parameters": [], 11 | "entities": [ 12 | { 13 | "v": "1", 14 | "type": { 15 | "name": "input", 16 | "version": "1" 17 | }, 18 | "id": "43b89315-29d2-47c3-b6cb-308980c3377d", 19 | "data": { 20 | "title": { 21 | "@type": "string", 22 | "@value": "Beats" 23 | }, 24 | "configuration": { 25 | "tls_key_file": { 26 | "@type": "string", 27 | "@value": "" 28 | }, 29 | "port": { 30 | "@type": "integer", 31 | "@value": 5044 32 | }, 33 | "tls_enable": { 34 | "@type": "boolean", 35 | "@value": false 36 | }, 37 | "recv_buffer_size": { 38 | "@type": "integer", 39 | "@value": 1048576 40 | }, 41 | "tcp_keepalive": { 42 | "@type": "boolean", 43 | "@value": false 44 | }, 45 | "tls_client_auth_cert_file": { 46 | "@type": "string", 47 | "@value": "" 48 
| }, 49 | "bind_address": { 50 | "@type": "string", 51 | "@value": "0.0.0.0" 52 | }, 53 | "no_beats_prefix": { 54 | "@type": "boolean", 55 | "@value": false 56 | }, 57 | "tls_cert_file": { 58 | "@type": "string", 59 | "@value": "" 60 | }, 61 | "tls_client_auth": { 62 | "@type": "string", 63 | "@value": "disabled" 64 | }, 65 | "number_worker_threads": { 66 | "@type": "integer", 67 | "@value": 2 68 | }, 69 | "tls_key_password": { 70 | "@type": "string", 71 | "@value": "" 72 | } 73 | }, 74 | "static_fields": {}, 75 | "type": { 76 | "@type": "string", 77 | "@value": "org.graylog.plugins.beats.Beats2Input" 78 | }, 79 | "global": { 80 | "@type": "boolean", 81 | "@value": true 82 | }, 83 | "extractors": [] 84 | }, 85 | "constraints": [ 86 | { 87 | "type": "server-version", 88 | "version": ">=3.0.1+de74b68" 89 | } 90 | ] 91 | } 92 | ] 93 | } 94 | -------------------------------------------------------------------------------- /modules/graylog/graylogPreload.sh: -------------------------------------------------------------------------------- 1 | # Shell shebang prepended by nix 2 | set -e 3 | 4 | # General Definitions 5 | installSuccess="/var/lib/graylog/.graylogConfigured" 6 | installLog="/var/lib/graylog/.graylogConfigured.log" 7 | 8 | # Content Pack Definitions 9 | contentPack="" # Content pack file, passed as arg 2 10 | cpName="" # Must parse to "monitorContentPack" for proper script logic 11 | cpId="" # Content pack name as parsed from the content pack file 12 | cpVer="" # Content pack version as parsed from the content pack file 13 | cpRev="" # Content pack revision as parsed from the content pack file 14 | cpComment="monitorContentPack" # Must remain set to "monitorContentPack" for proper script logic 15 | cpVendor="IOHK" # Must remain set to "IOHK" for proper script logic 16 | 17 | # Curl Definitions 18 | user="@user@" 19 | password="@password@" 20 | graylogApiUrl="http://localhost:9000/api" 21 | curlH="curl -s -w \"\\\\napiRc: %{http_code}\" -u $user:$password -H 
'X-Requested-By: $user' $graylogApiUrl" 22 | jsonH="-H 'Content-Type: application/json'" 23 | jsonComment="-d '{ \"comment\": \"$cpComment\" }'" 24 | jsonData="" # Set dynamically based on the contentPack variable 25 | 26 | # Other globals, used dynamically 27 | cmd="" # Command string used dynamically for evaluations 28 | rsp="" # Generic API body response plus return code 29 | body="" # Parsed JSON response body from last API call 30 | code="" # HTTP(S) code response from last API call 31 | flagScriptInstalled="false" # Flag to show if the success file shows the content pack is installed 32 | flagApiLoaded="false" # Flag to show if the Api indicates the content pack is loaded 33 | flagApiInstalled="false" # Flag to show if the Api indicates the content pack is installed 34 | installTotal="" # Content pack installed quantity, integer 35 | installIds="" # Content pack install _id list, \n separated 36 | preCpName="" # Pre-existing default monitoring content pack name, if any 37 | preCpId="" # Pre-existing default monitoring content pack id, if any 38 | preCpVer="" # Pre-existing default monitoring content pack version, if any 39 | preCpRev="" # Pre-existing default monitoring content pack revision, if any 40 | loadedCpNames="" # Array with the loaded content pack names 41 | loadedCpVendors="" # Array with the loaded content pack vendors 42 | loadedCpIds="" # Array with the loaded content pack ids 43 | loadedCpVers="" # Array with the loaded content pack versions 44 | loadedCpRevs="" # Array with the loaded content pack revisions 45 | i="0" # Looping variable 46 | j="0" # Looping variable 47 | deleteCpId="" # Function parameter 48 | uninstallCpId="" # Function parameter 49 | uninstallTotal="" # Total installations to purge for a given content pack id 50 | 51 | # Functions 52 | usage () { 53 | echo -e "Usage:\\n" 54 | echo "graylogPreload.sh " 55 | echo " install = install default monitoring graylog content pack" 56 | echo " remove = remove default monitoring graylog 
content pack" 57 | echo " content_pack = path to content pack to install or remove" 58 | echo "" 59 | echo "Comments:" 60 | echo "This script will install or remove one content pack as specified" 61 | echo "by the content pack definition variables in the script and cli args." 62 | echo "If the specified content pack if not loaded and installed into" 63 | echo "graylog, this script will do so. If the content pack is already" 64 | echo "loaded into graylog, it will not reload or perform an additional" 65 | echo "installation. If the specified content pack is found to already" 66 | echo "be installed in graylog, but the version, revision, or id of the" 67 | echo "existing installation do not match the content spec defined in" 68 | echo "the script, the existing installation will be removed and" 69 | echo "replaced with the version defined in the script." 70 | echo "" 71 | echo "The script determines content pack installation status by" 72 | echo "searching for a default content pack name, comment and vendor of:" 73 | echo "name=\"monitorContentPack\", comment=\"monitorContentPack\", vendor=\"IOHK\"" 74 | echo "" 75 | echo "These key-value definitions must stay static, otherwise, the" 76 | echo "script logic will no longer work." 
77 | exit 1 78 | } 79 | 80 | parseRsp () { 81 | rsp="$1" 82 | body="$(echo "$rsp" | head -n -1)" 83 | code="$(echo "$rsp" | grep apiRc | cut -f 2 -d ' ')" 84 | } 85 | 86 | apiCpLoadedCheck () { 87 | print "Checking for a loaded default monitoring graylog content pack reported by API based on name" 88 | cmd="$curlH/system/content_packs/" 89 | parseRsp "$(eval "$cmd")" 90 | if [[ $code != 200 ]]; then 91 | print "Failed to retrieve the loaded content packs by API, rc $code" 92 | return 1 93 | fi 94 | loadedTotal="$(echo "$body" | jq -r .total)" 95 | if [[ $loadedTotal == 0 ]]; then 96 | print "No content packs are loaded" 97 | return 0 98 | fi 99 | mapfile -t loadedCpNames < <(echo "$body" | jq -r '.content_packs[].name' ) 100 | mapfile -t loadedCpVendors < <(echo "$body" | jq -r '.content_packs[].vendor') 101 | mapfile -t loadedCpIds < <(echo "$body" | jq -r '.content_packs[].id') 102 | mapfile -t loadedCpVers < <(echo "$body" | jq -r '.content_packs[].v') 103 | mapfile -t loadedCpRevs < <(echo "$body" | jq -r '.content_packs[].rev') 104 | for (( i=0; i < loadedTotal; i++ )) do 105 | print " Evaluating content pack: \"${loadedCpNames[$i]}\"" 106 | if [[ ${loadedCpNames[$i]} == "$cpName" ]] && [[ ${loadedCpVendors[$i]} == "$cpVendor" ]]; then 107 | print " Found default: \"${loadedCpNames[$i]}\" Vendor: \"${loadedCpVendors[$i]}\" Id: \"${loadedCpIds[$i]}\" Ver: \"${loadedCpVers[$i]}\" Rev: \"${loadedCpRevs[$i]}\"" 108 | if [[ ${loadedCpIds[$i]} == "$cpId" ]] && [[ ${loadedCpVers[$i]} == "$cpVer" ]] && \ 109 | [[ ${loadedCpRevs[$i]} == "$cpRev" ]]; then 110 | print " The loaded default monitoring content pack is already the correct spec, Id: \"$cpId\", Ver: \"$cpVer\", Rev: \"$cpRev\"" 111 | flagApiLoaded="true" 112 | else 113 | print " The loaded default monitoring content pack is not the expected:" 114 | print " Expected Id: \"$cpId\" Ver: \"$cpVer\" Rev: \"$cpRev\"" 115 | print " Found Id: \"${loadedCpIds[$i]}\" Ver: \"${loadedCpVers[$i]}\" Rev: 
\"${loadedCpRevs[$i]}\"" 116 | print "" 117 | print "Removing old default monitoring content pack with Id: \"${loadedCpIds[$i]}\" Ver: \"${loadedCpVers[$i]}\" Rev: \"${loadedCpRevs[$i]}\"" 118 | if apiUninstall "${loadedCpIds[$i]}"; then 119 | apiDelete "${loadedCpIds[$i]}" || return 1 120 | else 121 | print "Failed to parse loaded content packs" 122 | return 1 123 | fi 124 | fi 125 | fi 126 | done 127 | print "" 128 | return 0 129 | } 130 | 131 | apiUpload () { 132 | print "Uploading the default graylog content pack..." 133 | cmd="$curlH/system/content_packs $jsonH $jsonData" 134 | parseRsp "$(eval "$cmd")" 135 | print " $body" 136 | print " ApiRC: $code" 137 | if [[ $body == *"already found"* ]] || [[ $code == 201 ]]; then 138 | return 0 139 | else 140 | return 1 141 | fi 142 | } 143 | 144 | apiInstallCheck () { 145 | print "Checking for an installed version of the specified content pack..." 146 | cmd="$curlH/system/content_packs/$cpId/installations" 147 | parseRsp "$(eval "$cmd")" 148 | installTotal="$(echo "$body" | jq -r .total)" 149 | if [[ $code != 200 ]]; then 150 | print "Failed to parse the existing installations for content pack id: $cpId" 151 | return 1 152 | elif [[ $installTotal == 0 ]]; then 153 | print "Existing installation not found for content pack id: $cpId" 154 | else 155 | mapfile -t installIds < <(echo "$body" | jq -r '.installations[]._id') 156 | if [[ $installTotal == 1 ]]; then 157 | print "Verified one default monitoring content pack installation:" 158 | print " Install id: \"${installIds[0]}\"" 159 | flagApiInstalled="true" 160 | else 161 | print "Multiple installations, $installTotal, found for the default monitoring content pack:" 162 | for (( i=0; i < installTotal; i++ )); do 163 | print " Install id: \"${installIds[$i]}\"" 164 | done 165 | print "Uninstalling the installations to ensure the proper install is the one in use" 166 | apiUninstall "$cpId" || return 1 167 | fi 168 | fi 169 | return 0 170 | } 171 | 172 | apiInstall () { 173 
| print "Installing the default monitoring graylog content pack..." 174 | cmd="$curlH/system/content_packs/$cpId/$cpRev/installations $jsonH $jsonComment" 175 | parseRsp "$(eval "$cmd")" 176 | print " $body" 177 | print " ApiRC: $code" 178 | if [[ $code != 200 ]]; then 179 | return 1 180 | else 181 | echo "$cpName $cpId $cpVer $cpRev" > $installSuccess 182 | return 0 183 | fi 184 | } 185 | 186 | apiUninstall () { 187 | uninstallCpId="$1" 188 | print "Uninstalling content pack with Id: $uninstallCpId" 189 | cmd="$curlH/system/content_packs/$uninstallCpId/installations" 190 | parseRsp "$(eval "$cmd")" 191 | uninstallTotal="$(echo "$body" | jq -r .total)" 192 | if [[ $code != 200 ]]; then 193 | print "Error retrieving content pack installations for Id: $uninstallCpId" 194 | return 1 195 | elif [[ $uninstallTotal == 0 ]]; then 196 | print "No content pack installations for Id: $uninstallCpId, proceeding with deletion." 197 | else 198 | mapfile -t uninstallIds < <(echo "$body" | jq -r '.installations[]._id') 199 | for (( j=0; j < uninstallTotal; j++ )); do 200 | print "Uninstalling content pack Id: $uninstallCpId, install Id: ${uninstallIds[$j]}" 201 | cmd="$curlH/system/content_packs/$uninstallCpId/installations/${uninstallIds[$j]} -XDELETE" 202 | parseRsp "$(eval "$cmd")" 203 | print " $body" 204 | print " ApiRC: $code" 205 | if [[ $code != "200" ]]; then 206 | print "Failed uninstall of install Id: ${uninstallIds[$j]}" 207 | return 1 208 | fi 209 | done 210 | fi 211 | return 0 212 | } 213 | 214 | apiDelete () { 215 | deleteCpId="$1" 216 | print "Deleting the content pack with Id: $deleteCpId" 217 | cmd="$curlH/system/content_packs/$deleteCpId -XDELETE" 218 | parseRsp "$(eval "$cmd")" 219 | print " $body" 220 | print " ApiRC: $code" 221 | if [[ $code != "204" ]]; then 222 | print "Failed to delete content pack with Id: $deleteCpId" 223 | return 1 224 | fi 225 | return 0 226 | } 227 | 228 | print () { 229 | echo -e "$(date): $1" | tee -a $installLog 230 | } 231 | 232 | 
### MAIN 233 | 234 | # Process command cli args and preload content pack required key values 235 | if [[ $# -ne 2 ]] || { [[ $1 != install ]] && [[ $1 != remove ]]; }; then 236 | usage 237 | elif ! [[ -r $2 ]]; then 238 | echo "Content pack file specified does not exist or cannot be read: \"$2\"" 239 | exit 1 240 | else 241 | contentPack="$2" 242 | # cpName must parse to "monitorContentPack" for proper script logic 243 | cpName="$(jq -e -r .name "$contentPack")" || { print "Failed to obtain cpName from file"; exit 1; } 244 | cpId="$(jq -e -r .id "$contentPack")" || { print "Failed to obtain cpId from file"; exit 1; } 245 | cpVer="$(jq -e -r .v "$contentPack")" || { print "Failed to obtain cpVer from file"; exit 1; } 246 | cpRev="$(jq -e -r .rev "$contentPack")" || { print "Failed to obtain cpRev from file"; exit 1; } 247 | jsonData="-d '@$contentPack'" 248 | print "Specified content pack to install or remove is:" 249 | print " Path: \"$contentPack\"" 250 | print " Name: \"$cpName\", Id: \"$cpId\", Ver: \"$cpVer\", Rev: \"$cpRev\"" 251 | print "" 252 | fi 253 | 254 | # Load any pre-existing state info 255 | print "Checking script history for a pre-existing installed default monitoring content pack..." 
256 | if [[ -r $installSuccess ]]; then 257 | flagScriptInstalled="true" 258 | preCpName="$(cut -f 1 -d ' ' $installSuccess)" 259 | preCpId="$(cut -f 2 -d ' ' $installSuccess)" 260 | preCpVer="$(cut -f 3 -d ' ' $installSuccess)" 261 | preCpRev="$(cut -f 4 -d ' ' $installSuccess)" 262 | print " Found, Name: \"$preCpName\", Id: \"$preCpId\", Ver: \"$preCpVer\", Rev: \"$preCpRev\"" 263 | else 264 | print " No script history for a pre-existing default monitoring content pack was found" 265 | fi 266 | print "" 267 | 268 | # Install the content pack 269 | if [[ $1 == install ]]; then 270 | 271 | # Check for the proper definition of the default content monitoring pack 272 | if [[ $cpName != "monitorContentPack" ]] || [[ $cpComment != "monitorContentPack" ]]; then 273 | print "Warning: the new content pack or script config does not appear" 274 | print "to be valid as the content pack JSON name or comment is not as expected:" 275 | print "Expected content pack name and comment: \"monitorContentPack\"" 276 | print "Actual content pack name: \"$cpName\"" 277 | print "Script defined content pack comment: \"$cpComment\"" 278 | print "" 279 | print "The default monitoring content pack should utilize the expected name" 280 | print "and comment above so that this script can differentiate it from other" 281 | print "user applied content packs which should not be modified by" 282 | print "this script. Please correct the comment and re-run this script." 283 | exit 0 284 | fi 285 | 286 | # Parse the state info for a pre-existing default content pack installation 287 | if [[ $flagScriptInstalled == "true" ]] && [[ $preCpId == "$cpId" ]] && [[ $preCpVer == "$cpVer" ]] \ 288 | && [[ $preCpName == "$cpName" ]] && [[ $preCpRev == "$cpRev" ]]; then 289 | print "This script indicates this content pack was previously installed." 290 | print "If you wish to re-install, please uninstall the old pack first" 291 | print "with the \"remove\" script argument." 
292 | exit 0 293 | elif [[ $flagScriptInstalled == "true" ]] && [[ $preCpName == "$cpName" ]]; then 294 | print "A different version of the default monitoring content pack is installed:" 295 | print "Specified New Id: \"$cpId\", Ver: \"$cpVer\", Rev: \"$cpRev\"" 296 | print "Pre-existing Id: \"$preCpId\", Ver: \"$preCpVer\", Rev: \"$preCpRev\"" 297 | print "" 298 | fi 299 | 300 | # Process any loaded content packs for possible other version collisions 301 | # Remove other conflicting content packs as needed 302 | print "Checking graylog API responses for a pre-existing loaded default monitoring content pack based on name..." 303 | if ! apiCpLoadedCheck; then 304 | print "Failed to complete the install request" 305 | exit 1 306 | fi 307 | 308 | # Load the content pack if the correct pack is not already present 309 | if [[ $flagApiLoaded != "true" ]]; then 310 | if apiUpload; then 311 | print "Content pack upload succeeded or content pack already uploaded" 312 | else 313 | print "Content pack upload failed" 314 | exit 1 315 | fi 316 | fi 317 | print "" 318 | 319 | # Check for an existing install of the content pack by API 320 | print "Checking graylog API responses for a pre-existing installed default monitoring content pack..." 321 | if ! apiInstallCheck; then 322 | print "Failed to properly check for the correct installed content pack" 323 | exit 0 324 | fi 325 | 326 | # Install the content pack 327 | if [[ $flagApiInstalled != "true" ]]; then 328 | if apiInstall; then 329 | print "Default content pack installed\\n" 330 | print "Created success file indicator at $installSuccess" 331 | exit 0 332 | else 333 | print "Unable to install default content pack\\n" 334 | exit 1 335 | fi 336 | fi 337 | exit 0 338 | 339 | elif [[ $1 == remove ]]; then 340 | 341 | # Check if a previous install succeeded 342 | if ! 
[[ -e $installSuccess ]]; then 343 | print "This script indicates no content pack was previously installed" 344 | print "Trying to remove anyway...\\n" 345 | fi 346 | 347 | # Uninstall the content pack 348 | if ! apiUninstall "$cpId"; then 349 | print "Uninstalling failed\\n" 350 | exit 1 351 | fi 352 | 353 | # Delete the content pack 354 | if apiDelete "$cpId"; then 355 | print "The default monitoring content pack has been removed or is not present\\n" 356 | print "Deleting success file indicator at $installSuccess" 357 | rm -f $installSuccess 358 | exit 0 359 | else 360 | print "Unable to delete the default content pack\\n" 361 | exit 1 362 | fi 363 | fi 364 | -------------------------------------------------------------------------------- /modules/monitoring-exporters.nix: -------------------------------------------------------------------------------- 1 | { config, options, pkgs, lib, name, ... }: 2 | 3 | with lib; 4 | 5 | let 6 | cfg = config.services.monitoring-exporters; 7 | extraExporterOpts = { 8 | options = 9 | # We resuse options defined in services.prometheus.scrapeConfigs, in a somewhat hacky way... 10 | let 11 | scrapeConfigOpts = (builtins.head (builtins.head 12 | options.services.prometheus.scrapeConfigs.type.functor.wrapped.getSubModules).imports).options; 13 | in removeAttrs scrapeConfigOpts [ "static_configs" ] // { 14 | port = mkOption { 15 | type = types.int; 16 | default = 80; 17 | description = '' 18 | Monitoring target port. 19 | ''; 20 | }; 21 | labels = mkOption { 22 | type = types.attrsOf types.str; 23 | default = { }; 24 | description = '' 25 | Labels assigned to all metrics scraped from the targets. 26 | ''; 27 | }; 28 | }; 29 | }; 30 | 31 | in { 32 | 33 | options = { 34 | services.monitoring-exporters = { 35 | enable = mkOption { 36 | type = types.bool; 37 | default = true; 38 | description = '' 39 | Enable monitoring exporters. Metrics exporters are 40 | prometheus, statsd and nginx by default. 
Log exporting is 41 | available via journalbeat by default. 42 | Metrics export can be selectively disabled with the metrics option. 43 | Log export be selectively disabled with the logging option. 44 | ''; 45 | }; 46 | 47 | metrics = mkOption { 48 | type = types.bool; 49 | default = cfg.enable; 50 | description = '' 51 | Enable metrics exporters via prometheus, statsd 52 | and nginx. 53 | See also the corresponding metrics server option in 54 | the monitoring-services.nix module: 55 | config.services.monitoring-services.metrics 56 | ''; 57 | }; 58 | 59 | statsdPort = mkOption { 60 | type = types.int; 61 | default = 8125; 62 | description = '' 63 | Local statsd listenning port. 64 | ''; 65 | }; 66 | 67 | extraPrometheusExportersPorts = mkOption { 68 | type = types.listOf types.int; 69 | default = [ ]; 70 | apply = list: [ 9100 9102 ] ++ list; 71 | description = '' 72 | Ports of application specific prometheus exporters. 73 | ''; 74 | }; 75 | 76 | extraPrometheusExporters = mkOption { 77 | default = [ ]; 78 | type = with types; listOf (submodule extraExporterOpts); 79 | apply = exporters: 80 | lib.optional (config.services.nginx.enable) { 81 | job_name = "nginx"; 82 | scrape_interval = "5s"; 83 | metrics_path = "/status/format/prometheus"; 84 | port = 9113; 85 | } ++ lib.optional (config.services.varnish.enable) { 86 | job_name = "varnish"; 87 | scrape_interval = "5s"; 88 | port = 9131; 89 | } ++ map (port: { 90 | job_name = "node"; 91 | scrape_interval = "10s"; 92 | inherit port; 93 | }) cfg.extraPrometheusExportersPorts ++ exporters; 94 | description = '' 95 | A list of additional scrape configurations for this host. 
96 | ''; 97 | example = literalExample '' 98 | [ 99 | { 100 | job_name = "netdata"; 101 | scrape_interval = "60s"; 102 | metrics_path = "/api/v1/allmetrics?format=prometheus"; 103 | port = globals.netdataExporterPort; 104 | }; 105 | ] 106 | ''; 107 | }; 108 | 109 | logging = mkOption { 110 | type = types.bool; 111 | default = cfg.enable; 112 | description = '' 113 | Enable logging exporter via journalbeat to graylog. 114 | See also the corresponding logging server option in 115 | the monitoring-services.nix module: 116 | config.services.monitoring-services.logging 117 | ''; 118 | }; 119 | 120 | graylogHost = mkOption { 121 | type = types.str; 122 | example = "graylog:5044"; 123 | description = '' 124 | The host port under which Graylog is externally reachable. 125 | ''; 126 | }; 127 | 128 | papertrail.enable = mkOption { 129 | type = types.bool; 130 | default = false; 131 | description = '' 132 | Enable papertrail. 133 | ''; 134 | }; 135 | 136 | ownIp = mkOption { 137 | type = types.str; 138 | description = 139 | "the address a remote prometheus node will use to contact this machine"; 140 | }; 141 | }; 142 | }; 143 | 144 | config = mkIf cfg.enable (mkMerge [ 145 | (mkIf (config.services.nginx.enable && cfg.metrics) { 146 | services.nginx = { 147 | appendHttpConfig = '' 148 | vhost_traffic_status_zone; 149 | server { 150 | listen 9113; 151 | location /status { 152 | vhost_traffic_status_display; 153 | vhost_traffic_status_display_format html; 154 | } 155 | } 156 | ''; 157 | }; 158 | networking.firewall.allowedTCPPorts = [ 9113 ]; 159 | }) 160 | 161 | (mkIf (config.services.varnish.enable && cfg.metrics) { 162 | services.prometheus.exporters.varnish = { 163 | enable = true; 164 | group = "varnish"; 165 | instance = "/var/spool/varnish/${name}"; 166 | }; 167 | networking.firewall.allowedTCPPorts = [ 9131 ]; 168 | }) 169 | 170 | (mkIf cfg.metrics { 171 | systemd.services."statd-exporter" = { 172 | wantedBy = [ "multi-user.target" ]; 173 | requires = [ "network.target" 
]; 174 | after = [ "network.target" ]; 175 | serviceConfig.ExecStart = 176 | "${pkgs.prometheus-statsd-exporter}/bin/statsd_exporter --statsd.listen-udp=:${ 177 | toString cfg.statsdPort 178 | } --web.listen-address=:9102"; 179 | }; 180 | 181 | services = { 182 | prometheus.exporters.node = { 183 | enable = true; 184 | enabledCollectors = [ 185 | "systemd" 186 | "tcpstat" 187 | "conntrack" 188 | "diskstats" 189 | "entropy" 190 | "filefd" 191 | "filesystem" 192 | "loadavg" 193 | "meminfo" 194 | "netdev" 195 | "netstat" 196 | "stat" 197 | "time" 198 | "timex" 199 | "vmstat" 200 | "logind" 201 | "interrupts" 202 | "ksmd" 203 | "processes" 204 | ]; 205 | }; 206 | }; 207 | networking.firewall.allowedTCPPorts = 208 | map (e: e.port) cfg.extraPrometheusExporters; 209 | }) 210 | 211 | (mkIf cfg.logging { 212 | services.journalbeat = { 213 | enable = false; 214 | package = pkgs.journalbeat7; 215 | extraConfig = '' 216 | journalbeat: 217 | seek_position: cursor 218 | cursor_seek_fallback: tail 219 | write_cursor_state: true 220 | cursor_flush_period: 5s 221 | clean_field_names: true 222 | convert_to_numbers: false 223 | move_metadata_to_field: journal 224 | default_type: journal 225 | output.logstash: 226 | hosts: ["${cfg.graylogHost}"] 227 | journalbeat.inputs: 228 | - paths: 229 | - "/var/log/journal/" 230 | ''; 231 | }; 232 | }) 233 | 234 | (mkIf cfg.papertrail.enable { 235 | systemd.services.papertrail = { 236 | description = "Papertrail.com log aggregation"; 237 | after = [ "network.target" ]; 238 | wantedBy = [ "multi-user.target" ]; 239 | script = '' 240 | ${pkgs.systemd}/bin/journalctl -f | ${pkgs.nmap}/bin/ncat --ssl logs5.papertrailapp.com 43689 241 | ''; 242 | serviceConfig = { 243 | Restart = "on-failure"; 244 | RestartSec = "5s"; 245 | TimeoutStartSec = 0; 246 | KillSignal = "SIGINT"; 247 | }; 248 | }; 249 | }) 250 | ]); 251 | } 252 | -------------------------------------------------------------------------------- /modules/nginx/cardano-large.svg: 
-------------------------------------------------------------------------------- 1 | 3 | 4 | 5 | 55 | 56 | 57 | 58 | -------------------------------------------------------------------------------- /modules/nginx/cardano-small.svg: -------------------------------------------------------------------------------- 1 | F79794F5-5892-43CE-86BB-132FBA56084A -------------------------------------------------------------------------------- /modules/oauth.nix: -------------------------------------------------------------------------------- 1 | { config, options, pkgs, lib, name, ... }: 2 | 3 | with lib; 4 | 5 | let 6 | cfg = config.services.oauth2_proxy; 7 | in { 8 | options = { 9 | services.oauth2_proxy.nginx.config = mkOption { 10 | type = types.str; 11 | default = optionalString cfg.enable '' 12 | auth_request /oauth2/auth; 13 | error_page 401 = /oauth2/sign_in; 14 | 15 | # pass information via X-User and X-Email headers to backend, 16 | # requires running with --set-xauthrequest flag 17 | auth_request_set $user $upstream_http_x_auth_request_user; 18 | auth_request_set $email $upstream_http_x_auth_request_email; 19 | proxy_set_header X-User $user; 20 | proxy_set_header X-Email $email; 21 | 22 | # if you enabled --cookie-refresh, this is needed for it to work with auth_request 23 | auth_request_set $auth_cookie $upstream_http_set_cookie; 24 | add_header Set-Cookie $auth_cookie; 25 | ''; 26 | }; 27 | }; 28 | 29 | config = lib.mkIf cfg.enable { 30 | services = { 31 | oauth2_proxy = let staticConf = pkgs.globals.static.oauth or {}; in { 32 | setXauthrequest = mkDefault true; 33 | } // staticConf 34 | // (mapAttrsRecursive (_: mkDefault) (removeAttrs staticConf ["alphaConfig"])); 35 | }; 36 | users.users.oauth2_proxy.group = "oauth2_proxy"; 37 | users.groups.oauth2_proxy = {}; 38 | }; 39 | } 40 | -------------------------------------------------------------------------------- /modules/sentry/default.nix: 
-------------------------------------------------------------------------------- 1 | { 2 | imports = [ ./services/sentry.nix 3 | ./services/snuba.nix 4 | ./services/symbolicator.nix 5 | ]; 6 | } 7 | -------------------------------------------------------------------------------- /modules/sentry/services/snuba.nix: -------------------------------------------------------------------------------- 1 | { config, lib, pkgs, ... }: 2 | let 3 | cfg = config.services.snuba; 4 | 5 | surround = prefix: suffix: x: if x != null then prefix + x + suffix else null; 6 | 7 | or_ = a: b: if a != null then a else b; 8 | in 9 | with lib; 10 | { 11 | 12 | options = { 13 | services.snuba = { 14 | enable = mkEnableOption "Run the snuba suite of services."; 15 | 16 | package = mkOption { 17 | type = types.package; 18 | default = pkgs.snuba; 19 | example = literalExample "pkgs.snuba"; 20 | description = '' 21 | Snuba package to use. 22 | ''; 23 | }; 24 | 25 | host = mkOption { 26 | type = types.str; 27 | default = "localhost"; 28 | description = '' 29 | Host Snuba should run on. 30 | ''; 31 | }; 32 | 33 | port = mkOption { 34 | type = types.int; 35 | default = 1218; 36 | description = '' 37 | Port snuba should run on. 38 | ''; 39 | }; 40 | 41 | redisHost = mkOption { 42 | type = types.str; 43 | default = "localhost"; 44 | description = '' 45 | Host Redis is running on. 46 | ''; 47 | }; 48 | 49 | redisPort = mkOption { 50 | type = types.int; 51 | default = config.services.redis.port; 52 | description = '' 53 | Port Redis is running on. 54 | ''; 55 | }; 56 | 57 | redisDb = mkOption { 58 | type = types.int; 59 | default = 1; 60 | description = '' 61 | Redis database to use. 62 | ''; 63 | }; 64 | 65 | redisPasswordFile = mkOption { 66 | type = with types; nullOr path; 67 | default = null; 68 | description = '' 69 | Password file for the redis database. 
70 | ''; 71 | }; 72 | 73 | kafkaHost = mkOption { 74 | type = types.str; 75 | default = "localhost"; 76 | description = '' 77 | Host Kafka is running on. 78 | ''; 79 | }; 80 | 81 | kafkaPort = mkOption { 82 | type = types.int; 83 | default = config.services.apache-kafka.port; 84 | description = '' 85 | Port Kafka is running on. 86 | ''; 87 | }; 88 | 89 | clickhouseHost = mkOption { 90 | type = types.str; 91 | default = "localhost"; 92 | description = '' 93 | Host clickhouse is running on. 94 | ''; 95 | }; 96 | 97 | clickhouseClientPort = mkOption { 98 | type = types.int; 99 | default = 9000; 100 | description = '' 101 | Port clickhouse server is running on. Note that clickhouse 102 | server listens on multiple ports, this port is the port 103 | clickhouse client should use. 104 | ''; 105 | }; 106 | 107 | clickhouseHttpPort = mkOption { 108 | type = types.int; 109 | default = 8123; 110 | description = '' 111 | Port clickhouse HTTP server is running on. Note that 112 | clickhouse server listens on multiple ports, this port is the 113 | port for the HTTP server. 
114 | ''; 115 | }; 116 | }; 117 | }; 118 | 119 | config = 120 | let 121 | snubaSettingsPy = pkgs.writeText "settings.py" '' 122 | import os 123 | from snuba.settings_base import * # NOQA 124 | 125 | env = os.environ.get 126 | 127 | def readPasswordFile(file): 128 | with open(file, 'r') as fd: 129 | return fd.read() 130 | 131 | HOST = "${cfg.host}" 132 | PORT = ${toString cfg.port} 133 | 134 | DEBUG = env("DEBUG", "0").lower() in ("1", "true") 135 | 136 | DEFAULT_BROKERS = "${cfg.kafkaHost}:${toString cfg.kafkaPort}".split(",") 137 | 138 | REDIS_HOST = "${cfg.redisHost}" 139 | REDIS_PORT = ${toString cfg.redisPort} 140 | REDIS_PASSWORD = "${or_ (surround "readPasswordFile(" ")" cfg.redisPasswordFile) ""}" 141 | REDIS_DB = ${toString cfg.redisDb} 142 | USE_REDIS_CLUSTER = False 143 | 144 | # Clickhouse Options 145 | CLICKHOUSE_HOST = "${cfg.clickhouseHost}" 146 | CLICKHOUSE_PORT = ${toString cfg.clickhouseClientPort} 147 | CLICKHOUSE_HTTP_PORT = ${toString cfg.clickhouseHttpPort} 148 | CLICKHOUSE_MAX_POOL_SIZE = 25 149 | 150 | # Dogstatsd Options 151 | DOGSTATSD_HOST = None 152 | DOGSTATSD_PORT = None 153 | ''; 154 | in mkIf cfg.enable { 155 | users.users.snuba = { 156 | name = "snuba"; 157 | group = "snuba"; 158 | description = "Snuba user"; 159 | }; 160 | 161 | users.groups.snuba = {}; 162 | 163 | environment.systemPackages = [ 164 | cfg.package 165 | ]; 166 | 167 | services.cron = { 168 | enable = true; 169 | systemCronJobs = [ 170 | "*/5 * * * * snuba SNUBA_SETTINGS=${snubaSettingsPy} snuba cleanup --dry-run False" 171 | ]; 172 | }; 173 | 174 | systemd.services = 175 | let 176 | common = { 177 | wantedBy = [ "multi-user.target" ]; 178 | requires = [ "snuba-init.service" ]; 179 | after = [ "network.target" "snuba-init.service" ]; 180 | 181 | serviceConfig = { 182 | Environment="SNUBA_SETTINGS=${snubaSettingsPy}"; 183 | User = "snuba"; 184 | Group = "snuba"; 185 | Restart="on-failure"; 186 | RestartSec="5s"; 187 | }; 188 | }; 189 | in { 190 | snuba-api = 
lib.recursiveUpdate common { 191 | description = "Snuba API"; 192 | serviceConfig.ExecStart = "${cfg.package}/bin/snuba api"; 193 | }; 194 | 195 | snuba-consumer = lib.recursiveUpdate common { 196 | description = "Snuba events consumer"; 197 | serviceConfig.ExecStart = "${cfg.package}/bin/snuba consumer --dataset events --auto-offset-reset=latest --max-batch-time-ms 750"; 198 | }; 199 | 200 | 201 | snuba-outcomes-consumer = lib.recursiveUpdate common { 202 | description = "Snuba outcomes consumer"; 203 | serviceConfig.ExecStart = "${cfg.package}/bin/snuba consumer --dataset outcomes --auto-offset-reset=earliest --max-batch-time-ms 750"; 204 | }; 205 | 206 | snuba-replacer = lib.recursiveUpdate common { 207 | description = "Snuba replacer"; 208 | serviceConfig.ExecStart = "${cfg.package}/bin/snuba replacer --auto-offset-reset=latest --max-batch-size 3"; 209 | }; 210 | 211 | snuba-init = { 212 | description = "Create Kafka topics and Clickhouse tables for Snuba"; 213 | wantedBy = [ "multi-user.target" ]; 214 | after = [ "network.target" ]; 215 | 216 | script = '' 217 | wait_for_open_port() { 218 | local hostname="$1" 219 | local port="$2" 220 | 221 | ${pkgs.coreutils}/bin/timeout 5m ${pkgs.bash}/bin/bash -c "until ${pkgs.netcat}/bin/nc -z $hostname $port -w 1; do echo \"polling $hostname:$port...\"; done" 222 | } 223 | 224 | wait_for_open_port ${cfg.kafkaHost} ${toString cfg.kafkaPort} 225 | kafka=$? 226 | wait_for_open_port ${cfg.clickhouseHost} ${toString cfg.clickhouseHttpPort} 227 | clickhouse=$? 
228 | 229 | if [ $kafka -eq 0 -a $clickhouse -eq 0 ] 230 | then 231 | SNUBA_SETTINGS=${snubaSettingsPy} ${cfg.package}/bin/snuba bootstrap --force 232 | else 233 | exit 1 234 | fi 235 | ''; 236 | 237 | serviceConfig = { 238 | Type="oneshot"; 239 | RemainAfterExit = true; 240 | User = "snuba"; 241 | Group = "snuba"; 242 | }; 243 | }; 244 | }; 245 | }; 246 | } 247 | -------------------------------------------------------------------------------- /modules/sentry/services/symbolicator.nix: -------------------------------------------------------------------------------- 1 | { config, lib, pkgs, ... }: 2 | let 3 | cfg = config.services.symbolicator; 4 | in 5 | with lib; 6 | { 7 | 8 | options = { 9 | services.symbolicator = { 10 | enable = mkEnableOption "Run the symbolicator service."; 11 | 12 | package = mkOption { 13 | type = types.package; 14 | default = pkgs.symbolicator; 15 | example = literalExample "pkgs.symbolicator"; 16 | description = '' 17 | Symbolicator package to use. 18 | ''; 19 | }; 20 | 21 | host = mkOption { 22 | type = types.str; 23 | default = "localhost"; 24 | description = '' 25 | Host Symbolicator should run on. 26 | ''; 27 | }; 28 | 29 | port = mkOption { 30 | type = types.int; 31 | default = 3021; 32 | description = '' 33 | Port Symbolicator should run on. 
34 | ''; 35 | }; 36 | }; 37 | }; 38 | 39 | config = 40 | let 41 | symbolicatorCfg = pkgs.writeText "config.yml" '' 42 | cache_dir: "/tmp/symbolicator" 43 | bind: "${cfg.host}:${toString cfg.port}" 44 | logging: 45 | level: "info" 46 | format: "auto" 47 | enable_backtraces: true 48 | # metrics: 49 | # statsd: "127.0.0.1:8125" 50 | # prefix: "symbolicator" 51 | ''; 52 | in mkIf cfg.enable { 53 | users.users.symbolicator = { 54 | name = "symbolicator"; 55 | group = "symbolicator"; 56 | description = "Symbolicator user"; 57 | }; 58 | 59 | users.groups.symbolicator = {}; 60 | 61 | environment.systemPackages = [ 62 | cfg.package 63 | ]; 64 | 65 | services.cron = { 66 | enable = true; 67 | systemCronJobs = [ 68 | "55 23 * * * symbolicator ${cfg.package}/bin/symbolicator cleanup -c ${symbolicatorCfg}" 69 | ]; 70 | }; 71 | 72 | systemd.services.symbolicator = { 73 | description = "Symbolicator"; 74 | 75 | wantedBy = [ "multi-user.target" ]; 76 | after = [ "network.target" ]; 77 | 78 | serviceConfig = { 79 | User = "symbolicator"; 80 | Group = "symbolicator"; 81 | ExecStart = "${cfg.package}/bin/symbolicator run -c ${symbolicatorCfg}"; 82 | Restart="on-failure"; 83 | RestartSec="5s"; 84 | }; 85 | }; 86 | }; 87 | } 88 | -------------------------------------------------------------------------------- /modules/vim-michael-bishop.nix: -------------------------------------------------------------------------------- 1 | { pkgs, ... 
}: 2 | 3 | let 4 | customizedVim = pkgs.vim_configurable.customize { 5 | name = "vim-mb"; 6 | vimrcConfig = { 7 | customRC = '' 8 | syntax on 9 | set nu 10 | set foldmethod=syntax 11 | set listchars=tab:->,trail:· 12 | set list 13 | set ruler 14 | set backspace=indent,eol,start 15 | map :tabp 16 | map :tabn 17 | set expandtab 18 | set softtabstop=2 19 | set shiftwidth=2 20 | set autoindent 21 | set background=dark 22 | 23 | highlight ExtraWhitespace ctermbg=red guibg=red 24 | au ColorScheme * highlight ExtraWhitespace guibg=red 25 | au BufEnter * match ExtraWhitespace /\s\+$/ 26 | au InsertEnter * match ExtraWhitespace /\s\+\%#\@ :FuzzyOpen 41 | ''; 42 | packages.myVimPackage = with pkgs.vimPlugins; { 43 | start = [ 44 | denite-nvim 45 | deoplete-nvim 46 | editorconfig-vim 47 | gruvbox 48 | neoformat 49 | neomake 50 | neoterm 51 | vim-abolish 52 | vim-airline 53 | vim-airline-themes 54 | vim-eunuch 55 | vim-fugitive 56 | vim-gitgutter 57 | vim-grepper 58 | vim-multiple-cursors 59 | vim-polyglot 60 | vim-repeat 61 | vim-surround 62 | vim-test 63 | vim-unimpaired 64 | ]; 65 | 66 | opt = [ 67 | vim-javascript 68 | vim-nix 69 | vim-addon-nix 70 | ]; 71 | }; 72 | }; 73 | }; 74 | in { 75 | environment.systemPackages = [ 76 | customizedVim 77 | ]; 78 | } 79 | -------------------------------------------------------------------------------- /modules/vims.nix: -------------------------------------------------------------------------------- 1 | { 2 | imports = [ 3 | ./vim-michael-bishop.nix 4 | ./vim-michael-fellinger.nix 5 | ]; 6 | environment = { 7 | shellAliases = { 8 | vim = "nvim"; 9 | vi = "nvim"; 10 | }; 11 | variables.EDITOR = "nvim"; 12 | }; 13 | programs.bash.shellAliases = { 14 | vim = "nvim"; 15 | vi = "nvim"; 16 | }; 17 | local.commonGivesVim = false; 18 | } 19 | -------------------------------------------------------------------------------- /nix/default.nix: -------------------------------------------------------------------------------- 1 | { system ? 
builtins.currentSystem 2 | , crossSystem ? null 3 | , config ? {} 4 | , sourcesOverride ? {} }: 5 | let 6 | sourcePaths = import ./sources.nix { inherit pkgs; } 7 | // sourcesOverride; 8 | 9 | iohkNix = import sourcePaths.iohk-nix {}; 10 | 11 | # use our own nixpkgs if it exists in our sources, 12 | # otherwise use iohkNix default nixpkgs. 13 | nixpkgs = if (sourcePaths ? nixpkgs) 14 | then sourcePaths.nixpkgs 15 | else iohkNix.nixpkgs; 16 | 17 | flake-compat = import sourcePaths.flake-compat; 18 | 19 | overlays = (import ../overlays sourcePaths false) ++ 20 | [ (import ../globals-deployers.nix) 21 | (final: prev: { 22 | inherit ((flake-compat { 23 | inherit pkgs; 24 | src = sourcePaths.nixpkgs-2211; 25 | }).defaultNix.legacyPackages.${final.system}) nix; 26 | }) 27 | ]; 28 | 29 | pkgs = import nixpkgs { 30 | inherit config system crossSystem overlays; 31 | }; 32 | 33 | in pkgs 34 | -------------------------------------------------------------------------------- /nix/sources.json: -------------------------------------------------------------------------------- 1 | { 2 | "flake-compat": { 3 | "branch": "fixes", 4 | "description": null, 5 | "homepage": null, 6 | "owner": "input-output-hk", 7 | "repo": "flake-compat", 8 | "rev": "7da118186435255a30b5ffeabba9629c344c0bec", 9 | "sha256": "01iwymbnyadqj1xfl0xhk8582v7k9b2m6s6yxp3r805w4g3h1k62", 10 | "type": "tarball", 11 | "url": "https://github.com/input-output-hk/flake-compat/archive/7da118186435255a30b5ffeabba9629c344c0bec.tar.gz", 12 | "url_template": "https://github.com///archive/.tar.gz" 13 | }, 14 | "iohk-nix": { 15 | "branch": "master", 16 | "description": "nix scripts shared across projects", 17 | "homepage": null, 18 | "owner": "input-output-hk", 19 | "repo": "iohk-nix", 20 | "rev": "bc4216c5b0e14dbde5541763f4952f99c3c712fa", 21 | "sha256": "0y5n3limj5dg1vgxyxafg0ky35qq7w97rr00gr3yl16xx5jrhs6w", 22 | "type": "tarball", 23 | "url": 
"https://github.com/input-output-hk/iohk-nix/archive/bc4216c5b0e14dbde5541763f4952f99c3c712fa.tar.gz", 24 | "url_template": "https://github.com///archive/.tar.gz" 25 | }, 26 | "naersk": { 27 | "branch": "master", 28 | "description": "Build rust crates in Nix. No configuration, no code generation, no IFD. Sandbox friendly.", 29 | "homepage": "", 30 | "owner": "nmattia", 31 | "repo": "naersk", 32 | "rev": "254fa90956d3e0b6747fbd2b6bea71169c740e6f", 33 | "sha256": "04z5wibrmsad2yhx09hxkyvgn2kmdjd40fmxa9g7yla0vdxldg0m", 34 | "type": "tarball", 35 | "url": "https://github.com/nmattia/naersk/archive/254fa90956d3e0b6747fbd2b6bea71169c740e6f.tar.gz", 36 | "url_template": "https://github.com///archive/.tar.gz" 37 | }, 38 | "nixops-aws": { 39 | "branch": "ipv6", 40 | "description": "Nixops AWS Backend Plugin", 41 | "homepage": null, 42 | "owner": "input-output-hk", 43 | "repo": "nixops-aws", 44 | "rev": "eac7647187b70077b2e1a08dd6cc02f5f156b5a4", 45 | "sha256": "1y9x15z3b8y1smv9c7rpsjmi3xs8abg3xk7r7ar4jj7njbarbfaf", 46 | "type": "tarball", 47 | "url": "https://github.com/input-output-hk/nixops-aws/archive/eac7647187b70077b2e1a08dd6cc02f5f156b5a4.tar.gz", 48 | "url_template": "https://github.com///archive/.tar.gz" 49 | }, 50 | "nixops-core": { 51 | "branch": "nixops-core-pr-int", 52 | "description": "NixOps, the NixOS-based cloud deployment tool", 53 | "homepage": "http://nixos.org/", 54 | "owner": "input-output-hk", 55 | "repo": "nixops", 56 | "rev": "e8499dbf13fe4c1d2a88f0f706104b25783673ef", 57 | "sha256": "1adhwlinlakfidw5p5ah8my1v5r3ahjhxj2l89b8vn8spfddpdvx", 58 | "type": "tarball", 59 | "url": "https://github.com/input-output-hk/nixops/archive/e8499dbf13fe4c1d2a88f0f706104b25783673ef.tar.gz", 60 | "url_template": "https://github.com///archive/.tar.gz" 61 | }, 62 | "nixops-libvirtd": { 63 | "branch": "fix-for-latest-nix", 64 | "description": "NixOps libvirtd backend plugin [maintainer=@AmineChikhaoui]", 65 | "homepage": "", 66 | "owner": "input-output-hk", 67 | "repo": 
"nixops-libvirtd", 68 | "rev": "efb48a99837403622d8b89a051a3df4ac52cee51", 69 | "sha256": "0dh799qq24wigvys8zkw8q4lkjsif6yx5h0kvndr8f8b17vb439j", 70 | "type": "tarball", 71 | "url": "https://github.com/input-output-hk/nixops-libvirtd/archive/efb48a99837403622d8b89a051a3df4ac52cee51.tar.gz", 72 | "url_template": "https://github.com///archive/.tar.gz" 73 | }, 74 | "nixops-packet": { 75 | "branch": "c2-medium-reserved-changes", 76 | "description": "NixOps Packet.net Plugin", 77 | "homepage": null, 78 | "owner": "input-output-hk", 79 | "repo": "nixops-packet", 80 | "rev": "b96a78c57d315bb292318f7fb4f198b1483e188b", 81 | "sha256": "15b7x6rilhmg42wmprqjaqadwcn8351a4wkf76m90rfqsk1piw64", 82 | "type": "tarball", 83 | "url": "https://github.com/input-output-hk/nixops-packet/archive/b96a78c57d315bb292318f7fb4f198b1483e188b.tar.gz", 84 | "url_template": "https://github.com///archive/.tar.gz" 85 | }, 86 | "nixpkgs": { 87 | "branch": "nixos-23.11", 88 | "description": "Nix Packages collection", 89 | "homepage": null, 90 | "owner": "NixOS", 91 | "repo": "nixpkgs", 92 | "rev": "dd37924974b9202f8226ed5d74a252a9785aedf8", 93 | "sha256": "1nxd4dqci8rs94a7cypx30axgj778p2wydkx16q298n29crkflbw", 94 | "type": "tarball", 95 | "url": "https://github.com/NixOS/nixpkgs/archive/dd37924974b9202f8226ed5d74a252a9785aedf8.tar.gz", 96 | "url_template": "https://github.com///archive/.tar.gz" 97 | }, 98 | "nixpkgs-2211": { 99 | "branch": "nixos-22.11", 100 | "description": "Nix Packages collection", 101 | "homepage": "", 102 | "owner": "NixOS", 103 | "repo": "nixpkgs", 104 | "rev": "d51554151a91cd4543a7620843cc378e3cbc767e", 105 | "sha256": "01ibf17750xis7j9zqms6kr5j5jrxd4kvf4p8iwkw706q2mj6i06", 106 | "type": "tarball", 107 | "url": "https://github.com/NixOS/nixpkgs/archive/d51554151a91cd4543a7620843cc378e3cbc767e.tar.gz", 108 | "url_template": "https://github.com///archive/.tar.gz" 109 | }, 110 | "nixpkgs-mozilla": { 111 | "branch": "master", 112 | "description": "mozilla related nixpkgs (extends 
nixos/nixpkgs repo)", 113 | "homepage": null, 114 | "owner": "mozilla", 115 | "repo": "nixpkgs-mozilla", 116 | "rev": "e912ed483e980dfb4666ae0ed17845c4220e5e7c", 117 | "sha256": "08fvzb8w80bkkabc1iyhzd15f4sm7ra10jn32kfch5klgl0gj3j3", 118 | "type": "tarball", 119 | "url": "https://github.com/mozilla/nixpkgs-mozilla/archive/e912ed483e980dfb4666ae0ed17845c4220e5e7c.tar.gz", 120 | "url_template": "https://github.com///archive/.tar.gz" 121 | }, 122 | "nixpkgs-nixops": { 123 | "branch": "nixos-21.11", 124 | "description": "Nix Packages collection & NixOS", 125 | "homepage": "", 126 | "owner": "NixOS", 127 | "repo": "nixpkgs", 128 | "rev": "eabc38219184cc3e04a974fe31857d8e0eac098d", 129 | "sha256": "04ffwp2gzq0hhz7siskw6qh9ys8ragp7285vi1zh8xjksxn1msc5", 130 | "type": "tarball", 131 | "url": "https://github.com/NixOS/nixpkgs/archive/eabc38219184cc3e04a974fe31857d8e0eac098d.tar.gz", 132 | "url_template": "https://github.com///archive/.tar.gz" 133 | } 134 | } 135 | -------------------------------------------------------------------------------- /nix/sources.nix: -------------------------------------------------------------------------------- 1 | # This file has been generated by Niv. 2 | 3 | let 4 | 5 | # 6 | # The fetchers. fetch_ fetches specs of type . 7 | # 8 | 9 | fetch_file = pkgs: name: spec: 10 | let 11 | name' = sanitizeName name + "-src"; 12 | in 13 | if spec.builtin or true then 14 | builtins_fetchurl { inherit (spec) url sha256; name = name'; } 15 | else 16 | pkgs.fetchurl { inherit (spec) url sha256; name = name'; }; 17 | 18 | fetch_tarball = pkgs: name: spec: 19 | let 20 | name' = sanitizeName name + "-src"; 21 | in 22 | if spec.builtin or true then 23 | builtins_fetchTarball { name = name'; inherit (spec) url sha256; } 24 | else 25 | pkgs.fetchzip { name = name'; inherit (spec) url sha256; }; 26 | 27 | fetch_git = name: spec: 28 | let 29 | ref = 30 | if spec ? ref then spec.ref else 31 | if spec ? branch then "refs/heads/${spec.branch}" else 32 | if spec ? 
tag then "refs/tags/${spec.tag}" else 33 | abort "In git source '${name}': Please specify `ref`, `tag` or `branch`!"; 34 | in 35 | builtins.fetchGit { url = spec.repo; inherit (spec) rev; inherit ref; }; 36 | 37 | fetch_local = spec: spec.path; 38 | 39 | fetch_builtin-tarball = name: throw 40 | ''[${name}] The niv type "builtin-tarball" is deprecated. You should instead use `builtin = true`. 41 | $ niv modify ${name} -a type=tarball -a builtin=true''; 42 | 43 | fetch_builtin-url = name: throw 44 | ''[${name}] The niv type "builtin-url" will soon be deprecated. You should instead use `builtin = true`. 45 | $ niv modify ${name} -a type=file -a builtin=true''; 46 | 47 | # 48 | # Various helpers 49 | # 50 | 51 | # https://github.com/NixOS/nixpkgs/pull/83241/files#diff-c6f540a4f3bfa4b0e8b6bafd4cd54e8bR695 52 | sanitizeName = name: 53 | ( 54 | concatMapStrings (s: if builtins.isList s then "-" else s) 55 | ( 56 | builtins.split "[^[:alnum:]+._?=-]+" 57 | ((x: builtins.elemAt (builtins.match "\\.*(.*)" x) 0) name) 58 | ) 59 | ); 60 | 61 | # The set of packages used when specs are fetched using non-builtins. 62 | mkPkgs = sources: system: 63 | let 64 | sourcesNixpkgs = 65 | import (builtins_fetchTarball { inherit (sources.nixpkgs) url sha256; }) { inherit system; }; 66 | hasNixpkgsPath = builtins.any (x: x.prefix == "nixpkgs") builtins.nixPath; 67 | hasThisAsNixpkgsPath = == ./.; 68 | in 69 | if builtins.hasAttr "nixpkgs" sources 70 | then sourcesNixpkgs 71 | else if hasNixpkgsPath && ! hasThisAsNixpkgsPath then 72 | import {} 73 | else 74 | abort 75 | '' 76 | Please specify either (through -I or NIX_PATH=nixpkgs=...) or 77 | add a package called "nixpkgs" to your sources.json. 78 | ''; 79 | 80 | # The actual fetching function. 81 | fetch = pkgs: name: spec: 82 | 83 | if ! 
builtins.hasAttr "type" spec then 84 | abort "ERROR: niv spec ${name} does not have a 'type' attribute" 85 | else if spec.type == "file" then fetch_file pkgs name spec 86 | else if spec.type == "tarball" then fetch_tarball pkgs name spec 87 | else if spec.type == "git" then fetch_git name spec 88 | else if spec.type == "local" then fetch_local spec 89 | else if spec.type == "builtin-tarball" then fetch_builtin-tarball name 90 | else if spec.type == "builtin-url" then fetch_builtin-url name 91 | else 92 | abort "ERROR: niv spec ${name} has unknown type ${builtins.toJSON spec.type}"; 93 | 94 | # If the environment variable NIV_OVERRIDE_${name} is set, then use 95 | # the path directly as opposed to the fetched source. 96 | replace = name: drv: 97 | let 98 | saneName = stringAsChars (c: if isNull (builtins.match "[a-zA-Z0-9]" c) then "_" else c) name; 99 | ersatz = builtins.getEnv "NIV_OVERRIDE_${saneName}"; 100 | in 101 | if ersatz == "" then drv else 102 | # this turns the string into an actual Nix path (for both absolute and 103 | # relative paths) 104 | if builtins.substring 0 1 ersatz == "/" then /. + ersatz else /. 
+ builtins.getEnv "PWD" + "/${ersatz}"; 105 | 106 | # Ports of functions for older nix versions 107 | 108 | # a Nix version of mapAttrs if the built-in doesn't exist 109 | mapAttrs = builtins.mapAttrs or ( 110 | f: set: with builtins; 111 | listToAttrs (map (attr: { name = attr; value = f attr set.${attr}; }) (attrNames set)) 112 | ); 113 | 114 | # https://github.com/NixOS/nixpkgs/blob/0258808f5744ca980b9a1f24fe0b1e6f0fecee9c/lib/lists.nix#L295 115 | range = first: last: if first > last then [] else builtins.genList (n: first + n) (last - first + 1); 116 | 117 | # https://github.com/NixOS/nixpkgs/blob/0258808f5744ca980b9a1f24fe0b1e6f0fecee9c/lib/strings.nix#L257 118 | stringToCharacters = s: map (p: builtins.substring p 1 s) (range 0 (builtins.stringLength s - 1)); 119 | 120 | # https://github.com/NixOS/nixpkgs/blob/0258808f5744ca980b9a1f24fe0b1e6f0fecee9c/lib/strings.nix#L269 121 | stringAsChars = f: s: concatStrings (map f (stringToCharacters s)); 122 | concatMapStrings = f: list: concatStrings (map f list); 123 | concatStrings = builtins.concatStringsSep ""; 124 | 125 | # https://github.com/NixOS/nixpkgs/blob/8a9f58a375c401b96da862d969f66429def1d118/lib/attrsets.nix#L331 126 | optionalAttrs = cond: as: if cond then as else {}; 127 | 128 | # fetchTarball version that is compatible between all the versions of Nix 129 | builtins_fetchTarball = { url, name ? null, sha256 }@attrs: 130 | let 131 | inherit (builtins) lessThan nixVersion fetchTarball; 132 | in 133 | if lessThan nixVersion "1.12" then 134 | fetchTarball ({ inherit url; } // (optionalAttrs (!isNull name) { inherit name; })) 135 | else 136 | fetchTarball attrs; 137 | 138 | # fetchurl version that is compatible between all the versions of Nix 139 | builtins_fetchurl = { url, name ? 
null, sha256 }@attrs: 140 | let 141 | inherit (builtins) lessThan nixVersion fetchurl; 142 | in 143 | if lessThan nixVersion "1.12" then 144 | fetchurl ({ inherit url; } // (optionalAttrs (!isNull name) { inherit name; })) 145 | else 146 | fetchurl attrs; 147 | 148 | # Create the final "sources" from the config 149 | mkSources = config: 150 | mapAttrs ( 151 | name: spec: 152 | if builtins.hasAttr "outPath" spec 153 | then abort 154 | "The values in sources.json should not have an 'outPath' attribute" 155 | else 156 | spec // { outPath = replace name (fetch config.pkgs name spec); } 157 | ) config.sources; 158 | 159 | # The "config" used by the fetchers 160 | mkConfig = 161 | { sourcesFile ? if builtins.pathExists ./sources.json then ./sources.json else null 162 | , sources ? if isNull sourcesFile then {} else builtins.fromJSON (builtins.readFile sourcesFile) 163 | , system ? builtins.currentSystem 164 | , pkgs ? mkPkgs sources system 165 | }: rec { 166 | # The sources, i.e. the attribute set of spec name to spec 167 | inherit sources; 168 | 169 | # The "pkgs" (evaluated nixpkgs) to use for e.g. 
non-builtin fetchers 170 | inherit pkgs; 171 | }; 172 | 173 | in 174 | mkSources (mkConfig {}) // { __functor = _: settings: mkSources (mkConfig settings); } 175 | -------------------------------------------------------------------------------- /overlays/default.nix: -------------------------------------------------------------------------------- 1 | sourcePaths: withRustOverlays: map import (import ./overlay-list.nix withRustOverlays) ++ 2 | [ (self: super: { inherit sourcePaths; })] 3 | ++ (if withRustOverlays then [ (import sourcePaths.nixpkgs-mozilla) ] else []) 4 | -------------------------------------------------------------------------------- /overlays/nginx-monitoring.nix: -------------------------------------------------------------------------------- 1 | self: super: { 2 | nginxMainline = super.nginxMainline.override 3 | (oldAttrs: { modules = oldAttrs.modules ++ [ self.nginxModules.vts ]; }); 4 | 5 | nginxStable = super.nginxStable.override 6 | (oldAttrs: { modules = oldAttrs.modules ++ [ self.nginxModules.vts ]; }); 7 | } 8 | -------------------------------------------------------------------------------- /overlays/nixops.nix: -------------------------------------------------------------------------------- 1 | self: super: { 2 | nixops = (import (self.sourcePaths.nixops-core + "/release.nix") { 3 | nixpkgs = self.sourcePaths.nixpkgs-nixops; 4 | pluginsSources = self.sourcePaths; 5 | p = self.lib.attrVals (self.globals.nixops-plugins or [ "nixops-aws" "nixops-libvirtd" ]); 6 | }).build.${self.stdenv.system}; 7 | } 8 | -------------------------------------------------------------------------------- /overlays/overlay-list.nix: -------------------------------------------------------------------------------- 1 | withRustOverlays: [ 2 | ./packages.nix 3 | ./nixops.nix 4 | ./nginx-monitoring.nix 5 | ] ++ (if withRustOverlays then [ ./rust.nix ] else []) 6 | -------------------------------------------------------------------------------- /overlays/packages.nix: 
-------------------------------------------------------------------------------- 1 | self: super: 2 | let 3 | inherit (self) callPackage lib; 4 | in 5 | { 6 | iohk-ops-lib = { 7 | physical = import ../physical self; 8 | roles = import ../roles; 9 | modules = import ../modules; 10 | ssh-keys = import ./ssh-keys.nix lib; 11 | scripts = { 12 | gen-graylog-creds = import ../scripts/gen-graylog-creds.nix; 13 | gen-sentry-secret-key = import ../scripts/gen-sentry-secret-key.nix; 14 | }; 15 | }; 16 | 17 | sentry = import ../pkgs/sentry { pkgs = self; }; 18 | symbolicator = callPackage ../pkgs/symbolicator { }; 19 | snuba = import ../pkgs/snuba { pkgs = self; }; 20 | naersk = callPackage self.sourcePaths.naersk {}; 21 | 22 | # grabed from https://raw.githubusercontent.com/vantage-sh/ec2instances.info/master/www/aws-instances.json 23 | aws-instances = builtins.listToAttrs (map (i: lib.nameValuePair i.instance_type i) 24 | (builtins.fromJSON (builtins.readFile ./aws-instances.json))); 25 | 26 | # workaround https://github.com/NixOS/nixpkgs/issues/47900 27 | awscli2 = (super.awscli2.overrideAttrs (old: { 28 | makeWrapperArgs = (old.makeWrapperArgs or []) ++ ["--unset" "PYTHONPATH"]; 29 | })); 30 | 31 | mkBlackboxScrapeConfig = job_name: module: targets: { 32 | inherit job_name; 33 | scrape_interval = "60s"; 34 | metrics_path = "/probe"; 35 | params = { inherit module; }; 36 | static_configs = [ { inherit targets; } ]; 37 | relabel_configs = [ 38 | { 39 | source_labels = [ "__address__" ]; 40 | target_label = "__param_target"; 41 | } 42 | { 43 | source_labels = [ "__param_target" ]; 44 | target_label = "instance"; 45 | } 46 | { 47 | replacement = "127.0.0.1:9115"; 48 | target_label = "__address__"; 49 | } 50 | ]; 51 | }; 52 | 53 | } 54 | -------------------------------------------------------------------------------- /overlays/rust.nix: -------------------------------------------------------------------------------- 1 | self: super: 2 | let 3 | rustChannel = self.rustChannelOf 
{ date = "2020-02-04"; channel = "nightly"; }; 4 | in 5 | rec { 6 | rustc = rustChannel.rust; 7 | cargo = rustChannel.cargo; 8 | rustPlatform = super.recurseIntoAttrs (super.makeRustPlatform { 9 | rustc = rustChannel.rust; 10 | cargo = rustChannel.cargo; 11 | }); 12 | } 13 | -------------------------------------------------------------------------------- /physical/aws/c4.large.nix: -------------------------------------------------------------------------------- 1 | { ... }: { 2 | imports = [ ./common.nix ]; 3 | deployment.ec2.instanceType = "c4.large"; 4 | } 5 | -------------------------------------------------------------------------------- /physical/aws/c5.2xlarge.nix: -------------------------------------------------------------------------------- 1 | { pkgs, lib, ... }: { 2 | imports = [ ./common.nix ]; 3 | deployment.ec2.instanceType = "c5.2xlarge"; 4 | boot.loader.grub.device = lib.mkForce "/dev/nvme0n1"; 5 | } 6 | -------------------------------------------------------------------------------- /physical/aws/c5.4xlarge.nix: -------------------------------------------------------------------------------- 1 | { pkgs, lib, ... }: { 2 | imports = [ ./common.nix ]; 3 | deployment.ec2.instanceType = "c5.4xlarge"; 4 | boot.loader.grub.device = lib.mkForce "/dev/nvme0n1"; 5 | } 6 | -------------------------------------------------------------------------------- /physical/aws/c5.9xlarge.nix: -------------------------------------------------------------------------------- 1 | { pkgs, lib, ... }: { 2 | imports = [ ./common.nix ]; 3 | deployment.ec2.instanceType = "c5.9xlarge"; 4 | boot.loader.grub.device = lib.mkForce "/dev/nvme0n1"; 5 | } 6 | -------------------------------------------------------------------------------- /physical/aws/c5a.4xlarge.nix: -------------------------------------------------------------------------------- 1 | { pkgs, lib, ... 
}: { 2 | imports = [ ./common.nix ]; 3 | deployment.ec2.instanceType = "c5a.4xlarge"; 4 | boot.loader.grub.device = lib.mkForce "/dev/nvme0n1"; 5 | } 6 | -------------------------------------------------------------------------------- /physical/aws/common.nix: -------------------------------------------------------------------------------- 1 | { name, config, resources, pkgs, lib, ... }: 2 | let 3 | inherit (pkgs) aws-instances; 4 | inherit (lib) mkDefault; 5 | inherit (config.deployment.ec2) region; 6 | 7 | inherit (pkgs.globals) ec2 domain; 8 | inherit (ec2.credentials) accessKeyIds; 9 | accessKeyId = accessKeyIds.${config.node.org}; 10 | in { 11 | imports = [ ../../modules/aws.nix ]; 12 | 13 | deployment.ec2 = { 14 | 15 | inherit (config.node) accessKeyId; 16 | 17 | ebsInitialRootDiskSize = mkDefault 30; 18 | 19 | elasticIPv4 = resources.elasticIPs."${name}-ip" or ""; 20 | 21 | }; 22 | 23 | deployment.route53 = lib.mkIf (config.node.fqdn != null) { 24 | accessKeyId = accessKeyIds.dns or config.node.accessKeyId; 25 | hostName = config.node.fqdn; 26 | }; 27 | 28 | node = { 29 | inherit accessKeyId region; 30 | fqdn = "${name}.${domain}"; 31 | }; 32 | } 33 | -------------------------------------------------------------------------------- /physical/aws/default.nix: -------------------------------------------------------------------------------- 1 | pkgs: with pkgs; 2 | let 3 | /* Round a float to integer, toward 0. */ 4 | roundToInt = f: lib.toInt (lib.head (lib.splitString "." 
(toString f))); 5 | mkInstance = p: let i = import p { inherit pkgs lib; }; in lib.recursiveUpdate i { 6 | # those data need to be available without the module system: 7 | node = { 8 | cpus = aws-instances.${i.deployment.ec2.instanceType}.vCPU; 9 | memory = roundToInt aws-instances.${i.deployment.ec2.instanceType}.memory; 10 | }; 11 | }; 12 | in { 13 | targetEnv = "ec2"; 14 | 15 | t2-nano = mkInstance ./t2.nano.nix; 16 | t2-large = mkInstance ./t2.large.nix; 17 | t2-xlarge = mkInstance ./t2.xlarge.nix; 18 | 19 | t3-xlarge = mkInstance ./t3.xlarge.nix; 20 | t3-2xlarge = mkInstance ./t3.2xlarge.nix; 21 | t3-2xlargeMonitor = mkInstance ./t3.2xlarge-monitor.nix; 22 | 23 | t3a-nano = mkInstance ./t3a.nano.nix; 24 | t3a-small = mkInstance ./t3a.small.nix; 25 | t3a-medium = mkInstance ./t3a.medium.nix; 26 | t3a-large = mkInstance ./t3a.large.nix; 27 | t3a-xlarge = mkInstance ./t3a.xlarge.nix; 28 | t3a-xlargeMonitor = mkInstance ./t3a.xlarge-monitor.nix; 29 | t3a-2xlarge = mkInstance ./t3a.2xlarge.nix; 30 | 31 | c4-large = mkInstance ./c4.large.nix; 32 | 33 | c5-2xlarge = mkInstance ./c5.2xlarge.nix; 34 | c5-4xlarge = mkInstance ./c5.4xlarge.nix; 35 | c5a-4xlarge = mkInstance ./c5a.4xlarge.nix; 36 | c5-9xlarge = mkInstance ./c5.9xlarge.nix; 37 | 38 | m5-xlarge = mkInstance ./m5.xlarge.nix; 39 | m5-2xlarge = mkInstance ./m5.2xlarge.nix; 40 | m5-4xlarge = mkInstance ./m5.4xlarge.nix; 41 | m5-8xlarge = mkInstance ./m5.8xlarge.nix; 42 | m5-12xlarge = mkInstance ./m5.12xlarge.nix; 43 | 44 | m5ad-xlarge = mkInstance ./m5ad.xlarge.nix; 45 | m5ad-4xlarge = mkInstance ./m5ad.4xlarge.nix; 46 | 47 | r5-large = mkInstance ./r5.large.nix; 48 | r5-xlarge = mkInstance ./r5.xlarge.nix; 49 | r5-2xlarge = mkInstance ./r5.2xlarge.nix; 50 | 51 | r5a-xlarge = mkInstance ./r5a.xlarge.nix; 52 | r5a-2xlarge = mkInstance ./r5a.2xlarge.nix; 53 | 54 | security-groups = import ./security-groups; 55 | 56 | } 57 | -------------------------------------------------------------------------------- 
/physical/aws/m5.12xlarge.nix: -------------------------------------------------------------------------------- 1 | { pkgs, lib, ... }: { 2 | imports = [ ./common.nix ]; 3 | deployment.ec2.instanceType = "m5.12xlarge"; 4 | boot.loader.grub.device = lib.mkForce "/dev/nvme0n1"; 5 | } 6 | -------------------------------------------------------------------------------- /physical/aws/m5.2xlarge.nix: -------------------------------------------------------------------------------- 1 | { pkgs, lib, ... }: { 2 | imports = [ ./common.nix ]; 3 | deployment.ec2.instanceType = "m5.2xlarge"; 4 | boot.loader.grub.device = lib.mkForce "/dev/nvme0n1"; 5 | } 6 | -------------------------------------------------------------------------------- /physical/aws/m5.4xlarge.nix: -------------------------------------------------------------------------------- 1 | { pkgs, lib, ... }: { 2 | imports = [ ./common.nix ]; 3 | deployment.ec2.instanceType = "m5.4xlarge"; 4 | boot.loader.grub.device = lib.mkForce "/dev/nvme0n1"; 5 | } 6 | -------------------------------------------------------------------------------- /physical/aws/m5.8xlarge.nix: -------------------------------------------------------------------------------- 1 | { pkgs, lib, ... }: { 2 | imports = [ ./common.nix ]; 3 | deployment.ec2.instanceType = "m5.8xlarge"; 4 | boot.loader.grub.device = lib.mkForce "/dev/nvme0n1"; 5 | } 6 | -------------------------------------------------------------------------------- /physical/aws/m5.xlarge.nix: -------------------------------------------------------------------------------- 1 | { pkgs, lib, ... }: { 2 | imports = [ ./common.nix ]; 3 | deployment.ec2.instanceType = "m5.xlarge"; 4 | boot.loader.grub.device = lib.mkForce "/dev/nvme0n1"; 5 | } 6 | -------------------------------------------------------------------------------- /physical/aws/m5ad.4xlarge.nix: -------------------------------------------------------------------------------- 1 | { pkgs, lib, ... 
}: { 2 | imports = [ ./common.nix ]; 3 | deployment.ec2.instanceType = "m5ad.4xlarge"; 4 | boot.loader.grub.device = lib.mkForce "/dev/nvme0n1"; 5 | } 6 | -------------------------------------------------------------------------------- /physical/aws/m5ad.xlarge.nix: -------------------------------------------------------------------------------- 1 | { pkgs, lib, ... }: { 2 | imports = [ ./common.nix ]; 3 | deployment.ec2.instanceType = "m5ad.xlarge"; 4 | boot.loader.grub.device = lib.mkForce "/dev/nvme0n1"; 5 | } 6 | -------------------------------------------------------------------------------- /physical/aws/r5.2xlarge.nix: -------------------------------------------------------------------------------- 1 | { pkgs, lib, ... }: { 2 | imports = [ ./common.nix ]; 3 | deployment.ec2.instanceType = "r5.2xlarge"; 4 | boot.loader.grub.device = lib.mkForce "/dev/nvme0n1"; 5 | } 6 | -------------------------------------------------------------------------------- /physical/aws/r5.large.nix: -------------------------------------------------------------------------------- 1 | { pkgs, lib, ... }: { 2 | imports = [ ./common.nix ]; 3 | deployment.ec2.instanceType = "r5.large"; 4 | boot.loader.grub.device = lib.mkForce "/dev/nvme0n1"; 5 | } 6 | -------------------------------------------------------------------------------- /physical/aws/r5.xlarge.nix: -------------------------------------------------------------------------------- 1 | { pkgs, lib, ... }: { 2 | imports = [ ./common.nix ]; 3 | deployment.ec2.instanceType = "r5.xlarge"; 4 | boot.loader.grub.device = lib.mkForce "/dev/nvme0n1"; 5 | } 6 | -------------------------------------------------------------------------------- /physical/aws/r5a.2xlarge.nix: -------------------------------------------------------------------------------- 1 | { pkgs, lib, ... 
}: { 2 | imports = [ ./common.nix ]; 3 | deployment.ec2.instanceType = "r5a.2xlarge"; 4 | boot.loader.grub.device = lib.mkForce "/dev/nvme0n1"; 5 | } 6 | -------------------------------------------------------------------------------- /physical/aws/r5a.xlarge.nix: -------------------------------------------------------------------------------- 1 | { pkgs, lib, ... }: { 2 | imports = [ ./common.nix ]; 3 | deployment.ec2.instanceType = "r5a.xlarge"; 4 | boot.loader.grub.device = lib.mkForce "/dev/nvme0n1"; 5 | } 6 | -------------------------------------------------------------------------------- /physical/aws/security-groups/allow-all-to-tcp-port.nix: -------------------------------------------------------------------------------- 1 | portName: port: { region, org, pkgs, ... }@args: { 2 | "allow-all-to-${portName}-${region}-${org}" = {resources, ...}: { 3 | inherit region; 4 | accessKeyId = pkgs.globals.ec2.credentials.accessKeyIds.${org}; 5 | _file = ./allow-all-to-tcp-port.nix; 6 | description = "Allow All to TCP/${toString port}"; 7 | rules = [{ 8 | protocol = "tcp"; 9 | fromPort = port; 10 | toPort = port; 11 | sourceIp = "0.0.0.0/0"; 12 | sourceIpv6 = "::/0"; 13 | }]; 14 | } // pkgs.lib.optionalAttrs (args ? vpcId) { 15 | vpcId = resources.vpc.${args.vpcId}; 16 | }; 17 | } 18 | -------------------------------------------------------------------------------- /physical/aws/security-groups/allow-all-to-udp-port.nix: -------------------------------------------------------------------------------- 1 | portName: port: { region, org, pkgs, ... 
}@args: { 2 | "allow-all-to-${portName}-${region}-${org}" = {resources, ...}: { 3 | inherit region; 4 | accessKeyId = pkgs.globals.ec2.credentials.accessKeyIds.${org}; 5 | _file = ./allow-all-to-udp-port.nix; 6 | description = "Allow All to UDP/${toString port}"; 7 | rules = [{ 8 | protocol = "udp"; 9 | fromPort = port; 10 | toPort = port; 11 | sourceIp = "0.0.0.0/0"; 12 | sourceIpv6 = "::/0"; 13 | }]; 14 | } // pkgs.lib.optionalAttrs (args ? vpcId) { 15 | vpcId = resources.vpc.${args.vpcId}; 16 | }; 17 | } 18 | -------------------------------------------------------------------------------- /physical/aws/security-groups/allow-all.nix: -------------------------------------------------------------------------------- 1 | { region, org, lib, pkgs, ... }@args: { 2 | "allow-all-${region}-${org}" = {resources, ...}: { 3 | inherit region; 4 | accessKeyId = pkgs.globals.ec2.credentials.accessKeyIds.${org}; 5 | _file = ./allow-all.nix; 6 | description = "Allow all ${region}"; 7 | rules = [{ 8 | protocol = "-1"; # all 9 | fromPort = 0; 10 | toPort = 65535; 11 | sourceIp = "0.0.0.0/0"; 12 | sourceIpv6 = "::/0"; 13 | }]; 14 | } // pkgs.lib.optionalAttrs (args ? vpcId) { 15 | vpcId = resources.vpc.${args.vpcId}; 16 | }; 17 | } 18 | -------------------------------------------------------------------------------- /physical/aws/security-groups/allow-deployer-ssh.nix: -------------------------------------------------------------------------------- 1 | { region, org, pkgs, lib, ... }@args: { 2 | "allow-deployer-ssh-${region}-${org}" = {resources, ...}: { 3 | inherit region; 4 | accessKeyId = pkgs.globals.ec2.credentials.accessKeyIds.${org}; 5 | _file = ./allow-deployer-ssh.nix; 6 | description = "SSH"; 7 | rules = [{ 8 | protocol = "tcp"; # TCP 9 | fromPort = 22; 10 | toPort = 22; 11 | sourceIp = pkgs.globals.deployerIp + "/32"; 12 | }]; 13 | } // pkgs.lib.optionalAttrs (args ? 
vpcId) { 14 | vpcId = resources.vpc.${args.vpcId}; 15 | }; 16 | } 17 | -------------------------------------------------------------------------------- /physical/aws/security-groups/allow-graylog.nix: -------------------------------------------------------------------------------- 1 | { lib, region, org, nodes, pkgs, ... }@args: 2 | import ./allow-to-tcp-port.nix "graylog" 5044 (lib.attrNames nodes) args 3 | -------------------------------------------------------------------------------- /physical/aws/security-groups/allow-monitoring-collection.nix: -------------------------------------------------------------------------------- 1 | { region, org, pkgs, ... }@args: { 2 | "allow-monitoring-collection-${region}-${org}" = { nodes, resources, lib, ... }: 3 | let monitoringSourceIp = resources.elasticIPs.monitoring-ip; 4 | in { 5 | inherit region; 6 | accessKeyId = pkgs.globals.ec2.credentials.accessKeyIds.${org}; 7 | _file = ./allow-monitoring-collection.nix; 8 | description = "Monitoring collection"; 9 | rules = lib.optionals (nodes ? "monitoring") (map (p: 10 | { 11 | protocol = "tcp"; 12 | fromPort = p; 13 | toPort = p; # prometheus exporters 14 | sourceIp = monitoringSourceIp; 15 | }) ([ 16 | 9100 # prometheus exporters 17 | 9102 # statd exporter 18 | 9113 # nginx exporter 19 | 9131 # varnish exporter 20 | ] ++ (pkgs.globals.extraPrometheusExportersPorts or []))); 21 | } // pkgs.lib.optionalAttrs (args ? vpcId) { 22 | vpcId = resources.vpc.${args.vpcId}; 23 | }; 24 | } 25 | -------------------------------------------------------------------------------- /physical/aws/security-groups/allow-public-www-https.nix: -------------------------------------------------------------------------------- 1 | { region, org, pkgs, ... 
}@args: { 2 | "allow-public-www-https-${region}-${org}" = {resources, ...}: { 3 | _file = ./allow-public-www-https.nix; 4 | inherit region; 5 | accessKeyId = pkgs.globals.ec2.credentials.accessKeyIds.${org}; 6 | description = "WWW-http(s)"; 7 | rules = [ 8 | { 9 | protocol = "tcp"; 10 | fromPort = 80; 11 | toPort = 80; 12 | sourceIp = "0.0.0.0/0"; 13 | sourceIpv6 = "::/0"; 14 | } 15 | { 16 | protocol = "tcp"; 17 | fromPort = 443; 18 | toPort = 443; 19 | sourceIp = "0.0.0.0/0"; 20 | sourceIpv6 = "::/0"; 21 | } 22 | ]; 23 | } // pkgs.lib.optionalAttrs (args ? vpcId) { 24 | vpcId = resources.vpc.${args.vpcId}; 25 | }; 26 | } 27 | -------------------------------------------------------------------------------- /physical/aws/security-groups/allow-to-tcp-port.nix: -------------------------------------------------------------------------------- 1 | portName: port: sourcesNodes: { region, org, pkgs, ... }@args: { 2 | "allow-to-${portName}-${region}-${org}" = { resources, ... }: { 3 | inherit region; 4 | accessKeyId = pkgs.globals.ec2.credentials.accessKeyIds.${org}; 5 | _file = ./allow-to-tcp-port.nix; 6 | description = "Allow to TCP/${toString port}"; 7 | rules = map (n: { 8 | protocol = "tcp"; # all 9 | fromPort = port; 10 | toPort = port; 11 | sourceIp = resources.elasticIPs."${n}-ip"; 12 | }) sourcesNodes; 13 | } // pkgs.lib.optionalAttrs (args ? 
vpcId) { 14 | vpcId = resources.vpc.${args.vpcId}; 15 | }; 16 | } 17 | -------------------------------------------------------------------------------- /physical/aws/security-groups/default.nix: -------------------------------------------------------------------------------- 1 | rec { 2 | allow-all = import ./allow-all.nix; 3 | allow-deployer-ssh = import ./allow-deployer-ssh.nix; 4 | allow-monitoring-collection = import ./allow-monitoring-collection.nix; 5 | allow-public-www-https = import ./allow-public-www-https.nix; 6 | allow-ssh = allow-all-to-tcp-port "ssh" 22; 7 | allow-wireguard = allow-all-to-udp-port "wireguard" 17777; 8 | allow-graylog = allow-all-to-tcp-port "graylog" 5044; 9 | allow-all-to-tcp-port = import ./allow-all-to-tcp-port.nix; 10 | allow-all-to-udp-port = import ./allow-all-to-udp-port.nix; 11 | allow-to-tcp-port = import ./allow-to-tcp-port.nix; 12 | } 13 | -------------------------------------------------------------------------------- /physical/aws/t2.large.nix: -------------------------------------------------------------------------------- 1 | { ... }: { 2 | imports = [ ./common.nix ]; 3 | deployment.ec2.instanceType = "t2.large"; 4 | } 5 | -------------------------------------------------------------------------------- /physical/aws/t2.nano.nix: -------------------------------------------------------------------------------- 1 | { ... }: { 2 | imports = [ ./common.nix ]; 3 | deployment.ec2.instanceType = "t2.nano"; 4 | } 5 | -------------------------------------------------------------------------------- /physical/aws/t2.xlarge.nix: -------------------------------------------------------------------------------- 1 | 2 | { ... }: { 3 | imports = [ ./common.nix ]; 4 | deployment.ec2.instanceType = "t2.xlarge"; 5 | } 6 | -------------------------------------------------------------------------------- /physical/aws/t3.2xlarge-monitor.nix: -------------------------------------------------------------------------------- 1 | { pkgs, lib, ... 
}: { 2 | imports = [ ./common.nix ]; 3 | deployment.ec2 = { 4 | instanceType = "t3.2xlarge"; 5 | ebsInitialRootDiskSize = 1000; 6 | associatePublicIpAddress = true; 7 | }; 8 | boot.loader.grub.device = lib.mkForce "/dev/nvme0n1"; 9 | } 10 | -------------------------------------------------------------------------------- /physical/aws/t3.2xlarge.nix: -------------------------------------------------------------------------------- 1 | { pkgs, lib, ... }: { 2 | imports = [ ./common.nix ]; 3 | deployment.ec2.instanceType = "t3.2xlarge"; 4 | boot.loader.grub.device = lib.mkForce "/dev/nvme0n1"; 5 | } 6 | -------------------------------------------------------------------------------- /physical/aws/t3.xlarge.nix: -------------------------------------------------------------------------------- 1 | { pkgs, lib, ... }: { 2 | imports = [ ./common.nix ]; 3 | deployment.ec2.instanceType = "t3.xlarge"; 4 | boot.loader.grub.device = lib.mkForce "/dev/nvme0n1"; 5 | } 6 | -------------------------------------------------------------------------------- /physical/aws/t3a.2xlarge.nix: -------------------------------------------------------------------------------- 1 | { pkgs, lib, ... }: { 2 | imports = [ ./common.nix ]; 3 | deployment.ec2.instanceType = "t3a.2xlarge"; 4 | boot.loader.grub.device = lib.mkForce "/dev/nvme0n1"; 5 | } 6 | -------------------------------------------------------------------------------- /physical/aws/t3a.large.nix: -------------------------------------------------------------------------------- 1 | { pkgs, lib, ... }: { 2 | imports = [ ./common.nix ]; 3 | deployment.ec2.instanceType = "t3a.large"; 4 | boot.loader.grub.device = lib.mkForce "/dev/nvme0n1"; 5 | } 6 | -------------------------------------------------------------------------------- /physical/aws/t3a.medium.nix: -------------------------------------------------------------------------------- 1 | { pkgs, lib, ... 
}: { 2 | imports = [ ./common.nix ]; 3 | deployment.ec2.instanceType = "t3a.medium"; 4 | boot.loader.grub.device = lib.mkForce "/dev/nvme0n1"; 5 | } 6 | -------------------------------------------------------------------------------- /physical/aws/t3a.nano.nix: -------------------------------------------------------------------------------- 1 | { pkgs, lib, ... }: { 2 | imports = [ ./common.nix ]; 3 | deployment.ec2.instanceType = "t3a.nano"; 4 | boot.loader.grub.device = lib.mkForce "/dev/nvme0n1"; 5 | } 6 | -------------------------------------------------------------------------------- /physical/aws/t3a.small.nix: -------------------------------------------------------------------------------- 1 | { pkgs, lib, ... }: { 2 | imports = [ ./common.nix ]; 3 | deployment.ec2.instanceType = "t3a.small"; 4 | boot.loader.grub.device = lib.mkForce "/dev/nvme0n1"; 5 | } 6 | -------------------------------------------------------------------------------- /physical/aws/t3a.xlarge-monitor.nix: -------------------------------------------------------------------------------- 1 | { pkgs, lib, ... }: { 2 | imports = [ ./common.nix ]; 3 | deployment.ec2 = { 4 | instanceType = "t3a.xlarge"; 5 | ebsInitialRootDiskSize = 1000; 6 | associatePublicIpAddress = true; 7 | }; 8 | boot.loader.grub.device = lib.mkForce "/dev/nvme0n1"; 9 | } 10 | -------------------------------------------------------------------------------- /physical/aws/t3a.xlarge.nix: -------------------------------------------------------------------------------- 1 | { pkgs, lib, ... 
}: { 2 | imports = [ ./common.nix ]; 3 | deployment.ec2.instanceType = "t3a.xlarge"; 4 | boot.loader.grub.device = lib.mkForce "/dev/nvme0n1"; 5 | } 6 | -------------------------------------------------------------------------------- /physical/default.nix: -------------------------------------------------------------------------------- 1 | pkgs: { 2 | libvirtd = import ./libvirtd; 3 | aws = import ./aws pkgs; 4 | } 5 | -------------------------------------------------------------------------------- /physical/libvirtd/common.nix: -------------------------------------------------------------------------------- 1 | { pkgs, name, ... }: 2 | let inherit (pkgs.globals) domain; 3 | in { 4 | deployment.libvirtd.headless = true; 5 | nixpkgs.localSystem.system = "x86_64-linux"; 6 | imports = [ ../../modules/aws.nix ]; 7 | node = { fqdn = "${name}.${domain}"; }; 8 | 9 | services.nginx.mapHashBucketSize = 128; 10 | } 11 | -------------------------------------------------------------------------------- /physical/libvirtd/default.nix: -------------------------------------------------------------------------------- 1 | let mkInstance = p: import p // { 2 | imports = [ 3 | ./common.nix 4 | ({ config, ...}: { 5 | deployment.libvirtd.memorySize = config.node.memory * 1024; 6 | deployment.libvirtd.vcpu = config.node.cpus; 7 | }) 8 | ]; 9 | }; in { 10 | targetEnv = "libvirtd"; 11 | large = mkInstance ./large.nix; 12 | medium = mkInstance ./medium.nix; 13 | tiny = mkInstance ./tiny.nix; 14 | } 15 | -------------------------------------------------------------------------------- /physical/libvirtd/large.nix: -------------------------------------------------------------------------------- 1 | { 2 | node = { 3 | cpus = 3; 4 | memory = 8; 5 | }; 6 | } 7 | -------------------------------------------------------------------------------- /physical/libvirtd/medium.nix: -------------------------------------------------------------------------------- 1 | { 2 | node = { 3 | cpus = 2; 4 | memory = 
4; 5 | }; 6 | } 7 | -------------------------------------------------------------------------------- /physical/libvirtd/tiny.nix: -------------------------------------------------------------------------------- 1 | { 2 | node = { 3 | cpus = 1; 4 | memory = 2; 5 | }; 6 | } 7 | -------------------------------------------------------------------------------- /physical/packet/c1.small.nix: -------------------------------------------------------------------------------- 1 | { ... }: { 2 | imports = [ ./. ]; 3 | deployment.packet.plan = "c1.small.x86"; 4 | } 5 | -------------------------------------------------------------------------------- /physical/packet/default.nix: -------------------------------------------------------------------------------- 1 | { pkgs, lib, name, config, resources, ... }: 2 | let inherit (pkgs.globals) domain packet; 3 | in { 4 | deployment.packet = { 5 | keyPair = resources.packetKeyPairs.global; 6 | inherit (packet.credentials) accessKeyId project; 7 | }; 8 | 9 | nixpkgs.localSystem.system = "x86_64-linux"; 10 | 11 | node = { fqdn = "${name}.${domain}"; }; 12 | } 13 | -------------------------------------------------------------------------------- /physical/packet/s1.large.nix: -------------------------------------------------------------------------------- 1 | { ... }: { 2 | imports = [ ./. ]; 3 | deployment.packet.plan = "s1.large.x86"; 4 | } 5 | -------------------------------------------------------------------------------- /physical/packet/t1.small.nix: -------------------------------------------------------------------------------- 1 | { ... }: { 2 | imports = [ ./. 
]; 3 | deployment.packet.plan = "t1.small.x86"; 4 | } 5 | -------------------------------------------------------------------------------- /pkgs/sentry/default.nix: -------------------------------------------------------------------------------- 1 | { pkgs }: 2 | 3 | let 4 | python = import ./requirements.nix { inherit pkgs; }; 5 | 6 | django_1_11_patch = pkgs.fetchpatch { 7 | url = "https://github.com/getsentry/sentry/commit/e171b81e66f65825d4f9f38db2cc91d8927621b3.patch"; 8 | sha256 = "0dzlhal08wdhlasw8pkr2i0sl4kf4bmn4zfx0c6zm9kn26rar6hz"; 9 | excludes = [ ".travis.yml" ]; 10 | }; 11 | in 12 | 13 | (python.mkDerivation rec { 14 | pname = "sentry"; 15 | version = "10.0.0"; 16 | format = "wheel"; 17 | 18 | src = pkgs.python.pkgs.fetchPypi { 19 | inherit pname version format; 20 | sha256 = "2695ed1cf11e5afee92de04dc1868de9f4c49d5876c34579c935d9d6cf9fbb03"; 21 | python = "py27"; 22 | }; 23 | 24 | buildInputs = []; 25 | 26 | propagatedBuildInputs = builtins.attrValues python.packages; 27 | 28 | makeWrapperArgs = ["--set PYTHONPATH $PYTHONPATH" "--set LD_LIBRARY_PATH ${pkgs.xmlsec}/lib" ]; 29 | 30 | }).overrideAttrs(drv: { 31 | buildPhase = '' 32 | # Unset SOURCE_DATE_EPOCH: ZIP requires timestamps >= 1980 33 | # https://nixos.org/nixpkgs/manual/#python-setup.py-bdist_wheel-cannot-create-.whl 34 | unset SOURCE_DATE_EPOCH 35 | pushd dist 36 | wheel_file=sentry-10.0.0-py27-none-any.whl 37 | wheel unpack $wheel_file 38 | rm $wheel_file 39 | # Remove uwsgi dependency 40 | sed -i '/.*uwsgi.*/d' sentry-10.0.0/sentry-10.0.0.dist-info/METADATA 41 | # Jailbreak 42 | sed -i 's/.*PyYAML.*/Requires-Dist: PyYAML/' sentry-10.0.0/sentry-10.0.0.dist-info/METADATA 43 | sed -i 's/.*Django.*/Requires-Dist: Django/' sentry-10.0.0/sentry-10.0.0.dist-info/METADATA 44 | sed -i 's/.*djangorestframework.*/Requires-Dist: djangorestframework/' sentry-10.0.0/sentry-10.0.0.dist-info/METADATA 45 | # Allow new Django version to work 46 | patch --strip 2 --directory sentry-10.0.0/ < 
${django_1_11_patch} 47 | wheel pack ./sentry-10.0.0 48 | rm -r sentry-10.0.0 49 | popd 50 | ''; 51 | }) 52 | -------------------------------------------------------------------------------- /pkgs/sentry/executable.patch: -------------------------------------------------------------------------------- 1 | commit d8e0bac0c0d831510683939ec7a7b5bd72192423 2 | Author: Frederik Rietdijk 3 | Date: Sat Jan 5 11:38:28 2019 +0100 4 | 5 | Have a top-level attribute for the executable 6 | 7 | diff --git a/pkgconfig/pkgconfig.py b/pkgconfig/pkgconfig.py 8 | index 3deb97f..e7c5561 100644 9 | --- a/pkgconfig/pkgconfig.py 10 | +++ b/pkgconfig/pkgconfig.py 11 | @@ -30,6 +30,9 @@ from functools import wraps 12 | from subprocess import call, PIPE, Popen 13 | 14 | 15 | +PKG_CONFIG_EXE = "pkg-config" 16 | + 17 | + 18 | def _compare_versions(v1, v2): 19 | """ 20 | Compare two version strings and return -1, 0 or 1 depending on the equality 21 | @@ -65,7 +68,7 @@ def _convert_error(func): 22 | 23 | @_convert_error 24 | def _query(package, *options): 25 | - pkg_config_exe = os.environ.get('PKG_CONFIG', None) or 'pkg-config' 26 | + pkg_config_exe = os.environ.get('PKG_CONFIG', None) or PKG_CONFIG_EXE 27 | cmd = '{0} {1} {2}'.format(pkg_config_exe, ' '.join(options), package) 28 | proc = Popen(shlex.split(cmd), stdout=PIPE, stderr=PIPE) 29 | out, err = proc.communicate() 30 | @@ -84,7 +87,7 @@ def exists(package): 31 | 32 | If ``pkg-config`` not on path, raises ``EnvironmentError``. 
33 | """ 34 | - pkg_config_exe = os.environ.get('PKG_CONFIG', None) or 'pkg-config' 35 | + pkg_config_exe = os.environ.get('PKG_CONFIG', None) or PKG_CONFIG_EXE 36 | cmd = '{0} --exists {1}'.format(pkg_config_exe, package).split() 37 | return call(cmd) == 0 38 | -------------------------------------------------------------------------------- /pkgs/sentry/requirements_frozen.txt: -------------------------------------------------------------------------------- 1 | amqp==1.4.9 2 | anyjson==0.3.3 3 | attrs==19.3.0 4 | backports.functools-lru-cache==1.6.1 5 | beautifulsoup4==4.7.1 6 | billiard==3.3.0.23 7 | boto3==1.4.5 8 | botocore==1.5.70 9 | cached-property==1.5.1 10 | cachetools==3.1.1 11 | celery==3.1.18 12 | certifi==2020.6.20 13 | cffi==1.14.0 14 | chardet==3.0.4 15 | click==6.7 16 | configparser==4.0.2 17 | confluent-kafka==0.11.5 18 | contextlib2==0.6.0.post1 19 | croniter==0.3.34 20 | cryptography==2.9.2 21 | cssselect==1.1.0 22 | cssutils==1.0.2 23 | datadog==0.30.0 24 | decorator==4.4.2 25 | defusedxml==0.6.0 26 | Django==1.11.29 27 | django-crispy-forms==1.6.1 28 | django-picklefield==1.0.0 29 | django-sudo==3.1.0 30 | djangorestframework==3.9.4 31 | docutils==0.16 32 | email-reply-parser==0.2.0 33 | enum34==1.1.10 34 | funcsigs==1.0.2 35 | functools32==3.2.3.post2 36 | futures==3.3.0 37 | google-api-core==1.14.3 38 | google-auth==1.6.3 39 | google-cloud-bigtable==0.32.2 40 | google-cloud-core==0.29.1 41 | google-cloud-pubsub==0.35.4 42 | google-cloud-storage==1.13.3 43 | google-resumable-media==0.4.1 44 | googleapis-common-protos==1.6.0 45 | grpc-google-iam-v1==0.11.4 46 | grpcio==1.30.0 47 | hiredis==0.1.6 48 | httplib2==0.18.1 49 | idna==2.7 50 | importlib-metadata==1.6.1 51 | ipaddress==1.0.23 52 | isodate==0.6.0 53 | jmespath==0.10.0 54 | jsonschema==2.6.0 55 | kombu==3.0.35 56 | loremipsum==1.0.5 57 | lxml==4.3.5 58 | maxminddb==1.4.1 59 | milksnake==0.1.5 60 | mistune==0.8.4 61 | mmh3==2.3.1 62 | mock==2.0.0 63 | msgpack==0.6.2 64 | natsort==6.2.1 65 
| oauth2==1.9.0.post1 66 | oauthlib==3.1.0 67 | parsimonious==0.8.0 68 | pathlib2==2.3.5 69 | pbr==5.4.5 70 | percy==2.0.2 71 | petname==2.6 72 | phabricator==0.7.0 73 | phonenumberslite==7.7.5 74 | progressbar2==3.10.1 75 | protobuf==3.12.2 76 | psycopg2-binary==2.8.5 77 | pyasn1==0.4.8 78 | pyasn1-modules==0.2.8 79 | pycparser==2.20 80 | PyJWT==1.5.3 81 | pyOpenSSL==19.1.0 82 | pyrsistent==0.16.0 83 | pytest-runner==5.2 84 | python-dateutil==2.8.1 85 | python-memcached==1.59 86 | python-u2flib-server==5.0.0 87 | python-utils==2.4.0 88 | pytz==2020.1 89 | PyYAML==5.3.1 90 | qrcode==5.3 91 | querystring-parser==1.2.4 92 | rb==1.7 93 | redis==2.10.5 94 | redis-py-cluster==1.3.4 95 | requests==2.20.1 96 | requests-oauthlib==0.3.3 97 | rsa==4.5 98 | s3transfer==0.1.13 99 | scandir==1.10.0 100 | selenium==3.141.0 101 | sentry-sdk==0.15.1 102 | setproctitle==1.1.10 103 | setuptools-scm==4.1.2 104 | simplejson==3.8.2 105 | six==1.10.0 106 | soupsieve==1.9.6 107 | sqlparse==0.2.4 108 | statsd==3.1 109 | strict-rfc3339==0.7 110 | structlog==16.1.0 111 | toml==0.10.1 112 | toronado==0.0.11 113 | ua-parser==0.7.3 114 | unidiff==0.6.0 115 | urllib3==1.24.2 116 | uWSGI==2.0.19.1 117 | vcversioner==2.16.0.0 118 | zipp==1.2.0 119 | -------------------------------------------------------------------------------- /pkgs/sentry/requirements_override.nix: -------------------------------------------------------------------------------- 1 | { pkgs, python }: 2 | 3 | self: super: { 4 | xmlsec = python.mkDerivation rec { 5 | pname = "xmlsec"; 6 | version = "1.3.8"; 7 | 8 | src = pkgs.python.pkgs.fetchPypi { 9 | inherit pname version; 10 | sha256 = "1ki5jiws8r9sbdbbn5cw058m57rhx42g91rrsa2bblqwngi3z546"; 11 | }; 12 | 13 | buildInputs = with pkgs; [ libtool.lib zlib xmlsec.dev xmlsec ]; 14 | propagatedBuildInputs = with pkgs; [ self.lxml self.pkgconfig self.pathlib2 self."setuptools-scm" self.toml xmlsec.dev xmlsec ]; 15 | 16 | # checkInputs = with pkgs; [ pytest xmlsec.dev xmlsec 
hypothesis ]; 17 | doCheck = false; 18 | postPatch = '' 19 | patch --strip=1 < ${./xmlsec/lxml-workaround.patch} 20 | patch --strip=1 < ${./xmlsec/no-black-format.patch} 21 | ''; 22 | 23 | LD_LIBRARY_PATH = "${pkgs.xmlsec}/lib"; 24 | PKG_CONFIG_PATH = "${pkgs.xmlsec.dev}/lib/pkgconfig:${pkgs.libxml2.dev}/lib/pkgconfig:${pkgs.libxslt.dev}/lib/pkgconfig:$PKG_CONFIG_PATH"; 25 | }; 26 | 27 | python3-saml = 28 | let 29 | fix2020Patch = pkgs.fetchpatch { 30 | url = "https://patch-diff.githubusercontent.com/raw/onelogin/python3-saml/pull/140.patch"; 31 | sha256 = "0pm40kszv5qcnkw3ksz6c68zkqibakaxdggkxfadiasw9ys91nl6"; 32 | }; 33 | fixCertValue = pkgs.fetchpatch { 34 | url = "https://github.com/onelogin/python3-saml/commit/771072e2ae1380acde4ec6af2d7b46b96dccfd2d.patch"; 35 | sha256 = "0yplwcpb5ksxgbfnmmxssj4c9ak1g1p6hfj8nfh2ybrmbk38n2f8"; 36 | }; 37 | in 38 | python.mkDerivation rec { 39 | pname = "python3-saml"; 40 | version = "1.4.0"; 41 | 42 | buildInputs = []; 43 | 44 | # Fetch from GitHub because PyPi doesn't have tests available in src 45 | src = pkgs.fetchFromGitHub { 46 | owner = "onelogin"; 47 | repo = "${pname}"; 48 | rev = "refs/tags/v${version}"; 49 | sha256 = "05l63qwfqvw67v70bsam76amxpz7hnkqn8329yrds3fzgzkhkqrl"; 50 | }; 51 | 52 | postPatch = '' 53 | patch --strip=1 < ${fix2020Patch} 54 | patch --strip=1 < ${fixCertValue} 55 | ''; 56 | 57 | propagatedBuildInputs = with self; [ defusedxml xmlsec isodate ]; 58 | 59 | LD_LIBRARY_PATH = "${pkgs.xmlsec}/lib"; 60 | 61 | doCheck = false; 62 | }; 63 | 64 | pillow = pkgs.python.pkgs.pillow.overrideAttrs (oldAttrs: rec { 65 | pname = "Pillow"; 66 | version = "6.2.1"; 67 | 68 | src = pkgs.python.pkgs.fetchPypi { 69 | inherit pname version; 70 | sha256 = "1c8wkzc58f5wdh006jvmwdk3wxld1xgagcbdvj7iv17qi0m9fkmz"; 71 | }; 72 | 73 | doCheck = false; 74 | }); 75 | 76 | semaphore = import ./semaphore { 77 | buildPythonPackage = python.mkDerivation; 78 | fetchFromGitHub = pkgs.fetchFromGitHub; 79 | milksnake = self."milksnake"; 
80 | pkg-config = pkgs.pkg-config; 81 | openssl = pkgs.openssl; 82 | naersk = pkgs.naersk; 83 | }; 84 | 85 | symbolic = pkgs.python.pkgs.callPackage ./symbolic { }; 86 | 87 | "jsonschema" = python.overrideDerivation super."jsonschema" (old: { 88 | propagatedBuildInputs = old.propagatedBuildInputs ++ [ self.vcversioner ]; 89 | }); 90 | 91 | "progressbar2" = python.overrideDerivation super."progressbar2" (old: { 92 | propagatedBuildInputs = old.propagatedBuildInputs ++ [ self.pytest-runner ]; 93 | }); 94 | 95 | "ua-parser" = python.overrideDerivation super."ua-parser" (old: { 96 | propagatedBuildInputs = old.propagatedBuildInputs ++ [ self.pyyaml ]; 97 | }); 98 | 99 | defusedxml = python.mkDerivation rec { 100 | pname = "defusedxml"; 101 | version = "0.5.0"; 102 | 103 | buildInputs = []; 104 | 105 | src = pkgs.python.pkgs.fetchPypi { 106 | inherit pname version; 107 | sha256 = "1x54n0h8hl92vvwyymx883fbqpqjwn2mc8fb383bcg3z9zwz5mr4"; 108 | }; 109 | }; 110 | 111 | pkgconfig = python.mkDerivation rec { 112 | pname = "pkgconfig"; 113 | version = "1.5.1"; 114 | 115 | setupHook = pkgs.pkgconfig.setupHook; 116 | 117 | src = pkgs.python.pkgs.fetchPypi { 118 | inherit pname version; 119 | sha256 = "97bfe3d981bab675d5ea3ef259045d7919c93897db7d3b59d4e8593cba8d354f"; 120 | }; 121 | 122 | nativeBuildInputs = [ pkgs.pkgconfig ]; 123 | 124 | doCheck = false; 125 | 126 | buildInputs = []; 127 | patches = [ ./executable.patch ]; 128 | postPatch = '' 129 | substituteInPlace pkgconfig/pkgconfig.py --replace 'PKG_CONFIG_EXE = "pkg-config"' 'PKG_CONFIG_EXE = "${pkgs.pkgconfig}/bin/pkg-config"' 130 | ''; 131 | 132 | meta = with pkgs.lib; { 133 | description = "Interface Python with pkg-config"; 134 | homepage = "https://github.com/matze/pkgconfig"; 135 | license = licenses.mit; 136 | }; 137 | }; 138 | 139 | uwsgi = null; 140 | } 141 | -------------------------------------------------------------------------------- /pkgs/sentry/semaphore/default.nix: 
-------------------------------------------------------------------------------- 1 | { buildPythonPackage 2 | , fetchFromGitHub 3 | , milksnake 4 | , naersk 5 | , pkg-config 6 | , openssl 7 | }: 8 | 9 | let 10 | rust-semaphore = import ./rust.nix { inherit naersk fetchFromGitHub pkg-config openssl; }; 11 | in 12 | 13 | import ./python.nix { inherit buildPythonPackage fetchFromGitHub rust-semaphore milksnake; } 14 | -------------------------------------------------------------------------------- /pkgs/sentry/semaphore/python.nix: -------------------------------------------------------------------------------- 1 | { buildPythonPackage 2 | , fetchFromGitHub 3 | , rust-semaphore 4 | , milksnake 5 | }: 6 | 7 | buildPythonPackage rec { 8 | pname = "semaphore"; 9 | version = "0.4.65"; 10 | 11 | src = (import ./source.nix { inherit fetchFromGitHub; }).semaphore; 12 | 13 | postPatch = '' 14 | patch --strip=1 < ${./setup-py.patch} 15 | substituteInPlace py/setup.py \ 16 | --replace '@nixBuildDylib@' '${rust-semaphore}/lib/libsemaphore.so' \ 17 | --replace '@nixBuildHeader@' '${rust-semaphore}/include/semaphore.h' 18 | ''; 19 | 20 | buildInputs = []; 21 | nativeBuildInputs = [ milksnake rust-semaphore ]; 22 | propagatedBuildInputs = [ milksnake ]; 23 | 24 | preBuild = '' 25 | cd py 26 | ''; 27 | } 28 | -------------------------------------------------------------------------------- /pkgs/sentry/semaphore/rust.nix: -------------------------------------------------------------------------------- 1 | { naersk 2 | , fetchFromGitHub 3 | , pkg-config 4 | , openssl 5 | }: 6 | 7 | let 8 | sources = import ./source.nix { inherit fetchFromGitHub; }; 9 | in 10 | 11 | naersk.buildPackage rec { 12 | pname = "semaphore"; 13 | version = "0.4.65"; 14 | 15 | src = 16 | sources.semaphore.overrideAttrs(drv: { 17 | postFetch = drv.postFetch + '' 18 | # See commentary in patch 19 | patch --directory=$out --strip=1 < ${./semaphore.patch} 20 | 21 | # Copy over the json-forensics crate 22 | cp -r 
${sources.json-forensics} $out/json-forensics 23 | ''; 24 | }); 25 | 26 | override = (drv: { 27 | preConfigure = '' 28 | # Kinda hacky, but this works to build the cabi dylib with all 29 | # the dependencies from the root project. 30 | cd cabi 31 | ''; 32 | 33 | postInstall = '' 34 | cp -r ${src}/cabi/include $out/include 35 | ''; 36 | }); 37 | 38 | nativeBuildInputs = [ pkg-config openssl ]; 39 | } 40 | -------------------------------------------------------------------------------- /pkgs/sentry/semaphore/semaphore.patch: -------------------------------------------------------------------------------- 1 | Does a few things: 2 | 1. Adds the sub packages of this project to the workspace members. 3 | This is required by naersk and probably good practice anyway. 4 | 2. Modifies the dependencies to use a local version of json-forensics. 5 | If we try to use the Git version, it will fail due to not having a 6 | lock file present in the git repo: "the source ... requires a lock 7 | file to be present first before it can be used against vendored 8 | source code". 9 | 3. Removes the redis dependency as it's troublesome and the processing 10 | feature isn't used in this build. 
11 | diff --git a/Cargo.toml b/Cargo.toml 12 | index 9c546b0..2877bc8 100644 13 | --- a/Cargo.toml 14 | +++ b/Cargo.toml 15 | @@ -11,6 +11,14 @@ build = "build.rs" 16 | publish = false 17 | 18 | [workspace] 19 | +members = [ 20 | + "cabi", 21 | + "common", 22 | + "general", 23 | + "general/derive", 24 | + "json-forensics", 25 | + "server" 26 | +] 27 | 28 | [features] 29 | default = [] 30 | diff --git a/cabi/Cargo.toml b/cabi/Cargo.toml 31 | index 646f25f..ffe0f98 100644 32 | --- a/cabi/Cargo.toml 33 | +++ b/cabi/Cargo.toml 34 | @@ -12,8 +12,6 @@ license-file = "../LICENSE" 35 | name = "semaphore" 36 | crate-type = ["cdylib"] 37 | 38 | -[workspace] 39 | - 40 | [profile.release] 41 | debug = true 42 | lto = true 43 | @@ -21,7 +19,7 @@ lto = true 44 | [dependencies] 45 | chrono = "0.4.7" 46 | failure = "0.1.5" 47 | -json-forensics = { version = "*", git = "https://github.com/getsentry/rust-json-forensics" } 48 | +json-forensics = { path = "../json-forensics" } 49 | lazy_static = "1.3.0" 50 | serde = {version = "1.0.98", features = ["derive"]} 51 | serde_json = "1.0.40" 52 | diff --git a/server/Cargo.toml b/server/Cargo.toml 53 | index 2cee289..83b8008 100644 54 | --- a/server/Cargo.toml 55 | +++ b/server/Cargo.toml 56 | @@ -13,7 +13,7 @@ license-file = "../LICENSE" 57 | [features] 58 | default = ["with_ssl"] 59 | with_ssl = ["native-tls", "actix-web/tls"] 60 | -processing = ["rdkafka", "redis", "r2d2"] 61 | +processing = ["rdkafka", "r2d2"] 62 | 63 | [dependencies] 64 | actix = "0.7.9" 65 | @@ -25,7 +25,7 @@ clap = "2.33.0" 66 | failure = "0.1.5" 67 | flate2 = "1.0.9" 68 | futures = "0.1.28" 69 | -json-forensics = { version = "*", git = "https://github.com/getsentry/rust-json-forensics" } 70 | +json-forensics = { path = "../json-forensics" } 71 | lazy_static = "1.3.0" 72 | listenfd = "0.3.3" 73 | log = "0.4.8" 74 | @@ -33,7 +33,6 @@ native-tls = { version = "0.2.3", optional = true } 75 | parking_lot = "0.9.0" 76 | r2d2 = { version = "0.8.5", optional = true } 77 | 
rdkafka = { version = "0.22.0", optional = true } 78 | -redis = { git = "https://github.com/mitsuhiko/redis-rs", optional = true, branch = "feature/cluster", features = ["cluster", "r2d2"] } 79 | regex = "1.2.0" 80 | rmp-serde = "0.13.7" 81 | sentry = "0.17.0" -------------------------------------------------------------------------------- /pkgs/sentry/semaphore/setup-py.patch: -------------------------------------------------------------------------------- 1 | diff --git a/py/setup.py b/py/setup.py 2 | index 35ddd0a..e8db457 100644 3 | --- a/py/setup.py 4 | +++ b/py/setup.py 5 | @@ -82,6 +82,16 @@ def build_native(spec): 6 | rtld_flags=rtld_flags, 7 | ) 8 | 9 | +def find_native(spec): 10 | + rtld_flags = ["NOW"] 11 | + if sys.platform == "darwin": 12 | + rtld_flags.append("NODELETE") 13 | + spec.add_cffi_module( 14 | + module_path="semaphore._lowlevel", 15 | + dylib=lambda: "@nixBuildDylib@", 16 | + header_filename=lambda: "@nixBuildHeader@", 17 | + rtld_flags=rtld_flags, 18 | + ) 19 | 20 | setup( 21 | name="semaphore", 22 | @@ -97,6 +107,6 @@ setup( 23 | platforms="any", 24 | install_requires=["milksnake>=0.1.2"], 25 | setup_requires=["milksnake>=0.1.2"], 26 | - milksnake_tasks=[build_native], 27 | + milksnake_tasks=[find_native], 28 | cmdclass={"sdist": CustomSDist}, 29 | ) 30 | -------------------------------------------------------------------------------- /pkgs/sentry/semaphore/source.nix: -------------------------------------------------------------------------------- 1 | { fetchFromGitHub }: 2 | 3 | rec { 4 | json-forensics = fetchFromGitHub { 5 | owner = "getsentry"; 6 | repo = "rust-json-forensics"; 7 | rev = "3896ab98bae363570b7fc0e0af353f287ab17282"; 8 | sha256 = "0vmqnqdh767gqxz2i0nlm5xyjg61fbn9370slrzzpkv9hpdprx5r"; 9 | }; 10 | 11 | semaphore = fetchFromGitHub { 12 | owner = "getsentry"; 13 | repo = "relay"; 14 | rev = "refs/tags/0.4.65"; 15 | sha256 = "0f67l8c1dd96jlm8ppg9kg9w354smh80q3cy955zsakn3fx6x4lk"; 16 | fetchSubmodules = true; 17 | }; 18 | } 
19 | -------------------------------------------------------------------------------- /pkgs/sentry/symbolic/default.nix: -------------------------------------------------------------------------------- 1 | { buildPythonPackage 2 | , fetchFromGitHub 3 | , milksnake 4 | , naersk 5 | , pkg-config 6 | , openssl 7 | }: 8 | 9 | let 10 | rust-symbolic = import ./rust.nix { inherit naersk fetchFromGitHub pkg-config openssl; }; 11 | in 12 | 13 | import ./python.nix { inherit buildPythonPackage fetchFromGitHub rust-symbolic milksnake; } 14 | -------------------------------------------------------------------------------- /pkgs/sentry/symbolic/python.nix: -------------------------------------------------------------------------------- 1 | { buildPythonPackage 2 | , fetchFromGitHub 3 | , rust-symbolic 4 | , milksnake 5 | }: 6 | 7 | let 8 | sources = import ./source.nix { inherit fetchFromGitHub; }; 9 | in 10 | buildPythonPackage rec { 11 | pname = "symbolic"; 12 | version = "7.2.0"; 13 | 14 | src = sources.symbolic; 15 | 16 | postPatch = '' 17 | patch --strip=1 < ${./python.patch} 18 | substituteInPlace py/setup.py \ 19 | --replace '@nixBuildDylib@' '${rust-symbolic}/lib/libsymbolic.so' \ 20 | --replace '@nixBuildHeader@' '${rust-symbolic}/include/symbolic.h' 21 | ''; 22 | 23 | nativeBuildInputs = [ milksnake rust-symbolic ]; 24 | 25 | preBuild = '' 26 | cd py 27 | ''; 28 | } 29 | -------------------------------------------------------------------------------- /pkgs/sentry/symbolic/python.patch: -------------------------------------------------------------------------------- 1 | diff --git a/py/setup.py b/py/setup.py 2 | index 0cc3698..c793c35 100644 3 | --- a/py/setup.py 4 | +++ b/py/setup.py 5 | @@ -83,6 +83,16 @@ def build_native(spec): 6 | rtld_flags=rtld_flags, 7 | ) 8 | 9 | +def find_native(spec): 10 | + rtld_flags = ["NOW"] 11 | + if sys.platform == "darwin": 12 | + rtld_flags.append("NODELETE") 13 | + spec.add_cffi_module( 14 | + module_path="symbolic._lowlevel", 15 | 
+ dylib=lambda: "@nixBuildDylib@", 16 | + header_filename=lambda: "@nixBuildHeader@", 17 | + rtld_flags=rtld_flags, 18 | + ) 19 | 20 | setup( 21 | name="symbolic", 22 | @@ -98,6 +108,6 @@ setup( 23 | platforms="any", 24 | install_requires=["milksnake>=0.1.2",], 25 | setup_requires=["milksnake>=0.1.2",], 26 | - milksnake_tasks=[build_native,], 27 | + milksnake_tasks=[find_native,], 28 | cmdclass={"sdist": CustomSDist,}, 29 | ) 30 | -------------------------------------------------------------------------------- /pkgs/sentry/symbolic/rust.nix: -------------------------------------------------------------------------------- 1 | { naersk 2 | , fetchFromGitHub 3 | , pkg-config 4 | , openssl 5 | }: 6 | 7 | let 8 | sources = import ./source.nix { inherit fetchFromGitHub; }; 9 | in 10 | 11 | naersk.buildPackage rec { 12 | pname = "symbolic"; 13 | version = "7.2.0"; 14 | 15 | src = 16 | sources.symbolic.overrideAttrs(drv: { 17 | postFetch = drv.postFetch + '' 18 | # See commentary in patch 19 | patch --directory=$out --strip=1 < ${./rust.patch} 20 | 21 | cp ${./Cargo.lock} $out/Cargo.lock 22 | 23 | # Copy over apple-crash-report-parser crate 24 | cp -r ${sources.apple-crash-report-parser} $out/apple-crash-report-parser 25 | ''; 26 | }); 27 | 28 | override = (drv: { 29 | preConfigure = '' 30 | # Kinda hacky, but this works to build the cabi dylib with all 31 | # the dependencies from the root project. 
32 | cd cabi 33 | ''; 34 | 35 | postInstall = '' 36 | cp -r ${src}/cabi/include $out/include 37 | ''; 38 | }); 39 | 40 | nativeBuildInputs = [ pkg-config openssl ]; 41 | } 42 | -------------------------------------------------------------------------------- /pkgs/sentry/symbolic/rust.patch: -------------------------------------------------------------------------------- 1 | diff --git a/Cargo.toml b/Cargo.toml 2 | index d98e50a..43cc1ab 100644 3 | --- a/Cargo.toml 4 | +++ b/Cargo.toml 5 | @@ -29,6 +29,18 @@ include = [ 6 | all-features = true 7 | 8 | [workspace] 9 | +members = [ 10 | + "cabi", 11 | + "common", 12 | + "debuginfo", 13 | + "demangle", 14 | + "minidump", 15 | + "proguard", 16 | + "sourcemap", 17 | + "symcache", 18 | + "unreal", 19 | + "apple-crash-report-parser" 20 | +] 21 | 22 | [features] 23 | default = ["debuginfo"] 24 | @@ -56,7 +68,7 @@ symbolic-unreal = { version = "7.2.0", path = "unreal", optional = true } 25 | 26 | [dev-dependencies] 27 | clap = "2.33.0" 28 | -failure = "0.1.7" 29 | +failure = "0.1.8" 30 | walkdir = "2.3.1" 31 | 32 | [[example]] 33 | diff --git a/cabi/Cargo.toml b/cabi/Cargo.toml 34 | index 9a6fe98..95e1d8c 100644 35 | --- a/cabi/Cargo.toml 36 | +++ b/cabi/Cargo.toml 37 | @@ -27,6 +27,6 @@ lto = true 38 | 39 | [dependencies] 40 | serde_json = "1.0.40" 41 | -failure = "0.1.5" 42 | -apple-crash-report-parser = { version = "0.4.0", features = ["with_serde"] } 43 | +failure = "0.1.8" 44 | +apple-crash-report-parser = { path = "../apple-crash-report-parser", features = ["with_serde"] } 45 | symbolic = { version = "7.2.0", path = "..", features = ["debuginfo", "demangle", "minidump", "proguard", "sourcemap", "symcache", "unreal-serde"] } 46 | diff --git a/common/Cargo.toml b/common/Cargo.toml 47 | index 4b8f323..b089a1a 100644 48 | --- a/common/Cargo.toml 49 | +++ b/common/Cargo.toml 50 | @@ -18,7 +18,7 @@ edition = "2018" 51 | 52 | [dependencies] 53 | debugid = "0.7.1" 54 | -failure = "0.1.5" 55 | +failure = "0.1.8" 56 | memmap = 
"0.7.0" 57 | stable_deref_trait = "1.1.1" 58 | serde_ = { package = "serde", version = "1.0.88", optional = true, features = ["derive"] } 59 | diff --git a/debuginfo/Cargo.toml b/debuginfo/Cargo.toml 60 | index 7367d9b..e16e2b4 100644 61 | --- a/debuginfo/Cargo.toml 62 | +++ b/debuginfo/Cargo.toml 63 | @@ -21,7 +21,7 @@ exclude = [ 64 | 65 | [dependencies] 66 | dmsort = "1.0.0" 67 | -failure = "0.1.5" 68 | +failure = "0.1.8" 69 | fallible-iterator = "0.2.0" 70 | flate2 = { version = "1.0.13", features = ["rust_backend"], default-features = false } 71 | gimli = { version = "0.20.0", features = ["read", "std"], default-features = false } 72 | diff --git a/minidump/Cargo.toml b/minidump/Cargo.toml 73 | index 04c6b92..1aaf1e1 100644 74 | --- a/minidump/Cargo.toml 75 | +++ b/minidump/Cargo.toml 76 | @@ -27,7 +27,7 @@ include = [ 77 | ] 78 | 79 | [dependencies] 80 | -failure = "0.1.5" 81 | +failure = "0.1.8" 82 | lazy_static = "1.4.0" 83 | regex = "1.3.5" 84 | serde = { version = "1.0.94", optional = true } 85 | -------------------------------------------------------------------------------- /pkgs/sentry/symbolic/source.nix: -------------------------------------------------------------------------------- 1 | { fetchFromGitHub }: 2 | 3 | rec { 4 | apple-crash-report-parser = fetchFromGitHub { 5 | owner = "getsentry"; 6 | repo = "apple-crash-report-parser"; 7 | rev = "refs/tags/0.4.0"; 8 | sha256 = "0wy1hgz62cv17gwl9gxpk5idy2d2w96mnvw6h2w2wa5wj9zflvb8"; 9 | }; 10 | 11 | symbolic = fetchFromGitHub { 12 | owner = "getsentry"; 13 | repo = "symbolic"; 14 | rev = "refs/tags/7.2.0"; 15 | sha256 = "0400hvnrg60ynjx8vkiyzm95izjzldifn26ifzx8z3h9a9iriyzg"; 16 | fetchSubmodules = true; 17 | }; 18 | } 19 | -------------------------------------------------------------------------------- /pkgs/sentry/xmlsec/lxml-workaround.patch: -------------------------------------------------------------------------------- 1 | Workaround for: https://bugs.launchpad.net/lxml/+bug/1880251 2 | 3 | diff 
--git a/tests/base.py b/tests/base.py 4 | index b05de1d..5ec356f 100644 5 | --- a/tests/base.py 6 | +++ b/tests/base.py 7 | @@ -94,6 +94,7 @@ class TestMemoryLeaks(unittest.TestCase): 8 | 9 | def load_xml(self, name, xpath=None): 10 | """returns xml.etree""" 11 | + etree.set_default_parser(parser=etree.XMLParser()) 12 | root = etree.parse(self.path(name)).getroot() 13 | if xpath is None: 14 | return root 15 | diff --git a/tests/test_doc_examples.py b/tests/test_doc_examples.py 16 | index 2fc490f..53d2377 100644 17 | --- a/tests/test_doc_examples.py 18 | +++ b/tests/test_doc_examples.py 19 | @@ -42,3 +42,5 @@ def test_doc_example(example): 20 | """ 21 | with cd(example.parent): 22 | runpy.run_path(str(example)) 23 | + from lxml import etree 24 | + etree.set_default_parser(parser=etree.XMLParser()) 25 | -------------------------------------------------------------------------------- /pkgs/sentry/xmlsec/no-black-format.patch: -------------------------------------------------------------------------------- 1 | The black formatter is only available in python 3, and the Nix build 2 | fails on skipped tests. 
3 | 4 | diff --git a/tests/test_type_stubs.py b/tests/test_type_stubs.py 5 | deleted file mode 100644 6 | index 3b1c375..0000000 7 | --- a/tests/test_type_stubs.py 8 | +++ /dev/null 9 | @@ -1,71 +0,0 @@ 10 | -"""Test type stubs for correctness where possible.""" 11 | - 12 | -import os 13 | -import sys 14 | - 15 | -import pytest 16 | - 17 | -import xmlsec 18 | - 19 | -black = pytest.importorskip('black') 20 | - 21 | - 22 | -if sys.version_info >= (3, 4): 23 | - from pathlib import Path 24 | -else: 25 | - from _pytest.pathlib import Path 26 | - 27 | - 28 | -constants_stub_header = """ 29 | -import sys 30 | -from typing import NamedTuple 31 | - 32 | -if sys.version_info >= (3, 8): 33 | - from typing import Final 34 | -else: 35 | - from typing_extensions import Final 36 | - 37 | - 38 | -class __KeyData(NamedTuple): # __KeyData type 39 | - href: str 40 | - name: str 41 | - 42 | - 43 | -class __Transform(NamedTuple): # __Transform type 44 | - href: str 45 | - name: str 46 | - usage: int 47 | - 48 | - 49 | -""" 50 | - 51 | - 52 | -def gen_constants_stub(): 53 | - """ 54 | - Generate contents of the file:`xmlsec/constants.pyi`. 55 | - 56 | - Simply load all constants at runtime, 57 | - generate appropriate type hint for each constant type. 58 | - """ 59 | - 60 | - def process_constant(name): 61 | - """Generate line in stub file for constant name.""" 62 | - obj = getattr(xmlsec.constants, name) 63 | - return '{name}: Final = {obj!r}'.format(name=name, obj=obj) 64 | - 65 | - names = list(sorted(name for name in dir(xmlsec.constants) if not name.startswith('__'))) 66 | - lines = [process_constant(name) for name in names] 67 | - return constants_stub_header + os.linesep.join(lines) 68 | - 69 | - 70 | -def test_xmlsec_constants_stub(request): 71 | - """ 72 | - Generate the stub file for :mod:`xmlsec.constants` from existing code. 73 | - 74 | - Compare it against the existing stub :file:`xmlsec/constants.pyi`. 
75 | - """ 76 | - rootdir = Path(str(request.config.rootdir)) 77 | - stub = rootdir / 'src' / 'xmlsec' / 'constants.pyi' 78 | - mode = black.FileMode(target_versions=[black.TargetVersion.PY38], line_length=130, is_pyi=True, string_normalization=False) 79 | - formatted = black.format_file_contents(gen_constants_stub(), fast=False, mode=mode) 80 | - assert formatted == stub.read_text() 81 | -------------------------------------------------------------------------------- /pkgs/snuba/configurable-host.patch: -------------------------------------------------------------------------------- 1 | diff --git a/snuba/cli/api.py b/snuba/cli/api.py 2 | index ec6f29d3..00ff7a85 100644 3 | --- a/snuba/cli/api.py 4 | +++ b/snuba/cli/api.py 5 | @@ -9,4 +9,4 @@ def api(*, debug: bool) -> None: 6 | from werkzeug.serving import WSGIRequestHandler 7 | 8 | WSGIRequestHandler.protocol_version = "HTTP/1.1" 9 | - application.run(port=settings.PORT, threaded=True, debug=debug) 10 | + application.run(port=settings.PORT, threaded=True, debug=debug, host=settings.HOST) 11 | -------------------------------------------------------------------------------- /pkgs/snuba/dashboard.patch: -------------------------------------------------------------------------------- 1 | diff --git a/snuba/web/static/dashboard.html b/snuba/web/static/dashboard.html 2 | index a67ae551..e7eb614c 100644 3 | --- a/snuba/web/static/dashboard.html 4 | +++ b/snuba/web/static/dashboard.html 5 | @@ -1,14 +1,14 @@ 6 | 7 | 8 | - 9 | - 10 | + 11 | + 12 | 13 | 14 | 15 | 16 | 17 | 18 | - 19 | + 20 | 21 |