├── pkgs ├── kes-rotation │ ├── .gitignore │ ├── shard.lock │ ├── shards.nix │ ├── shard.yml │ └── default.nix ├── node-update │ ├── .gitignore │ ├── shard.lock │ ├── shards.nix │ ├── shard.yml │ └── default.nix ├── snapshot-states │ ├── .gitignore │ ├── shard.lock │ ├── shards.nix │ ├── shard.yml │ └── default.nix └── varnish │ └── modules.nix ├── FirefoxDebugExample.png ├── physical ├── aws │ └── security-groups │ │ ├── allow-public.nix │ │ ├── allow-wireguard.nix │ │ ├── allow-explorer-gw.nix │ │ └── allow-peers.nix └── mock.nix ├── default.nix ├── modules ├── ssh.nix ├── common.nix ├── load-client.nix ├── tcpdump.nix ├── db-sync.nix ├── grafana │ └── cardano │ │ └── systemd-service-restarts.json └── cardano-postgres.nix ├── scripts ├── physical-clean.sh ├── hours-since-last-epoch.sh ├── create-aws.sh ├── hours-until-next-epoch.sh ├── resize-core-nodes-ebs-disks.sh ├── resize-relay-nodes-ebs-disks.sh ├── upload-with-checksum.sh ├── create-libvirtd.sh ├── genesis.spec.json ├── resize-ebs-disks.sh ├── gen-grafana-creds.nix ├── test-deploy.sh ├── gen-graylog-creds.nix ├── renew-kes-keys.sh └── submit-update-proposal.sh ├── deployments ├── cardano-libvirtd.nix └── cardano-aws.nix ├── .gitignore ├── globals-shelley-qa-p2p.nix ├── bench ├── Makefile ├── lib-sheets.sh ├── lib-report.sh ├── latency-map.sh ├── profile-genesis.jq ├── lib-benchrun.sh ├── lib-attic.sh ├── lib-tag.sh ├── lib-fetch.sh ├── lib-profile.sh ├── lib-genesis-byron.sh ├── lib-analysis.sh ├── sync-to.sh ├── lib.sh ├── make-topology.hs └── lib-sanity.sh ├── static └── default.nix ├── nix ├── sources.bench.json ├── cardano.nix ├── default.nix └── sources.nix ├── globals-shelley-qa.nix ├── topologies ├── bench-distrib-3.nix ├── shelley-dev.nix ├── bench-eu-central-1-3.nix ├── bench-dense-3.nix ├── bench-distrib-6.nix ├── shelley-qa-p2p.nix ├── bench-distrib-9.nix ├── shelley-qa.nix ├── staging.nix ├── alonzo-purple.nix ├── testnet.nix ├── bench-distrib-12.nix ├── bench-dense-12.nix └── p2p.nix ├── 
globals-p2p.nix ├── roles ├── relay-high-load.nix ├── relay.nix ├── snapshots.nix ├── load-client.nix ├── core.nix ├── faucet.nix ├── explorer-gateway.nix └── tx-generator.nix ├── globals-staging.nix ├── release.nix ├── globals-shelley-dev.nix ├── examples └── shelley-testnet │ ├── globals.nix │ ├── scripts │ ├── setup-stakepools-block-production.sh │ ├── submit-update-proposal.sh │ └── register-stake-pool.sh │ └── topology.nix ├── globals-testnet.nix ├── shell.nix ├── globals-alonzo-purple.nix ├── globals-mainnet.nix └── README.md /pkgs/kes-rotation/.gitignore: -------------------------------------------------------------------------------- 1 | lib/ 2 | -------------------------------------------------------------------------------- /pkgs/node-update/.gitignore: -------------------------------------------------------------------------------- 1 | lib/ 2 | -------------------------------------------------------------------------------- /pkgs/snapshot-states/.gitignore: -------------------------------------------------------------------------------- 1 | lib/ 2 | -------------------------------------------------------------------------------- /FirefoxDebugExample.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/input-output-hk/cardano-ops/HEAD/FirefoxDebugExample.png -------------------------------------------------------------------------------- /pkgs/kes-rotation/shard.lock: -------------------------------------------------------------------------------- 1 | version: 1.0 2 | shards: 3 | email: 4 | github: arcage/crystal-email 5 | version: 0.6.1 6 | -------------------------------------------------------------------------------- /pkgs/node-update/shard.lock: -------------------------------------------------------------------------------- 1 | version: 1.0 2 | shards: 3 | email: 4 | github: arcage/crystal-email 5 | version: 0.6.1 6 | 
-------------------------------------------------------------------------------- /pkgs/snapshot-states/shard.lock: -------------------------------------------------------------------------------- 1 | version: 2.0 2 | shards: 3 | email: 4 | git: https://github.com/arcage/crystal-email.git 5 | version: 0.6.3 6 | 7 | -------------------------------------------------------------------------------- /physical/aws/security-groups/allow-public.nix: -------------------------------------------------------------------------------- 1 | { pkgs, ... }@args: 2 | with pkgs; 3 | iohk-ops-lib.physical.aws.security-groups.allow-all-to-tcp-port 4 | "cardano" globals.cardanoNodePort args 5 | -------------------------------------------------------------------------------- /pkgs/kes-rotation/shards.nix: -------------------------------------------------------------------------------- 1 | { 2 | email = { 3 | owner = "arcage"; 4 | repo = "crystal-email"; 5 | rev = "v0.6.1"; 6 | sha256 = "0p3wf8aq4qcad41s3m5jbnsp2zv88mk888rnvwhwb08c0ndzls2s"; 7 | }; 8 | } 9 | -------------------------------------------------------------------------------- /pkgs/node-update/shards.nix: -------------------------------------------------------------------------------- 1 | { 2 | email = { 3 | owner = "arcage"; 4 | repo = "crystal-email"; 5 | rev = "v0.6.1"; 6 | sha256 = "0p3wf8aq4qcad41s3m5jbnsp2zv88mk888rnvwhwb08c0ndzls2s"; 7 | }; 8 | } 9 | -------------------------------------------------------------------------------- /pkgs/snapshot-states/shards.nix: -------------------------------------------------------------------------------- 1 | { 2 | email = { 3 | owner = "arcage"; 4 | repo = "crystal-email"; 5 | rev = "v0.6.3"; 6 | sha256 = "sha256-lO2qAr4iwlvLr0Aq/oHXMGfCmhFJOFrt73sBqjPKwkw="; 7 | }; 8 | } 9 | -------------------------------------------------------------------------------- /default.nix: -------------------------------------------------------------------------------- 1 | { system ? 
builtins.currentSystem 2 | , crossSystem ? null 3 | , config ? {} 4 | , pkgs ? import ./nix { inherit system crossSystem config; } 5 | }: with pkgs; { 6 | 7 | shell = import ./shell.nix { inherit pkgs; }; 8 | } 9 | -------------------------------------------------------------------------------- /modules/ssh.nix: -------------------------------------------------------------------------------- 1 | {...}: { 2 | # OpenSSH Version 9.8p1 fixes CVE-2024-6387 3 | programs.ssh.package = ( 4 | builtins.getFlake "github:nixos/nixpkgs/b9014df496d5b68bf7c0145d0e9b0f529ce4f2a8" 5 | ).legacyPackages.x86_64-linux.openssh; 6 | } 7 | -------------------------------------------------------------------------------- /scripts/physical-clean.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | sed -i $1 -e \ 4 | ' 5 | s_"ssh-[^"]*"_""_; 6 | s_\("\| \)[0-9][0-9]*\.[0-9][0-9]*\.[0-9][0-9]*\.[0-9][0-9]*\("\| \)_\11.1.1.1\2_; 7 | s_"i-[0-9a-f]*"_""_ 8 | s_"sg-[0-9a-f]*"_""_ 9 | ' 10 | -------------------------------------------------------------------------------- /scripts/hours-since-last-epoch.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | elapsedSeconds=$(( $(date +\%s) - $(date +\%s -d "$SYSTEM_START") )) 6 | elapsedSecondsInEpoch=$(( $elapsedSeconds % $EPOCH_LENGTH )) 7 | hoursSinceLastEpoch=$(( $elapsedSecondsInEpoch / 3600 )) 8 | echo $hoursSinceLastEpoch 9 | -------------------------------------------------------------------------------- /deployments/cardano-libvirtd.nix: -------------------------------------------------------------------------------- 1 | with import ../nix {}; 2 | 3 | import ../clusters/cardano.nix { 4 | inherit pkgs; 5 | inherit (globals.libvirtd) instances; 6 | } // lib.optionalAttrs (builtins.getEnv "BUILD_ONLY" == "true") { 7 | defaults = { 8 | users.users.root.openssh.authorizedKeys.keys = lib.mkForce 
[""]; 9 | }; 10 | } 11 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *~ 2 | \#* 3 | \.#* 4 | *.swp 5 | result* 6 | 7 | /analysis 8 | /runs 9 | /runs-archive 10 | /runs-last 11 | /db-sync-snapshot 12 | /state-snapshots 13 | *-logs/ 14 | 15 | .envrc 16 | genesis-keys 17 | globals.nix 18 | keys 19 | last-deploy.log 20 | static 21 | 22 | /*.json 23 | /*.split 24 | /*.tar.xz 25 | .nix-gc-roots 26 | 27 | *.md.temp 28 | -------------------------------------------------------------------------------- /scripts/create-aws.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euxo pipefail 4 | 5 | cd "$(dirname "$0")/.." 6 | 7 | # Credential setup 8 | if [ ! -f ./static/graylog-creds.nix ]; then 9 | nix-shell -A gen-graylog-creds 10 | fi 11 | 12 | nixops destroy || true 13 | nixops delete || true 14 | nixops create ./deployments/cardano-aws.nix -I nixpkgs=./nix 15 | nixops deploy --show-trace 16 | -------------------------------------------------------------------------------- /scripts/hours-until-next-epoch.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | elapsedSeconds=$(( $(date +\%s) - $(date +\%s -d "$SYSTEM_START") )) 6 | elapsedSecondsInEpoch=$(( $elapsedSeconds % $EPOCH_LENGTH )) 7 | secondsUntilNextEpoch=$(( $EPOCH_LENGTH - $elapsedSecondsInEpoch )) 8 | hoursUntilNextEpoch=$(( $secondsUntilNextEpoch / 3600 )) 9 | echo $hoursUntilNextEpoch 10 | -------------------------------------------------------------------------------- /pkgs/kes-rotation/shard.yml: -------------------------------------------------------------------------------- 1 | name: kes-rotation 2 | version: 0.1.0 3 | 4 | authors: 5 | - John Lotoski 6 | 7 | description: | 8 | KES key rotation script for use in 
crontab 9 | 10 | targets: 11 | kes-rotation: 12 | main: src/kes-rotation.cr 13 | 14 | dependencies: 15 | email: 16 | github: arcage/crystal-email 17 | version: 0.6.1 18 | 19 | crystal: 0.34.0 20 | 21 | license: MIT 22 | -------------------------------------------------------------------------------- /globals-shelley-qa-p2p.nix: -------------------------------------------------------------------------------- 1 | pkgs: { 2 | 3 | deploymentName = "shelley-qa-p2p"; 4 | 5 | environmentName = "shelley_qa"; 6 | 7 | relaysNew = "relays.${pkgs.globals.domain}"; 8 | nbInstancesPerRelay = 1; 9 | 10 | withExplorer = false; 11 | 12 | ec2 = { 13 | credentials = { 14 | accessKeyIds = { 15 | IOHK = "dev-deployer"; 16 | dns = "dev-deployer"; 17 | }; 18 | }; 19 | }; 20 | } 21 | -------------------------------------------------------------------------------- /pkgs/node-update/shard.yml: -------------------------------------------------------------------------------- 1 | name: node-update 2 | version: 0.1.0 3 | 4 | authors: 5 | - John Lotoski 6 | 7 | description: | 8 | Peer update and rotation script for use in crontab 9 | 10 | targets: 11 | node-update: 12 | main: src/node-update.cr 13 | 14 | dependencies: 15 | email: 16 | github: arcage/crystal-email 17 | version: 0.6.1 18 | 19 | crystal: 0.34.0 20 | 21 | license: MIT 22 | -------------------------------------------------------------------------------- /scripts/resize-core-nodes-ebs-disks.sh: -------------------------------------------------------------------------------- 1 | 2 | #!/usr/bin/env bash 3 | 4 | set -euo pipefail 5 | 6 | cd "$(dirname "$0")/.." 
7 | 8 | CORE_NODES=$(nix eval --impure --raw --expr '(toString (map (r: r.name) (import ./nix {}).globals.topology.coreNodes))') 9 | 10 | TARGET_SIZE=$(nix eval --impure --expr '(with (import ./nix {}).globals; systemDiskAllocationSize + nodeDbDiskAllocationSize)') 11 | 12 | ./scripts/resize-ebs-disks.sh $TARGET_SIZE $CORE_NODES 13 | -------------------------------------------------------------------------------- /bench/Makefile: -------------------------------------------------------------------------------- 1 | all: docs 2 | 3 | docs: README.md 4 | 5 | README.md: README.org 6 | pandoc --from org --to gfm <$< >$@ 7 | 8 | TOPO=--size 52 9 | TOPO+=--loc EU --loc AP --loc US 10 | TOPO+=--topology-output topology.json. 11 | TOPO+=--dot-output topology.dot 12 | 13 | topology: 14 | ./make-topology.hs ${TOPO} 15 | jq '.' topology.json. > topology.json && rm topology.json. 16 | neato -s120 -Tpdf topology.dot > topology.pdf 17 | evince topology.pdf 18 | -------------------------------------------------------------------------------- /scripts/resize-relay-nodes-ebs-disks.sh: -------------------------------------------------------------------------------- 1 | 2 | #!/usr/bin/env bash 3 | 4 | set -euo pipefail 5 | 6 | cd "$(dirname "$0")/.." 7 | 8 | RELAYS=$(nix eval --impure --raw --expr '(toString (map (r: r.name) (import ./nix {}).globals.topology.relayNodes))') 9 | 10 | TARGET_SIZE=$(nix eval --impure --expr '(with (import ./nix {}).globals; systemDiskAllocationSize + nodeDbDiskAllocationSize * nbInstancesPerRelay)') 11 | 12 | ./scripts/resize-ebs-disks.sh $TARGET_SIZE $RELAYS 13 | -------------------------------------------------------------------------------- /pkgs/snapshot-states/shard.yml: -------------------------------------------------------------------------------- 1 | name: snapshot-states 2 | version: 0.1.0 3 | 4 | authors: 5 | - Jean-Baptiste Giraudeau 6 | 7 | description: | 8 | Upload db-sync snapshot, to be schedule on a X epochs period. 
9 | 10 | targets: 11 | snapshot-states: 12 | main: src/snapshot-states.cr 13 | 14 | dependencies: 15 | email: 16 | github: arcage/crystal-email 17 | version: 0.6.3 18 | 19 | crystal: 1.0.0 20 | 21 | license: MIT 22 | -------------------------------------------------------------------------------- /physical/aws/security-groups/allow-wireguard.nix: -------------------------------------------------------------------------------- 1 | { region, org, pkgs, lib, ... }: 2 | with lib; { 3 | "allow-wireguard" = { resources, ... }: { 4 | inherit region; 5 | accessKeyId = pkgs.globals.ec2.credentials.accessKeyIds.${org}; 6 | _file = ./allow-wireguard.nix; 7 | description = "Allow to UDP/51820"; 8 | rules = [{ 9 | protocol = "udp"; 10 | fromPort = 51820; 11 | toPort = 51820; 12 | sourceIp = "0.0.0.0/0"; 13 | sourceIpv6 = "::/0"; 14 | }]; 15 | }; 16 | } 17 | -------------------------------------------------------------------------------- /static/default.nix: -------------------------------------------------------------------------------- 1 | pkgs: with pkgs.lib; 2 | let condImport = name: file: optionalAttrs (builtins.pathExists file) { 3 | "${name}" = import file; 4 | }; 5 | in { 6 | additionalPeers = []; 7 | relaysExcludeList = []; 8 | poolsExcludeList = []; 9 | } // condImport "graylogCreds" ./graylog-creds.nix 10 | // condImport "grafanaCreds" ./grafana-creds.nix 11 | // condImport "pagerDuty" ./pager-duty.nix 12 | // condImport "deadMansSnitch" ./dead-mans-snitch.nix 13 | // condImport "oauth" ./oauth.nix 14 | // (condImport "static" ./static.nix).static or {} 15 | -------------------------------------------------------------------------------- /nix/sources.bench.json: -------------------------------------------------------------------------------- 1 | { 2 | "cardano-node": { 3 | "branch": "", 4 | "description": null, 5 | "homepage": null, 6 | "owner": "input-output-hk", 7 | "repo": "cardano-node", 8 | "rev": "0f4a85fac2b6a7c2afccdd7f03bedb257f7b1f41", 9 | "sha256": 
"0vgbirbc45rkrfpm7d5ma29g5rqq25s7mw1k0ff1z2x9w81gc9w8", 10 | "type": "tarball", 11 | "url": "https://github.com/input-output-hk/cardano-node/archive/0f4a85fac2b6a7c2afccdd7f03bedb257f7b1f41.tar.gz", 12 | "url_template": "https://github.com///archive/.tar.gz" 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /globals-shelley-qa.nix: -------------------------------------------------------------------------------- 1 | pkgs: with pkgs; { 2 | 3 | deploymentName = "shelley-qa"; 4 | environmentName = "shelley_qa"; 5 | 6 | relaysNew = globals.environmentConfig.relaysNew; 7 | 8 | withFaucet = true; 9 | withExplorer = true; 10 | explorerBackendsInContainers = true; 11 | explorerBackends.a = globals.explorer13; 12 | withSmash = true; 13 | withSubmitApi = true; 14 | faucetHostname = "faucet"; 15 | minCpuPerInstance = 1; 16 | minMemoryPerInstance = 4; 17 | 18 | ec2 = { 19 | credentials = { 20 | accessKeyIds = { 21 | IOHK = "dev-deployer"; 22 | dns = "dev-deployer"; 23 | }; 24 | }; 25 | }; 26 | } 27 | -------------------------------------------------------------------------------- /topologies/bench-distrib-3.nix: -------------------------------------------------------------------------------- 1 | { 2 | coreNodes = [ 3 | { 4 | name = "node-0"; 5 | nodeId = 0; 6 | org = "IOHK"; 7 | region = "eu-central-1"; 8 | producers = ["node-1" "node-2"]; 9 | } 10 | { 11 | name = "node-1"; 12 | nodeId = 1; 13 | org = "IOHK"; 14 | region = "ap-southeast-2"; 15 | producers = ["node-2" "node-0"]; 16 | pools = 1; 17 | } 18 | { 19 | name = "node-2"; 20 | nodeId = 2; 21 | org = "IOHK"; 22 | region = "us-east-1"; 23 | producers = ["node-0" "node-1"]; 24 | pools = 1; 25 | } 26 | ]; 27 | 28 | relayNodes = []; 29 | } 30 | -------------------------------------------------------------------------------- /globals-p2p.nix: -------------------------------------------------------------------------------- 1 | pkgs: with pkgs.iohkNix.cardanoLib; with 
pkgs.globals; { 2 | 3 | # This should match the name of the topology file. 4 | deploymentName = "p2p"; 5 | 6 | withFaucet = true; 7 | 8 | explorerBackends = { 9 | a = explorer12; 10 | }; 11 | explorerBackendsInContainers = true; 12 | 13 | overlay = self: super: { 14 | sourcePaths = super.sourcePaths // { 15 | # Use p2p branch everywhere: 16 | cardano-node = super.sourcePaths.cardano-node-service; 17 | }; 18 | }; 19 | 20 | ec2 = { 21 | credentials = { 22 | accessKeyIds = { 23 | IOHK = "default"; 24 | dns = "dev"; 25 | }; 26 | }; 27 | }; 28 | } 29 | -------------------------------------------------------------------------------- /topologies/shelley-dev.nix: -------------------------------------------------------------------------------- 1 | pkgs: with pkgs; with lib; with topology-lib; { 2 | coreNodes = [ 3 | { 4 | name = "a"; 5 | nodeId = 1; 6 | org = "IOHK"; 7 | region = "eu-central-1"; 8 | producers = ["b" "c"]; 9 | stakePool = false; 10 | } 11 | { 12 | name = "b"; 13 | nodeId = 2; 14 | org = "IOHK"; 15 | region = "eu-central-1"; 16 | producers = ["c" "a"]; 17 | stakePool = false; 18 | } 19 | { 20 | name = "c"; 21 | nodeId = 3; 22 | org = "IOHK"; 23 | region = "eu-central-1"; 24 | producers = ["a" "b"]; 25 | stakePool = false; 26 | } 27 | ]; 28 | 29 | relayNodes = []; 30 | } 31 | -------------------------------------------------------------------------------- /bench/lib-sheets.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # shellcheck disable=1091,2016 3 | 4 | sheet_list=() 5 | 6 | sheet_list+=(sheet_message_types_summary) 7 | sheet_message_types_summary() { 8 | local dir=${1:-.} name 9 | name=$(echo ${FUNCNAME[0]} | cut -d_ -f2-) 10 | 11 | mkdir -p "$dir"/report 12 | 13 | jq ' .message_types 14 | | to_entries 15 | | map ( .key as $mach 16 | | .value 17 | | to_entries 18 | | map([ $mach, .key, .value | tostring])) 19 | | add 20 | | .[] 21 | | join(",")' < "$dir"/analysis.json --raw-output \ 22 | > 
"$dir"/report/"$name".csv 23 | 24 | sed -i '1inode, message, occurences' "$dir"/report/"$name".csv 25 | } 26 | -------------------------------------------------------------------------------- /roles/relay-high-load.nix: -------------------------------------------------------------------------------- 1 | pkgs: 2 | with pkgs; with lib; 3 | {name, config, ...}: let 4 | cfg = config.services.cardano-node; 5 | in { 6 | 7 | imports = [ 8 | cardano-ops.roles.relay 9 | ]; 10 | 11 | # Add host and container auto metrics and alarming 12 | services.custom-metrics.enableNetdata = true; 13 | 14 | services.cardano-node.extraNodeConfig = { 15 | AcceptedConnectionsLimit = { 16 | # Ensure limits are above our alerts threshold: 17 | hardLimit = topology-lib.roundToInt 18 | (globals.alertTcpCrit / cfg.instances * 1.05); 19 | softLimit = topology-lib.roundToInt 20 | (globals.alertTcpHigh / cfg.instances * 1.05); 21 | delay = 5; 22 | }; 23 | }; 24 | } 25 | -------------------------------------------------------------------------------- /roles/relay.nix: -------------------------------------------------------------------------------- 1 | 2 | pkgs: with pkgs; {config, ...}: { 3 | 4 | imports = [ 5 | cardano-ops.modules.base-service 6 | ../modules/tcpdump.nix 7 | ../modules/ssh.nix 8 | ]; 9 | 10 | deployment.ec2.ebsInitialRootDiskSize = globals.systemDiskAllocationSize 11 | + (globals.nodeDbDiskAllocationSize * config.services.cardano-node.instances); 12 | 13 | services.cardano-node = { 14 | instances = lib.mkDefault globals.nbInstancesPerRelay; 15 | totalCpuCores = lib.mkDefault config.node.cpus; 16 | extraServiceConfig = _: { 17 | # Since multiple node instances might monopolize CPU, preventing ssh access, lower nice priority: 18 | serviceConfig.Nice = 5; 19 | }; 20 | }; 21 | 22 | } 23 | -------------------------------------------------------------------------------- /topologies/bench-eu-central-1-3.nix: -------------------------------------------------------------------------------- 1 
| { 2 | coreNodes = [ 3 | { 4 | name = "node-0"; 5 | nodeId = 0; 6 | org = "IOHK"; 7 | region = "eu-central-1"; 8 | producers = ["node-1" "node-2"]; 9 | stakePool = true; 10 | } 11 | { 12 | name = "node-1"; 13 | nodeId = 1; 14 | org = "IOHK"; 15 | region = "eu-central-1"; 16 | producers = ["node-2" "node-0"]; 17 | stakePool = true; 18 | } 19 | { 20 | name = "node-2"; 21 | nodeId = 2; 22 | org = "IOHK"; 23 | region = "eu-central-1"; 24 | producers = ["node-0" "node-1"]; 25 | } 26 | ]; 27 | 28 | relayNodes = []; 29 | 30 | legacyCoreNodes = []; 31 | 32 | legacyRelayNodes = []; 33 | } 34 | -------------------------------------------------------------------------------- /globals-staging.nix: -------------------------------------------------------------------------------- 1 | pkgs: { 2 | 3 | deploymentName = "rc-staging"; 4 | deploymentPath = "$HOME/staging"; 5 | 6 | dnsZone = "${pkgs.globals.domain}"; 7 | 8 | domain = "staging.cardano.org"; 9 | 10 | environmentName = "staging"; 11 | 12 | withSubmitApi = true; 13 | withSmash = true; 14 | withFaucet = true; 15 | faucetHostname = "faucet"; 16 | 17 | topology = import ./topologies/staging.nix pkgs; 18 | 19 | ec2 = { 20 | credentials = { 21 | accessKeyIds = { 22 | IOHK = "iohk"; 23 | Emurgo = "fifth-party"; 24 | CF = "third-party"; 25 | dns = "dns"; 26 | }; 27 | }; 28 | instances.relay-node = pkgs.iohk-ops-lib.physical.aws.t3-xlarge; 29 | }; 30 | 31 | alertChainDensityLow = "90"; 32 | } 33 | -------------------------------------------------------------------------------- /scripts/upload-with-checksum.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | cd $(dirname "$1") 6 | FILE="$(basename "$1")" 7 | BUCKET=$2 8 | PREFIX=${3:-} 9 | if [ $PREFIX = "" ]; then 10 | PATH_PREFIX="" 11 | else 12 | PATH_PREFIX="$PREFIX/" 13 | fi 14 | 15 | if [[ $BUCKET == *"."* ]]; then 16 | HOST=$BUCKET 17 | else 18 | HOST="$BUCKET.s3.amazonaws.com" 19 | fi 
20 | 21 | sha256sum "$FILE" > "$FILE.sha256sum" 22 | 23 | export PYTHONPATH= 24 | for f in "$FILE.sha256sum" "$FILE"; do 25 | >&2 echo "Uploading $f" 26 | 27 | >&2 s3cmd put --acl-public --multipart-chunk-size-mb=512 $f s3://$BUCKET/$PATH_PREFIX 28 | done 29 | 30 | echo "Uploaded files:" 31 | for f in "$FILE" "$FILE.sha256sum"; do 32 | echo " - https://$HOST/$PATH_PREFIX$f" 33 | done 34 | -------------------------------------------------------------------------------- /release.nix: -------------------------------------------------------------------------------- 1 | { cardano-ops ? { outPath = ./.; rev = "abcdef"; } }: 2 | let 3 | sources = import ./nix/sources.nix; 4 | pkgs = import ./nix {}; 5 | 6 | in pkgs.lib.fix (self: { 7 | inherit (pkgs) 8 | kes-rotation 9 | nginxExplorer 10 | node-update 11 | prometheus-varnish-exporter 12 | varnish; 13 | 14 | varnish-modules = pkgs.varnishPackages.modules; 15 | 16 | shell = import ./shell.nix { inherit pkgs; }; 17 | 18 | forceNewEval = pkgs.writeText "forceNewEval" cardano-ops.rev; 19 | 20 | required = pkgs.releaseTools.aggregate { 21 | name = "required"; 22 | constituents = with self; [ 23 | forceNewEval 24 | kes-rotation 25 | nginxExplorer 26 | node-update 27 | prometheus-varnish-exporter 28 | varnish 29 | varnish-modules 30 | shell 31 | ]; 32 | }; 33 | }) 34 | -------------------------------------------------------------------------------- /topologies/bench-dense-3.nix: -------------------------------------------------------------------------------- 1 | { 2 | coreNodes = [ 3 | { 4 | name = "node-0"; 5 | nodeId = 0; 6 | org = "IOHK"; 7 | region = "eu-central-1"; 8 | producers = ["node-1" "node-2"]; 9 | } 10 | { 11 | name = "node-1"; 12 | nodeId = 1; 13 | org = "IOHK"; 14 | region = "ap-southeast-2"; 15 | producers = ["node-2" "node-0"]; 16 | pools = 1; 17 | } 18 | { 19 | name = "node-2"; 20 | nodeId = 2; 21 | org = "IOHK"; 22 | region = "us-east-1"; 23 | producers = ["node-0" "node-1"]; 24 | pools = 10; 25 | } 26 | ]; 27 
| 28 | relayNodes = [ 29 | { 30 | name = "explorer"; 31 | nodeId = 3; 32 | org = "IOHK"; 33 | region = "eu-central-1"; 34 | producers = ["node-0" "node-1" "node-2"]; 35 | } 36 | ]; 37 | } 38 | -------------------------------------------------------------------------------- /physical/aws/security-groups/allow-explorer-gw.nix: -------------------------------------------------------------------------------- 1 | { region, org, pkgs, lib, ... }: 2 | with lib; { 3 | "allow-to-explorer-backends" = { resources, ... }: { 4 | inherit region; 5 | accessKeyId = pkgs.globals.ec2.credentials.accessKeyIds.${org}; 6 | _file = ./allow-explorer-gw.nix; 7 | description = "Allow to TCP/80,81 from explorer gateway"; 8 | rules = [{ 9 | protocol = "tcp"; 10 | fromPort = 80; 11 | toPort = 81; 12 | sourceIp = resources.elasticIPs."explorer-ip"; 13 | } 14 | { 15 | protocol = "tcp"; 16 | fromPort = 8080; 17 | toPort = 8080; 18 | sourceIp = resources.elasticIPs."explorer-ip"; 19 | } 20 | { 21 | protocol = "tcp"; 22 | fromPort = 9999; 23 | toPort = 9999; 24 | sourceIp = resources.elasticIPs."monitoring-ip"; 25 | }]; 26 | }; 27 | } 28 | -------------------------------------------------------------------------------- /scripts/create-libvirtd.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euxo pipefail 4 | 5 | cd "$(dirname "$0")/.." 6 | 7 | # https://nixos.org/nixops/manual/#idm140737322394336 8 | # Needed for libvirtd: 9 | # 10 | # virtualisation.libvirtd.enable = true; 11 | # networking.firewall.checkReversePath = false; 12 | 13 | # See also: https://github.com/simon3z/virt-deploy/issues/8#issuecomment-73111541 14 | 15 | if [ ! -d /var/lib/libvirt/images ]; then 16 | sudo mkdir -p /var/lib/libvirt/images 17 | sudo chgrp libvirtd /var/lib/libvirt/images 18 | sudo chmod g+w /var/lib/libvirt/images 19 | fi 20 | 21 | # Credential setup 22 | if [ ! 
-f ./static/graylog-creds.nix ]; then 23 | nix-shell -A gen-graylog-creds 24 | fi 25 | 26 | nixops destroy || true 27 | nixops delete || true 28 | nixops create ./deployments/cardano-libvirtd.nix -I nixpkgs=./nix 29 | nixops deploy --show-trace 30 | -------------------------------------------------------------------------------- /globals-shelley-dev.nix: -------------------------------------------------------------------------------- 1 | pkgs: with pkgs; with iohkNix.cardanoLib; rec { 2 | 3 | withMonitoring = false; 4 | withExplorer = false; 5 | 6 | environmentName = "shelley-dev"; 7 | 8 | environmentConfig = rec { 9 | relays = "relays.${pkgs.globals.domain}"; 10 | genesisFile = ./keys/genesis.json; 11 | genesisHash = builtins.replaceStrings ["\n"] [""] (builtins.readFile ./keys/GENHASH); 12 | nodeConfig = lib.recursiveUpdate environments.shelley_qa.nodeConfig { 13 | ShelleyGenesisFile = genesisFile; 14 | ShelleyGenesisHash = genesisHash; 15 | Protocol = "TPraos"; 16 | TraceBlockFetchProtocol = true; 17 | }; 18 | explorerConfig = mkExplorerConfig environmentName nodeConfig; 19 | }; 20 | 21 | ec2 = { 22 | credentials = { 23 | accessKeyIds = { 24 | IOHK = "dev-deployer"; 25 | dns = "dev-deployer"; 26 | }; 27 | }; 28 | }; 29 | } 30 | -------------------------------------------------------------------------------- /pkgs/node-update/default.nix: -------------------------------------------------------------------------------- 1 | { callPackage 2 | , crystal 3 | , lib 4 | , openssl 5 | , pkg-config 6 | }: 7 | 8 | let 9 | inherit (lib) cleanSourceWith hasSuffix removePrefix; 10 | filter = name: type: let 11 | baseName = baseNameOf (toString name); 12 | sansPrefix = removePrefix (toString ../.) 
name; 13 | in ( 14 | baseName == "src" || 15 | hasSuffix ".cr" baseName || 16 | hasSuffix ".yml" baseName || 17 | hasSuffix ".lock" baseName || 18 | hasSuffix ".nix" baseName 19 | ); 20 | in { 21 | node-update = crystal.buildCrystalPackage { 22 | pname = "node-update"; 23 | version = "0.1.0"; 24 | src = cleanSourceWith { 25 | inherit filter; 26 | src = ./.; 27 | name = "node-update"; 28 | }; 29 | format = "shards"; 30 | crystalBinaries.node-update.src = "src/node-update.cr"; 31 | shardsFile = ./shards.nix; 32 | buildInputs = [ openssl pkg-config ]; 33 | doCheck = true; 34 | doInstallCheck = false; 35 | }; 36 | } 37 | -------------------------------------------------------------------------------- /pkgs/kes-rotation/default.nix: -------------------------------------------------------------------------------- 1 | { callPackage 2 | , crystal 3 | , lib 4 | , openssl 5 | , pkg-config 6 | }: 7 | 8 | let 9 | inherit (lib) cleanSourceWith hasSuffix removePrefix; 10 | filter = name: type: let 11 | baseName = baseNameOf (toString name); 12 | sansPrefix = removePrefix (toString ../.) 
name; 13 | in ( 14 | baseName == "src" || 15 | hasSuffix ".cr" baseName || 16 | hasSuffix ".yml" baseName || 17 | hasSuffix ".lock" baseName || 18 | hasSuffix ".nix" baseName 19 | ); 20 | in { 21 | kes-rotation = crystal.buildCrystalPackage { 22 | pname = "kes-rotation"; 23 | version = "0.1.0"; 24 | src = cleanSourceWith { 25 | inherit filter; 26 | src = ./.; 27 | name = "kes-rotation"; 28 | }; 29 | format = "shards"; 30 | crystalBinaries.kes-rotation.src = "src/kes-rotation.cr"; 31 | shardsFile = ./shards.nix; 32 | buildInputs = [ openssl pkg-config ]; 33 | doCheck = true; 34 | doInstallCheck = false; 35 | }; 36 | } 37 | -------------------------------------------------------------------------------- /pkgs/snapshot-states/default.nix: -------------------------------------------------------------------------------- 1 | { callPackage 2 | , crystal 3 | , lib 4 | , openssl 5 | , pkg-config 6 | }: 7 | 8 | let 9 | inherit (lib) cleanSourceWith hasSuffix removePrefix; 10 | filter = name: type: let 11 | baseName = baseNameOf (toString name); 12 | sansPrefix = removePrefix (toString ../.) 
name; 13 | in ( 14 | baseName == "src" || 15 | hasSuffix ".cr" baseName || 16 | hasSuffix ".yml" baseName || 17 | hasSuffix ".lock" baseName || 18 | hasSuffix ".nix" baseName 19 | ); 20 | in { 21 | snapshot-states = crystal.buildCrystalPackage { 22 | pname = "snapshot-states"; 23 | version = "0.1.0"; 24 | src = cleanSourceWith { 25 | inherit filter; 26 | src = ./.; 27 | name = "snapshot-states"; 28 | }; 29 | format = "shards"; 30 | crystalBinaries.snapshot-states.src = "src/snapshot-states.cr"; 31 | shardsFile = ./shards.nix; 32 | buildInputs = [ openssl pkg-config ]; 33 | doCheck = true; 34 | doInstallCheck = false; 35 | }; 36 | } 37 | -------------------------------------------------------------------------------- /examples/shelley-testnet/globals.nix: -------------------------------------------------------------------------------- 1 | pkgs: with pkgs.iohkNix.cardanoLib; rec { 2 | 3 | withMonitoring = false; 4 | withExplorer = false; 5 | 6 | # This should match the name of the topology file. 
7 | environmentName = "example"; 8 | 9 | environmentConfig = rec { 10 | relays = "relays.${pkgs.globals.domain}"; 11 | genesisFile = ./keys/genesis.json; 12 | genesisHash = builtins.replaceStrings ["\n"] [""] (builtins.readFile ./keys/GENHASH); 13 | nodeConfig = 14 | pkgs.lib.recursiveUpdate 15 | environments.shelley_qa.nodeConfig 16 | { 17 | ShelleyGenesisFile = genesisFile; 18 | ShelleyGenesisHash = genesisHash; 19 | Protocol = "TPraos"; 20 | TraceForge = true; 21 | TraceTxInbound = true; 22 | }; 23 | explorerConfig = mkExplorerConfig environmentName nodeConfig; 24 | }; 25 | 26 | topology = import (./topologies + "/${environmentName}.nix") pkgs; 27 | 28 | ec2 = { 29 | credentials = { 30 | accessKeyIds = { 31 | IOHK = "dev-deployer"; 32 | dns = "dev-deployer"; 33 | }; 34 | }; 35 | }; 36 | } 37 | -------------------------------------------------------------------------------- /modules/common.nix: -------------------------------------------------------------------------------- 1 | pkgs: 2 | with pkgs; 3 | let 4 | boolOption = lib.mkOption { 5 | type = lib.types.bool; 6 | default = false; 7 | }; 8 | in { 9 | imports = [ 10 | iohk-ops-lib.modules.common 11 | ]; 12 | 13 | config = { 14 | services.monitoring-exporters.logging = false; 15 | }; 16 | 17 | options = { 18 | node = { 19 | coreIndex = lib.mkOption { 20 | type = lib.types.int; 21 | }; 22 | nodeId = lib.mkOption { 23 | type = lib.types.int; 24 | }; 25 | roles = { 26 | isByronProxy = boolOption; 27 | isCardanoCore = boolOption; 28 | isCardanoDensePool = boolOption; 29 | isCardanoLegacyCore = boolOption; 30 | isCardanoLegacyRelay = boolOption; 31 | isCardanoRelay = boolOption; 32 | isSnapshots = boolOption; 33 | isCustom = boolOption; 34 | isExplorer = boolOption; 35 | isExplorerBackend = boolOption; 36 | isFaucet = boolOption; 37 | isMonitor = boolOption; 38 | isMetadata = boolOption; 39 | isPublicSsh = boolOption; 40 | }; 41 | }; 42 | }; 43 | } 44 | 
-------------------------------------------------------------------------------- /scripts/genesis.spec.json: -------------------------------------------------------------------------------- 1 | { 2 | "activeSlotsCoeff": 0.1, 3 | "protocolParams": { 4 | "poolDecayRate": 0, 5 | "poolDeposit": 500000000, 6 | "protocolVersion": { 7 | "minor": 0, 8 | "major": 0 9 | }, 10 | "decentralisationParam": 0.5, 11 | "maxTxSize": 16384, 12 | "minFeeA": 44, 13 | "maxBlockBodySize": 65536, 14 | "keyMinRefund": 0, 15 | "minFeeB": 155381, 16 | "eMax": 1, 17 | "extraEntropy": { 18 | "tag": "NeutralNonce" 19 | }, 20 | "maxBlockHeaderSize": 1400, 21 | "keyDeposit": 400000, 22 | "keyDecayRate": 0, 23 | "nOpt": 50, 24 | "rho": 0.00178650067, 25 | "poolMinRefund": 0, 26 | "tau": 0.1, 27 | "a0": 0.1 28 | }, 29 | "protocolMagicId": 42, 30 | "startTime": "2020-05-15T02:15:00.000000000Z", 31 | "genDelegs": {}, 32 | "updateQuorum": 3, 33 | "maxMajorPV": 0, 34 | "initialFunds": {}, 35 | "maxLovelaceSupply": 4.5E+16, 36 | "networkMagic": 42, 37 | "epochLength": 1800, 38 | "staking": null, 39 | "slotsPerKESPeriod": 900, 40 | "slotLength": 1, 41 | "maxKESEvolutions": 8, 42 | "securityParam": 18 43 | } 44 | -------------------------------------------------------------------------------- /globals-testnet.nix: -------------------------------------------------------------------------------- 1 | pkgs: { 2 | 3 | deploymentName = "testnet"; 4 | 5 | dnsZone = "${pkgs.globals.domain}"; 6 | 7 | domain = "cardano-testnet.iohkdev.io"; 8 | 9 | withSubmitApi = true; 10 | withFaucet = true; 11 | withSmash = true; 12 | withMetadata = true; 13 | withHighLoadRelays = true; 14 | withSnapshots = true; 15 | 16 | faucetHostname = "faucet"; 17 | 18 | initialPythonExplorerDBSyncDone = true; 19 | 20 | environmentName = "testnet"; 21 | 22 | topology = import ./topologies/testnet.nix pkgs; 23 | 24 | ec2 = { 25 | credentials = { 26 | accessKeyIds = { 27 | IOHK = "default"; 28 | dns = "default"; 29 | }; 30 | }; 31 | instances 
= with pkgs.iohk-ops-lib.physical.aws; { 32 | metadata = t3a-xlarge; 33 | core-node = r5-large; 34 | }; 35 | }; 36 | 37 | relayUpdateArgs = "-m 50 -s -e devops@iohk.io"; 38 | # Trigger relay topology refresh 12 hours before next epoch 39 | relayUpdateHoursBeforeNextEpoch = 12; 40 | snapshotStatesArgs = "-e devops@iohk.io"; 41 | snapshotStatesS3Bucket = "updates-cardano-testnet"; 42 | 43 | alertChainDensityLow = "50"; 44 | 45 | metadataVarnishTtl = 15; 46 | } 47 | -------------------------------------------------------------------------------- /bench/lib-report.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # shellcheck disable=1091,2016 3 | 4 | tag_format_timetoblock_header="tx id,tx time,block time,block no,delta t" 5 | patch_run() { 6 | local dir=${1:-.} 7 | dir=$(realpath "$dir") 8 | 9 | if test "$(head -n1 "$dir"/analysis/timetoblock.csv)" \ 10 | != "${tag_format_timetoblock_header}" 11 | then echo "---| patching $dir/analysis/timetoblock.csv" 12 | sed -i "1 s_^_${tag_format_timetoblock_header}\n_; s_;_,_g" \ 13 | "$dir"/analysis/timetoblock.csv 14 | fi 15 | } 16 | 17 | package_run() { 18 | local tag report_name package 19 | dir=${1:-.} 20 | tag=$(run_tag "$dir") 21 | report_name=$(run_report_name "$dir") 22 | 23 | local dirgood dirbad 24 | dirgood=$(realpath ../bench-results-bad) 25 | dirbad=$(realpath ../bench-results) 26 | mkdir -p "$dirgood" 27 | mkdir -p "$dirbad" 28 | if is_run_broken "$dir" 29 | then resultroot=$dir 30 | else resultroot=$dirbad; fi 31 | 32 | package=${resultroot}/$report_name.tar.xz 33 | 34 | oprint "Packaging $tag as: $package" 35 | ln -sf "./runs/$tag" "$report_name" 36 | tar cf "$package" "$report_name" --xz --dereference || true 37 | rm -f "$report_name" 38 | } 39 | -------------------------------------------------------------------------------- /topologies/bench-distrib-6.nix: -------------------------------------------------------------------------------- 1 | { 2 | 
coreNodes = [ 3 | { 4 | name = "node-0"; 5 | nodeId = 0; 6 | org = "IOHK"; 7 | region = "eu-central-1"; 8 | producers = ["node-1" "node-2" "node-3" "node-4" "node-5"]; 9 | pools = 1; 10 | } 11 | { 12 | name = "node-1"; 13 | nodeId = 1; 14 | org = "IOHK"; 15 | region = "eu-central-1"; 16 | producers = ["node-0" "node-2" "node-3" "node-4" "node-5"]; 17 | pools = 1; 18 | } 19 | { 20 | name = "node-2"; 21 | nodeId = 2; 22 | org = "IOHK"; 23 | region = "ap-southeast-2"; 24 | producers = ["node-0" "node-1" "node-3" "node-4" "node-5"]; 25 | } 26 | { 27 | name = "node-3"; 28 | nodeId = 3; 29 | org = "IOHK"; 30 | region = "ap-southeast-2"; 31 | producers = ["node-0" "node-1" "node-2" "node-4" "node-5"]; 32 | pools = 1; 33 | } 34 | { 35 | name = "node-4"; 36 | nodeId = 4; 37 | org = "IOHK"; 38 | region = "us-east-1"; 39 | producers = ["node-0" "node-1" "node-2" "node-3" "node-5"]; 40 | pools = 1; 41 | } 42 | { 43 | name = "node-5"; 44 | nodeId = 5; 45 | org = "IOHK"; 46 | region = "us-east-1"; 47 | producers = ["node-0" "node-1" "node-2" "node-3" "node-4"]; 48 | } 49 | ]; 50 | 51 | relayNodes = []; 52 | } 53 | -------------------------------------------------------------------------------- /shell.nix: -------------------------------------------------------------------------------- 1 | # This derivation assumes a toplogy file where the following attributes are 2 | # defined: 3 | # 4 | # - bftCoreNodes 5 | # - stakePoolNodes 6 | # - coreNodes 7 | # 8 | { config ? {} 9 | , pkgs ? 
import ./nix { 10 | inherit config; 11 | } 12 | }: 13 | with pkgs; with lib; 14 | let 15 | nivOverrides = writeShellScriptBin "niv-overrides" '' 16 | niv --sources-file ${toString globals.sourcesJsonOverride} $@ 17 | ''; 18 | 19 | in mkShell (globals.environmentVariables // { 20 | nativeBuildInputs = [ 21 | awscli2 22 | bashInteractive 23 | cardano-cli 24 | dnsutils 25 | niv 26 | locli 27 | nivOverrides 28 | nix 29 | nix-diff 30 | nixops 31 | pandoc 32 | perl 33 | pstree 34 | telnet 35 | git 36 | direnv 37 | nix-direnv 38 | lorri 39 | relayUpdateTimer 40 | snapshotStatesTimer 41 | s3cmd 42 | icdiff 43 | ] ++ (lib.optionals pkgs.stdenv.hostPlatform.isLinux ([ 44 | # Those fail to compile under macOS: 45 | node-update 46 | snapshot-states 47 | # script NOT for use on mainnet: 48 | ] ++ lib.optional (globals.environmentName != "mainnet") kes-rotation)); 49 | 50 | NIX_PATH = "nixpkgs=${path}"; 51 | NIXOPS_DEPLOYMENT = "${globals.deploymentName}"; 52 | 53 | passthru = { 54 | gen-graylog-creds = iohk-ops-lib.scripts.gen-graylog-creds { staticPath = ./static; }; 55 | }; 56 | }) 57 | -------------------------------------------------------------------------------- /scripts/resize-ebs-disks.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | USAGE="Usage: $0 TARGET_SIZE_GB node1 [node2 ... nodeN]" 6 | 7 | if [ $# -lt 2 ]; then 8 | echo "$USAGE" 9 | exit 1 10 | fi 11 | 12 | TARGET_SIZE="$1"; shift 13 | TARGET_NODES=("$@") 14 | 15 | cd "$(dirname "$0")/.." 
16 | 17 | DEPLOY_JSON=$(nixops export -d "$NIXOPS_DEPLOYMENT") 18 | 19 | for r in "${TARGET_NODES[@]}"; do 20 | AWS_PROFILE=$(jq -r ".[].resources.\"$r\".\"ec2.accessKeyId\"" <<< "$DEPLOY_JSON") 21 | REGION=$(jq -r ".[].resources.\"$r\".\"ec2.region\"" <<< "$DEPLOY_JSON") 22 | VOL_ID=$( (jq -r ".[].resources.\"$r\".\"ec2.blockDeviceMapping\"" | jq -r ".\"/dev/xvda\".volumeId") <<< "$DEPLOY_JSON") 23 | echo "resizing root volume for $r (profile: $AWS_PROFILE region: $REGION volume: $VOL_ID)" 24 | export AWS_PROFILE 25 | aws --region "$REGION" ec2 modify-volume --size "$TARGET_SIZE" --volume-id "$VOL_ID" 26 | done 27 | 28 | echo "Waiting 30 seconds for the new block device size to be recognized on the targets..." 29 | sleep 30 30 | 31 | # Grow the partition, grow the fs, and symlink /dev/xvda if not present to satisfy legacy nixops 32 | nixops ssh-for-each -p --include "${TARGET_NODES[@]}" -- ' 33 | nix-shell -p cloud-utils --run " \ 34 | { growpart /dev/xvda 1 || growpart /dev/nvme0n1 2; } \ 35 | && resize2fs /dev/disk/by-label/nixos \ 36 | && { [ -e /dev/xvda ] || ln -s /dev/nvme0n1 /dev/xvda; } \ 37 | " 38 | ' 39 | -------------------------------------------------------------------------------- /globals-alonzo-purple.nix: -------------------------------------------------------------------------------- 1 | pkgs: with pkgs.iohkNix.cardanoLib; with pkgs.globals; { 2 | 3 | # This should match the name of the topology file. 
4 | deploymentName = "alonzo-purple"; 5 | 6 | withFaucet = true; 7 | withSmash = true; 8 | explorerBackends = { 9 | a = explorer12; 10 | }; 11 | explorerBackendsInContainers = true; 12 | 13 | environmentConfigLocal = rec { 14 | relaysNew = "relays.${domain}"; 15 | nodeConfig = 16 | pkgs.lib.recursiveUpdate 17 | environments.alonzo-qa.nodeConfig 18 | { 19 | ShelleyGenesisFile = ./keys/genesis.json; 20 | ShelleyGenesisHash = builtins.replaceStrings ["\n"] [""] (builtins.readFile ./keys/GENHASH); 21 | ByronGenesisFile = ./keys/byron/genesis.json; 22 | ByronGenesisHash = builtins.replaceStrings ["\n"] [""] (builtins.readFile ./keys/byron/GENHASH); 23 | TestShelleyHardForkAtEpoch = 1; 24 | TestAllegraHardForkAtEpoch = 2; 25 | TestMaryHardForkAtEpoch = 3; 26 | TestAlonzoHardForkAtEpoch = 4; 27 | MaxKnownMajorProtocolVersion = 5; 28 | LastKnownBlockVersion-Major = 5; 29 | }; 30 | explorerConfig = mkExplorerConfig environmentName nodeConfig; 31 | }; 32 | 33 | # Every 5 hours 34 | relayUpdatePeriod = "0/5:00:00"; 35 | 36 | ec2 = { 37 | credentials = { 38 | accessKeyIds = { 39 | IOHK = "default"; 40 | dns = "dev"; 41 | }; 42 | }; 43 | }; 44 | } 45 | -------------------------------------------------------------------------------- /scripts/gen-grafana-creds.nix: -------------------------------------------------------------------------------- 1 | { user ? null, password ? null, pkgs ? 
import ../nix { } }: 2 | pkgs.stdenv.mkDerivation { 3 | name = "gen-grafana-creds"; 4 | buildInputs = with pkgs; [ pwgen ]; 5 | shellHook = '' 6 | credsFilename="grafana-creds.nix" # Default grafana static filename 7 | defaultUser="root" # Default administrative user 8 | password="${toString password}" # password supplied by cli arg 9 | passwordChar="32" # Default grafana password length 10 | staticPath=${toString ../static} # Absolute path to the static dir 11 | user="${toString user}" # user supplied by cli arg 12 | 13 | if [[ -e "$staticPath/$credsFilename" ]]; then 14 | echo "File already exists: $staticPath/$credsFilename, aborting!" 15 | exit 1 16 | elif [[ -z $user ]]; then 17 | echo "User is empty -- setting to a default administrative user of $defaultUser" 18 | user=$defaultUser 19 | fi 20 | echo "Writing grafana creds for user $user..." 21 | if [[ -z $password ]]; then 22 | echo "Password is empty -- setting to a random alphanumeric password of length $passwordChar" 23 | password=$(pwgen -s $passwordChar 1) 24 | fi 25 | 26 | umask 077 27 | cd $path 28 | cat << EOF > $staticPath/$credsFilename 29 | { 30 | user = "$user"; 31 | password = "$password"; 32 | } 33 | EOF 34 | exit 0 35 | ''; 36 | } 37 | -------------------------------------------------------------------------------- /bench/latency-map.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | RUN_META_JSON=${1:-last-meta.json} 4 | 5 | hostmap=$(jq ' 6 | .public_ip 7 | | values 8 | | map({ "\(.hostname)": .public_ip}) 9 | | add' "$RUN_META_JSON") 10 | 11 | { 12 | echo 'obtaining latency matrix for the hostmap:' 13 | jq . 
-Cc <<<$hostmap 14 | } >&2 15 | 16 | nixops ssh-for-each --parallel -- " 17 | self=\$(hostname) 18 | 19 | function probe() { 20 | local host=\$1 ip=\$2 21 | 22 | ping -qAc21 \$ip | 23 | grep 'rtt\|transmitted' | 24 | sed 's_, \|/_\n_g' | 25 | sed 's_ packets transmitted\| received\| packet loss\|time \|rtt min\|avg\|max\|mdev = \|ipg\|ewma \| ms\|ms\|%__g' | 26 | grep -v '^$\|^pipe ' | 27 | jq '{ source: \"'\$self'\" 28 | , target: \"'\$host'\" 29 | 30 | , sent: .[0] 31 | , received: .[1] 32 | , percents_lost: .[2] 33 | , duration_ms: .[3] 34 | , ipg: .[8] 35 | , ewma: .[9] 36 | 37 | , rtt: { min: .[4], avg: .[5], max: .[6], mdev: .[7] } 38 | }' --slurp --compact-output 39 | } 40 | 41 | hostmap=${hostmap@Q} 42 | 43 | for host in \$(jq 'keys | .[]' <<<\$hostmap --raw-output) 44 | do ip=\$(jq '.[\$host]' --arg host \$host <<<\$hostmap --raw-output) 45 | probe \$host \$ip\ & 46 | done" 2>&1 | 47 | sed 's/^[^>]*> //' 48 | -------------------------------------------------------------------------------- /topologies/shelley-qa-p2p.nix: -------------------------------------------------------------------------------- 1 | pkgs: with pkgs; with lib; with topology-lib; 2 | let 3 | 4 | regions = { 5 | a = { name = "eu-central-1"; # Europe (Frankfurt); 6 | }; 7 | b = { name = "us-east-2"; # US East (Ohio) 8 | }; 9 | c = { name = "ap-southeast-1"; # Asia Pacific (Singapore) 10 | }; 11 | d = { name = "eu-west-2"; # Europe (London) 12 | }; 13 | e = { name = "us-west-1"; # US West (N. 
California) 14 | }; 15 | f = { name = "ap-northeast-1"; # Asia Pacific (Tokyo) 16 | }; 17 | }; 18 | 19 | nodes = with regions; map (composeAll [ 20 | (withAutoRestartEvery 6) 21 | (withModule { 22 | services.cardano-node = { 23 | asserts = true; 24 | systemdSocketActivation = mkForce false; 25 | }; 26 | }) 27 | ]) (concatLists [ 28 | (mkStakingPoolNodes "d" 1 "a" "P2P1" { org = "IOHK"; nodeId = 1; }) 29 | (mkStakingPoolNodes "e" 2 "b" "P2P2" { org = "IOHK"; nodeId = 2; }) 30 | (mkStakingPoolNodes "f" 3 "c" "P2P3" { org = "IOHK"; nodeId = 3; }) 31 | ]); 32 | 33 | relayNodes = filter (n: !(n ? stakePool)) nodes; 34 | 35 | coreNodes = filter (n: n ? stakePool) nodes; 36 | 37 | in { 38 | 39 | inherit coreNodes relayNodes regions; 40 | 41 | monitoring = { 42 | services.monitoring-services.publicGrafana = false; 43 | services.nginx.virtualHosts."monitoring.${globals.dnsZone}".locations."/p" = { 44 | root = ../static/pool-metadata; 45 | }; 46 | }; 47 | 48 | } 49 | -------------------------------------------------------------------------------- /modules/load-client.nix: -------------------------------------------------------------------------------- 1 | pkgs: { config, options, nodes, name, ... 
}: 2 | with pkgs; with lib; 3 | let 4 | cfg = config.services.cardano-node; 5 | nodePort = globals.cardanoNodePort; 6 | hostAddr = getListenIp nodes.${name}; 7 | monitoringPort = globals.cardanoNodePrometheusExporterPort; 8 | in 9 | { 10 | imports = [ 11 | cardano-ops.modules.common 12 | (sourcePaths.cardano-node + "/nix/nixos") 13 | ]; 14 | 15 | networking.firewall = { 16 | allowedTCPPorts = [ nodePort monitoringPort ]; 17 | 18 | # TODO: securing this depends on CSLA-27 19 | # NOTE: this implicitly blocks DHCPCD, which uses port 68 20 | allowedUDPPortRanges = [ { from = 1024; to = 65000; } ]; 21 | }; 22 | 23 | services.cardano-node = { 24 | enable = true; 25 | inherit cardanoNodePackages; 26 | rtsArgs = [ "-N2" "-A10m" "-qg" "-qb" "-M3G"]; 27 | environment = globals.environmentName; 28 | port = nodePort; 29 | environments = { 30 | "${globals.environmentName}" = globals.environmentConfig; 31 | }; 32 | nodeConfig = globals.environmentConfig.nodeConfig // { 33 | hasPrometheus = [ hostAddr globals.cardanoNodePrometheusExporterPort ]; 34 | # Use Journald output: 35 | defaultScribes = [ 36 | [ 37 | "JournalSK" 38 | "cardano" 39 | ] 40 | ]; 41 | }; 42 | topology = iohkNix.cardanoLib.mkEdgeTopology { 43 | inherit (cfg) port; 44 | edgeHost = globals.relaysNew; 45 | edgeNodes = []; 46 | }; 47 | }; 48 | } 49 | -------------------------------------------------------------------------------- /roles/snapshots.nix: -------------------------------------------------------------------------------- 1 | pkgs: { name, config, options, ... 
}: 2 | with pkgs; 3 | 4 | let 5 | getSrc = name: globals.snapshots.${name} or sourcePaths.${name}; 6 | 7 | dbSyncPkgs = let s = getSrc "cardano-db-sync"; in import (s + "/nix") { gitrev = s.rev; }; 8 | cardanoNodePackages = getCardanoNodePackages (getSrc "cardano-node"); 9 | inherit (nodeFlake.packages.${system}) cardano-node cardano-cli; 10 | 11 | in { 12 | imports = [ 13 | (cardano-ops.modules.db-sync { 14 | inherit dbSyncPkgs cardanoNodePackages; 15 | }) 16 | ]; 17 | 18 | # Use sigint for a clean stop signal 19 | # 20 | # This will result in success status on the next release after 8.1.1 for SIGINT, 21 | # whereas SIGTERM will still return 1 despite otherwise clean exit. 22 | # 23 | # Until then, temporarily force recognition of RC 1 as success for snapshots automation, 24 | # as clean service stoppage is expected to return 1. 25 | # 26 | # Refs: 27 | # https://github.com/input-output-hk/cardano-node/issues/5312 28 | # https://github.com/input-output-hk/cardano-node/pull/5356 29 | systemd.services.cardano-node.serviceConfig.KillSignal = "SIGINT"; 30 | systemd.services.cardano-node.serviceConfig.SuccessExitStatus = "FAILURE"; 31 | 32 | # Create a new snapshot every 24h (if not exist alreay): 33 | services.cardano-db-sync.takeSnapshot = "always"; 34 | 35 | # Increase stop timeout to 6h, to allow for snapshot creation on mainnet 36 | systemd.services.cardano-db-sync.serviceConfig.TimeoutStopSec = lib.mkForce "6h"; 37 | } 38 | -------------------------------------------------------------------------------- /bench/profile-genesis.jq: -------------------------------------------------------------------------------- 1 | def genesis_protocol_params($p; $composition): 2 | { activeSlotsCoeff: $p.active_slots_coeff 3 | , epochLength: $p.epoch_length 4 | , securityParam: $p.parameter_k 5 | , slotLength: $p.slot_duration 6 | , maxTxSize: $p.max_tx_size 7 | , protocolParams: 8 | { "decentralisationParam": $p.decentralisation_param 9 | , "maxBlockBodySize": $p.max_block_size 10 
| , "nOpt": $p.n_pools 11 | } 12 | }; 13 | 14 | def genesis_cli_args($p; $composition; $cmd): 15 | { create0: 16 | [ "--supply", $p.total_balance 17 | , "--testnet-magic", $p.protocol_magic 18 | ] 19 | , create1: 20 | ([ "--supply", ($p.total_balance - $p.pools_balance) 21 | , "--gen-genesis-keys", $composition.n_bft_hosts 22 | , "--supply-delegated", $p.pools_balance 23 | , "--gen-pools", $p.n_pools 24 | , "--gen-stake-delegs", ([$p.n_pools, $p.delegators] | max) 25 | , "--testnet-magic", $p.protocol_magic 26 | , "--num-stuffed-utxo", ($p.utxo - $p.delegators - 1) 27 | ## 1 is for the generator's very own funds. 28 | ] + 29 | if $p.dense_pool_density != 1 30 | then 31 | [ "--bulk-pool-cred-files", $composition.n_dense_hosts 32 | , "--bulk-pools-per-file", $p.dense_pool_density ] 33 | else [] end) 34 | , pools: 35 | [ "--argjson", "initialPoolCoin", 36 | $p.pools_balance / $p.n_pools 37 | ] 38 | } | .[$cmd]; 39 | -------------------------------------------------------------------------------- /scripts/test-deploy.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | cmd="${1:-build}"; shift 4 | 5 | CLEANUP=() 6 | at_exit() { 7 | for cleanup in "${CLEANUP[@]}" 8 | do eval ${cleanup}; done 9 | } 10 | trap at_exit EXIT 11 | 12 | nixops="$(nix-build ./nix -A nixops --no-out-link)" 13 | test -n "${nixops}" || { echo "ERROR: couldn't evaluate 'nixops'" >&2; exit 1; } 14 | 15 | nixexpr="$(mktemp --tmpdir test-deploy-XXXXXX.nix)" 16 | CLEANUP+=("rm -f ${nixexpr}") 17 | 18 | cat >"${nixexpr}" < { 20 | networkExprs = [ 21 | "$(realpath deployments/cardano-aws.nix)" 22 | "$(realpath physical/mock.nix)" 23 | ]; 24 | uuid = "11111111-1111-1111-1111-111111111111"; 25 | deploymentName = "deployme"; 26 | args = {}; 27 | checkConfigurationOptions = false; 28 | pluginNixExprs = []; 29 | } 30 | EOF 31 | 32 | export NIX_PATH="$NIX_PATH:nixops=${nixops}/share/nix/nixops" 33 | NODES=( 34 | explorer 35 | node-1 36 | ) 37 | 
ARGS=( 38 | "${nixexpr}" 39 | --show-trace 40 | -A machines 41 | --arg names "[ $(for x in "${NODES[@]}" 42 | do echo "\"$x\" " 43 | done) ]" 44 | ) 45 | 46 | case ${cmd} in 47 | build ) nix-build "${ARGS[@]}";; 48 | build-local ) nix-build "${ARGS[@]}" --arg;; 49 | repl ) echo -e "---\n--- dep = import ${nixexpr}\n---" 50 | nix repl "${nixexpr}";; 51 | * ) { echo "ERROR: valid commands: build, repl" >&2; exit 1; };; esac 52 | -------------------------------------------------------------------------------- /examples/shelley-testnet/scripts/setup-stakepools-block-production.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # This is a simple test that submits an update proposal so that stakepools can 4 | # produce blocks, and registers stakepools. 5 | # 6 | # This script requires the following environment variables to be defined: 7 | # 8 | # - BFT_NODES: names of the BFT nodes 9 | # - POOL_NODES: names of the stake pool nodes 10 | # 11 | set -euo pipefail 12 | 13 | [ -z ${BFT_NODES+x} ] && (echo "Environment variable BFT_NODES must be defined"; exit 1) 14 | [ -z ${POOL_NODES+x} ] && (echo "Environment variable POOL_NODES must be defined"; exit 1) 15 | 16 | if [ -z ${1+x} ]; 17 | then 18 | echo "'redeploy' command was not specified, so the test will run on an existing testnet"; 19 | else 20 | case $1 in 21 | redeploy ) 22 | echo "Redeploying the testnet" 23 | nixops destroy 24 | ./scripts/create-shelley-genesis-and-keys.sh 25 | nixops deploy -k 26 | ;; 27 | * ) 28 | echo "Unknown command $1" 29 | exit 30 | esac 31 | fi 32 | 33 | BFT_NODES=($BFT_NODES) 34 | POOL_NODES=($POOL_NODES) 35 | 36 | for f in ${BFT_NODES[@]} 37 | do 38 | nixops scp $f examples/shelley-testnet/scripts/submit-update-proposal.sh /root/ --to 39 | done 40 | 41 | for f in ${POOL_NODES[@]} 42 | do 43 | nixops scp $f examples/shelley-testnet/scripts/register-stake-pool.sh /root/ --to 44 | done 45 | 46 | for f in ${BFT_NODES[@]} 47 | 
do 48 | nixops ssh $f "./submit-update-proposal.sh" 49 | done 50 | 51 | for f in ${POOL_NODES[@]} 52 | do 53 | nixops ssh $f "./register-stake-pool.sh" & 54 | done 55 | 56 | wait 57 | -------------------------------------------------------------------------------- /bench/lib-benchrun.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | generate_run_tag() { 4 | local batch=$1 prof=$2 5 | 6 | echo "$(date +'%Y'-'%m'-'%d'-'%H.%M').$batch.$prof" 7 | } 8 | 9 | run_report_name() { 10 | local metafile meta prof suffix= 11 | dir=${1:-.} 12 | metafile="$dir"/meta.json 13 | meta=$(jq .meta "$metafile" --raw-output) 14 | batch=$(jq .batch <<<$meta --raw-output) 15 | prof=$(jq .profile <<<$meta --raw-output) 16 | date=$(date +'%Y'-'%m'-'%d'-'%H%M' --date=@"$(jq .timestamp <<<$meta)") 17 | 18 | test -n "$meta" -a -n "$prof" || 19 | fail "Bad run meta.json format: $metafile" 20 | 21 | if is_run_broken "$dir" 22 | then suffix='broken'; fi 23 | 24 | echo "$date.$batch.$prof${suffix:+.$suffix}" 25 | } 26 | 27 | is_run_broken() { 28 | local dir=${1:-} 29 | 30 | test -f "$dir"/analysis.json && 31 | jqtest .anomalies "$dir"/analysis.json || 32 | jqtest .broken "$dir"/meta.json 33 | } 34 | 35 | mark_run_broken() { 36 | local dir=$1 errors=$2 tag 37 | tag=$(run_tag "$dir") 38 | 39 | test -n "$2" || 40 | fail "asked to mark $tag as anomalous, but no anomalies passed" 41 | 42 | oprint "marking run as broken (results will be stored separately): $tag" 43 | json_file_prepend "$dir/analysis.json" '{ anomalies: $anomalies }' \ 44 | --argjson anomalies "$errors" <<<0 45 | } 46 | 47 | process_broken_run() { 48 | local dir=${1:-.} 49 | 50 | op_stop 51 | fetch_run "$dir" 52 | analyse_run "$dir" 53 | package_run "$dir" "$(realpath ../bench-results-bad)" 54 | } 55 | 56 | -------------------------------------------------------------------------------- /pkgs/varnish/modules.nix: 
-------------------------------------------------------------------------------- 1 | { lib, stdenv, fetchFromGitHub, autoreconfHook, pkg-config, varnish, docutils, removeReferencesTo }: 2 | let 3 | common = { version, sha256, extraNativeBuildInputs ? [] }: 4 | stdenv.mkDerivation rec { 5 | pname = "${varnish.name}-modules"; 6 | inherit version; 7 | 8 | src = fetchFromGitHub { 9 | owner = "varnish"; 10 | repo = "varnish-modules"; 11 | rev = version; 12 | inherit sha256; 13 | }; 14 | 15 | nativeBuildInputs = [ 16 | autoreconfHook 17 | docutils 18 | pkg-config 19 | removeReferencesTo 20 | varnish.python # use same python version as varnish server 21 | ]; 22 | 23 | buildInputs = [ varnish ]; 24 | 25 | postPatch = '' 26 | substituteInPlace bootstrap --replace "''${dataroot}/aclocal" "${varnish.dev}/share/aclocal" 27 | substituteInPlace Makefile.am --replace "''${LIBVARNISHAPI_DATAROOTDIR}/aclocal" "${varnish.dev}/share/aclocal" 28 | ''; 29 | 30 | postInstall = "find $out -type f -exec remove-references-to -t ${varnish.dev} '{}' +"; # varnish.dev captured only as __FILE__ in assert messages 31 | 32 | meta = with lib; { 33 | description = "Collection of Varnish Cache modules (vmods) by Varnish Software"; 34 | homepage = "https://github.com/varnish/varnish-modules"; 35 | inherit (varnish.meta) license platforms maintainers; 36 | }; 37 | }; 38 | in 39 | { 40 | modules15 = common { 41 | version = "0.15.1"; 42 | sha256 = "1lwgjhgr5yw0d17kbqwlaj5pkn70wvaqqjpa1i0n459nx5cf5pqj"; 43 | }; 44 | modules19 = common { 45 | version = "0.19.0"; 46 | sha256 = "0qq5g6bbd1a1ml1wk8jj9z39a899jzqbf7aizr3pvyz0f4kz8mis"; 47 | }; 48 | } 49 | -------------------------------------------------------------------------------- /physical/aws/security-groups/allow-peers.nix: -------------------------------------------------------------------------------- 1 | { region, org, pkgs, nodes, lib, ... 
}: 2 | with lib; 3 | let 4 | inherit (pkgs.globals) cardanoNodePort topology; 5 | inherit (topology) coreNodes relayNodes; 6 | privateRelayNodes = topology.privateRelayNodes or []; 7 | maxRules = pkgs.globals.maxRulesPerSg.${org} or 70; 8 | 9 | concernedCoreOrPrivateNodes = map (c: c.name) (filter (c: c.region == region && c.org == org) (coreNodes ++ privateRelayNodes)); 10 | connectingCoreNodes = filter (c: any (p: builtins.elem p concernedCoreOrPrivateNodes) c.producers) coreNodes; 11 | connectingRelays = partition (r: any (p: builtins.elem p concernedCoreOrPrivateNodes) r.producers) (privateRelayNodes ++ relayNodes); 12 | maxPrivilegedRelays = maxRules - (length connectingCoreNodes); 13 | privilegedRelays = lib.take maxPrivilegedRelays 14 | (builtins.trace (let nbCrelays = length connectingRelays.right; 15 | in if (nbCrelays > maxPrivilegedRelays) 16 | then "WARNING: ${toString (nbCrelays - maxPrivilegedRelays)} relays (${toString (map (n: n.name) (drop maxPrivilegedRelays connectingRelays.right))}) won't be able to connect to core/private nodes under ${org}/${region}" 17 | else "${org}/${region}: ${toString (maxPrivilegedRelays - nbCrelays)} relay nodes margin before hitting `globals.maxRulesPerSg.${org}` limit") 18 | (connectingRelays.right ++ connectingRelays.wrong)); 19 | peers = map (n: n.name) (connectingCoreNodes ++ privilegedRelays) 20 | # Allow explorer to connect directly to core nodes if there is no relay nodes. 21 | ++ (lib.optional (nodes ? 
explorer && relayNodes == []) "explorer"); 22 | in 23 | pkgs.iohk-ops-lib.physical.aws.security-groups.allow-to-tcp-port 24 | "cardano" cardanoNodePort peers { 25 | inherit region org pkgs; 26 | } 27 | -------------------------------------------------------------------------------- /bench/lib-attic.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # shellcheck disable=1091,2016 3 | 4 | analysis_list+=() 5 | analysis_message_types() { 6 | local dir=${1:-.} mach tnum sub_tids; shift 7 | local machines=("$@") 8 | 9 | for mach in ${machines[*]} 10 | do echo -n .$mach >&2 11 | local types key 12 | "$dir"/tools/msgtypes.sh \ 13 | "$dir/analysis/logs-$mach"/node-*.json | 14 | while read -r ty 15 | test -n "$ty" 16 | do key=$(jq .kind <<<$ty -r | sed 's_.*\.__g') 17 | jq '{ key: .kind, value: $count }' <<<$ty \ 18 | --argjson count "$(grep -Fh "$key\"" \ 19 | "$dir/analysis/logs-$mach"/node-*.json | 20 | wc -l)" 21 | done | 22 | jq '{ "\($name)": from_entries } 23 | ' --slurp --arg name "$mach" 24 | # jq '{ "\($name)": $types } 25 | # ' --arg name "$mach" --null-input \ 26 | # --argjson types "$("$dir"/tools/msgtypes.sh \ 27 | # "$dir/analysis/logs-$mach"/node-*.json | 28 | # jq . --slurp)" 29 | done | analysis_append "$dir" \ 30 | '{ message_types: add 31 | }' --slurp 32 | } 33 | 34 | analysis_list+=() 35 | analysis_tx_losses() { 36 | local dir=${1:-.} 37 | dir=$(realpath "$dir") 38 | 39 | pushd "$dir"/analysis >/dev/null || return 1 40 | if jqtest '(.tx_stats.tx_missing != 0)' "$dir"/analysis.json 41 | then echo -n " missing-txs" 42 | . 
"$dir"/tools/lib-loganalysis.sh 43 | op_analyse_losses; fi 44 | popd >/dev/null || return 1 45 | } 46 | -------------------------------------------------------------------------------- /bench/lib-tag.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # shellcheck disable=1091,2016 3 | 4 | run_tag() { 5 | jq --raw-output .meta.tag "$(realpath "${1:-.}")/meta.json" 6 | } 7 | 8 | cluster_last_meta_tag() { 9 | local meta=./last-meta.json tag dir meta2 10 | jq . "${meta}" >/dev/null || fail "malformed run metadata: ${meta}" 11 | 12 | tag=$(jq --raw-output .meta.tag "${meta}") 13 | test -n "${tag}" || fail "bad tag in run metadata: ${meta}" 14 | 15 | dir="./runs/${tag}" 16 | test -d "${dir}" || 17 | fail "bad tag in run metadata: ${meta} -- ${dir} is not a directory" 18 | meta2=${dir}/meta.json 19 | jq --exit-status . "${meta2}" >/dev/null || 20 | fail "bad tag in run metadata: ${meta} -- ${meta2} is not valid JSON" 21 | 22 | test "$(realpath ./last-meta.json)" = "$(realpath "${meta2}")" || 23 | fail "bad tag in run metadata: ${meta} -- ${meta2} is different from ${meta}" 24 | echo "${tag}" 25 | } 26 | 27 | fetch_tag() { 28 | local tag 29 | tag=${1:-$(cluster_last_meta_tag)} 30 | 31 | fetch_run "./runs/${tag}" 32 | } 33 | 34 | analyse_tag() { 35 | local tag 36 | tag=${1:-$(cluster_last_meta_tag)} 37 | 38 | analyse_run "${tagroot}/${tag}" || true 39 | } 40 | 41 | sanity_check_tag() { 42 | local tag 43 | tag=${1:-$(cluster_last_meta_tag)} 44 | 45 | sanity_check_run "${tagroot}/${tag}" 46 | } 47 | 48 | tag_report_name() { 49 | local tag 50 | tag=${1:-$(cluster_last_meta_tag)} 51 | 52 | run_report_name "${tagroot}/${tag}" 53 | } 54 | 55 | 56 | package_tag() { 57 | local tag 58 | tag=${1:-$(cluster_last_meta_tag)} 59 | 60 | package_run "${tagroot}/${tag}" 61 | } 62 | -------------------------------------------------------------------------------- /topologies/bench-distrib-9.nix: 
-------------------------------------------------------------------------------- 1 | { 2 | coreNodes = [ 3 | { 4 | name = "node-0"; 5 | nodeId = 0; 6 | org = "IOHK"; 7 | region = "eu-central-1"; 8 | producers = ["node-1" "node-2" "node-3"]; 9 | pools = 1; 10 | } 11 | { 12 | name = "node-1"; 13 | nodeId = 1; 14 | org = "IOHK"; 15 | region = "eu-central-1"; 16 | producers = ["node-0" "node-2" "node-6"]; 17 | pools = 1; 18 | } 19 | { 20 | name = "node-2"; 21 | nodeId = 2; 22 | org = "IOHK"; 23 | region = "eu-central-1"; 24 | producers = ["node-0" "node-1"]; 25 | } 26 | { 27 | name = "node-3"; 28 | nodeId = 3; 29 | org = "IOHK"; 30 | region = "ap-southeast-2"; 31 | producers = ["node-4" "node-5" "node-0"]; 32 | pools = 1; 33 | } 34 | { 35 | name = "node-4"; 36 | nodeId = 4; 37 | org = "IOHK"; 38 | region = "ap-southeast-2"; 39 | producers = ["node-3" "node-5" "node-7"]; 40 | pools = 1; 41 | } 42 | { 43 | name = "node-5"; 44 | nodeId = 5; 45 | org = "IOHK"; 46 | region = "ap-southeast-2"; 47 | producers = ["node-3" "node-4"]; 48 | } 49 | { 50 | name = "node-6"; 51 | nodeId = 6; 52 | org = "IOHK"; 53 | region = "us-east-1"; 54 | producers = ["node-7" "node-8" "node-1"]; 55 | pools = 1; 56 | } 57 | { 58 | name = "node-7"; 59 | nodeId = 7; 60 | org = "IOHK"; 61 | region = "us-east-1"; 62 | producers = ["node-6" "node-8" "node-4"]; 63 | pools = 1; 64 | } 65 | { 66 | name = "node-8"; 67 | nodeId = 8; 68 | org = "IOHK"; 69 | region = "us-east-1"; 70 | producers = ["node-6" "node-7"]; 71 | } 72 | ]; 73 | 74 | relayNodes = []; 75 | } 76 | -------------------------------------------------------------------------------- /scripts/gen-graylog-creds.nix: -------------------------------------------------------------------------------- 1 | { user ? null, password ? null, pkgs ? 
import ../nix { } }: 2 | pkgs.mkShell { 3 | name = "gen-graylog-creds"; 4 | buildInputs = with pkgs; [ pwgen gnused ]; 5 | shellHook = '' 6 | clusterChar="96" # Default graylog cluster secret length 7 | clusterSecret="" # Var for the clusterSecret 8 | credsFilename="graylog-creds.nix" # Default graylog static filename 9 | defaultUser="root" # Default administrative user 10 | password="${toString password}" # password supplied by cli arg 11 | passwordChar="32" # Default graylog password length 12 | passwordHash="" # Sha256 hash of the plaintext password 13 | staticPath=${toString ../static} # Absolute path to the static dir 14 | user="${toString user}" # user supplied by cli arg 15 | 16 | if [[ -e "$staticPath/$credsFilename" ]]; then 17 | echo "File already exists: $staticPath/$credsFilename, aborting!" 18 | exit 1 19 | elif [[ -z $user ]]; then 20 | echo "User is empty -- setting to a default administrative user of $defaultUser" 21 | user=$defaultUser 22 | fi 23 | echo "Writing graylog creds for user $user..." 
24 | if [[ -z $password ]]; then 25 | echo "Password is empty -- setting to a random alphanumeric password of length $passwordChar" 26 | password=$(pwgen -s $passwordChar 1) 27 | fi 28 | 29 | passwordHash=$(echo -n $password | sha256sum | sed -z 's/ -\n//g') 30 | clusterSecret=$(pwgen -s $clusterChar 1) 31 | 32 | umask 077 33 | cd $path 34 | cat << EOF > $staticPath/$credsFilename 35 | { 36 | user = "$user"; 37 | password = "$password"; 38 | passwordHash = "$passwordHash"; 39 | clusterSecret = "$clusterSecret"; 40 | } 41 | EOF 42 | exit 0 43 | ''; 44 | } 45 | -------------------------------------------------------------------------------- /bench/lib-fetch.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # shellcheck disable=2155 3 | 4 | 5 | fetch_effective_service_node_config() { 6 | local mach=$1 svc=$2 7 | 8 | local svcfilename execstart configfilename 9 | svcfilename=$(nixops ssh "$mach" -- \ 10 | sh -c "'systemctl status $svc || true'" 2>&1 | 11 | grep "/nix/store/.*/$svc\.service" | 12 | cut -d'(' -f2 | cut -d';' -f1 || 13 | fail "Failed to fetch & parse status of '$svc' on '$mach'") 14 | execstart=$(nixops ssh "$mach" -- \ 15 | grep ExecStart= "$svcfilename" | 16 | cut -d= -f2 || 17 | fail "Failed to extract ExecStart from service file '$svcfilename' on '$mach'") 18 | test -n "$execstart" || \ 19 | fail "Couldn't determine ExecStart for '$svc' on '$mach'" 20 | configfilename=$(nixops ssh "$mach" -- \ 21 | grep -e '-config.*\.json' "$execstart" | 22 | sed 's_^.*\(/nix/store/.*\.json\).*_\1_' | 23 | head -n1 || 24 | fail "Failed to fetch & parse ExecStart of '$svc' on '$mach'") 25 | test -n "$configfilename" || \ 26 | fail "Couldn't determine config file name for '$svc' on '$mach'" 27 | nixops ssh "$mach" -- jq . 
"$configfilename" || 28 | fail "Failed to fetch config file for '$svc' on '$mach'" 29 | } 30 | 31 | fetch_effective_service_node_configs() { 32 | local rundir=$1; shift; local nodes=($*) 33 | 34 | local cfroot="$rundir"/configs mach 35 | mkdir -p "$cfroot" 36 | for mach in ${nodes[*]} 'explorer' 37 | do fetch_effective_service_node_config "$mach" 'cardano-node' \ 38 | > "$cfroot"/"$mach"-cardano-node.config.json & 39 | done 40 | fetch_effective_service_node_config 'explorer' 'tx-generator' \ 41 | > "$cfroot"/explorer-tx-generator.config.json 42 | } 43 | -------------------------------------------------------------------------------- /scripts/renew-kes-keys.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # This script assumes: 4 | # 5 | # - the kes period is passed as the first argument, 6 | # - the number of bft nodes is passed as second argument, 7 | # - the total number of nodes is passed as third argument, 8 | # - the node keys are in a keys/node-keys directory, which must exist. 9 | # 10 | set -euo pipefail 11 | 12 | cd "$(dirname "$0")/.." 13 | 14 | [ -z ${1+x} ] && (echo "Missing KES period (must be passed as first argument)"; exit 1); 15 | 16 | PERIOD=$1 17 | 18 | cd keys/node-keys 19 | 20 | # Generate new KES key pairs 21 | for i in `seq 1 $NB_CORE_NODES`; do 22 | cardano-cli node key-gen-KES \ 23 | --verification-key-file node-kes$i.vkey.new \ 24 | --signing-key-file node-kes$i.skey.new 25 | done 26 | 27 | # Genereate an operational certificate for the BFT nodes, using the delegate 28 | # keys as cold signing key. 
29 | for i in `seq 1 $NB_BFT_NODES`; do 30 | cardano-cli node issue-op-cert \ 31 | --hot-kes-verification-key-file node-kes$i.vkey.new \ 32 | --cold-signing-key-file ../delegate-keys/delegate$i.skey \ 33 | --operational-certificate-issue-counter ../delegate-keys/delegate$i.counter \ 34 | --kes-period $PERIOD \ 35 | --out-file node$i.opcert 36 | done 37 | # Genereate an operational certificate for the staking pool nodes, using the pool 38 | # keys as cold signing key. 39 | for i in `seq $((NB_BFT_NODES+1)) $NB_CORE_NODES`; do 40 | cardano-cli node issue-op-cert \ 41 | --hot-kes-verification-key-file node-kes$i.vkey.new \ 42 | --cold-signing-key-file ../pools/cold$((i - $NB_BFT_NODES)).skey \ 43 | --operational-certificate-issue-counter ../pools/opcert$((i - $NB_BFT_NODES)).counter \ 44 | --kes-period $PERIOD \ 45 | --out-file node$i.opcert 46 | done 47 | 48 | # Replace existing KES key pair with new (because above commands succeeded) 49 | for i in `seq 1 $NB_CORE_NODES`; do 50 | mv node-kes$i.vkey.new node-kes$i.vkey 51 | mv node-kes$i.skey.new node-kes$i.skey 52 | done 53 | -------------------------------------------------------------------------------- /roles/load-client.nix: -------------------------------------------------------------------------------- 1 | pkgs: { config, ... 
}: 2 | { 3 | imports = [ 4 | pkgs.cardano-ops.modules.load-client 5 | ]; 6 | 7 | systemd.services.cardano-node.after = [ "ephemeral.service" ]; 8 | 9 | # Configure high IOPS on any ec2 node supporting it for cardano-node load client 10 | systemd.services.ephemeral = { 11 | wantedBy = [ "multi-user.target" ]; 12 | before = [ "cardano-node.service" "sshd.service" ]; 13 | serviceConfig = { 14 | Type = "oneshot"; 15 | }; 16 | path = with pkgs; [ 17 | coreutils 18 | e2fsprogs 19 | gnugrep 20 | gnused 21 | gnutar 22 | kmod 23 | mdadm 24 | utillinux 25 | ]; 26 | script = let 27 | replacePath = "/var/lib/cardano-node"; 28 | in '' 29 | #!/run/current-system/sw/bin/bash 30 | # This script should work on any ec2 instance which has an EBS nvme0n1 root vol and additional 31 | # non-EBS local nvme[1-9]n1 ephemeral block storage devices, ex: c5, g4, i3, m5, r5, x1, z1. 32 | set -x 33 | df | grep -q ${replacePath} && { echo "${replacePath} is pre-mounted, exiting."; exit 0; } 34 | mapfile -t DEVS < <(find /dev -maxdepth 1 -regextype posix-extended -regex ".*/nvme[1-9]n1") 35 | [ "''${#DEVS[@]}" -eq "0" ] && { echo "No additional NVME found, exiting."; exit 0; } 36 | if [ -d ${replacePath} ]; then 37 | mv ${replacePath} ${replacePath}-backup 38 | fi 39 | mkdir -p ${replacePath} 40 | if [ "''${#DEVS[@]}" -gt "1" ]; then 41 | mdadm --create --verbose --auto=yes /dev/md0 --level=0 --raid-devices="''${#DEVS[@]}" "''${DEVS[@]}" 42 | mkfs.ext4 /dev/md0 43 | mount /dev/md0 ${replacePath} 44 | elif [ "''${#DEVS[@]}" -eq "1" ]; then 45 | mkfs.ext4 "''${DEVS[@]}" 46 | mount "''${DEVS[@]}" ${replacePath} 47 | fi 48 | if [ -d ${replacePath}-backup ]; then 49 | mv ${replacePath}-backup/* ${replacePath}/ 50 | fi 51 | set +x 52 | ''; 53 | }; 54 | 55 | services.netdata = { 56 | enable = true; 57 | config = { 58 | global = { 59 | "default port" = "19999"; 60 | "bind to" = "*"; 61 | "history" = "86400"; 62 | "error log" = "syslog"; 63 | "debug log" = "syslog"; 64 | }; 65 | }; 66 | }; 67 | } 68 | 
-------------------------------------------------------------------------------- /scripts/submit-update-proposal.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | 6 | FEE="${FEE:-250000}" 7 | PAYMENT_KEY_PREFIX="${PAYMENT_KEY_PREFIX:-keys/utxo-keys/utxo1}" 8 | CARDANO_NODE_SOCKET_PATH="${CARDANO_NODE_SOCKET_PATH:-$PWD/node.socket}" 9 | export CARDANO_NODE_SOCKET_PATH 10 | 11 | USAGE="Usage: $0 versionMajor versionMinor [targetEpoch] \n 12 | $0 path/to/update.proposal \n 13 | (use current epoch if not provided) \n 14 | \n 15 | Environnement variables: \n 16 | \n 17 | - FEE ($FEE)\n 18 | - PAYMENT_KEY_PREFIX ($PAYMENT_KEY_PREFIX)\n 19 | - CARDANO_NODE_SOCKET_PATH ($CARDANO_NODE_SOCKET_PATH) 20 | " 21 | 22 | if [ $# -eq 1 ]; then 23 | UPDATE_PROPOSAL=$1 24 | else 25 | if [ $# -lt 2 ]; then 26 | echo -e "$USAGE" 27 | exit 1 28 | else 29 | MAJOR=$1 30 | MINOR=$2 31 | EPOCH=${3:-$(cardano-cli query tip --testnet-magic $NETWORK_MAGIC | jq .epoch)} 32 | UPDATE_PROPOSAL="" 33 | fi 34 | fi 35 | 36 | set -x 37 | 38 | ADDR=$(cat $PAYMENT_KEY_PREFIX.addr) 39 | ADDR_AMOUNT=$(cardano-cli query utxo --address $ADDR --testnet-magic $NETWORK_MAGIC | awk '{if(NR==3) print $3}') 40 | UTXO=$(cardano-cli query utxo --address $ADDR --testnet-magic $NETWORK_MAGIC | awk '{if(NR==3) print $1 "#" $2}') 41 | 42 | if [ -z $UPDATE_PROPOSAL ]; then 43 | UPDATE_PROPOSAL="keys/update-to-protocol-v$MAJOR.$MINOR.proposal" 44 | cardano-cli governance create-update-proposal --epoch $EPOCH --protocol-major-version $MAJOR --protocol-minor-version $MINOR $( for g in keys/genesis-keys/genesis?.vkey; do echo " --genesis-verification-key-file $g"; done) --out-file "$UPDATE_PROPOSAL" 45 | fi 46 | 47 | ERA=$(cardano-cli query tip --testnet-magic $NETWORK_MAGIC | jq -r '.era | ascii_downcase') 48 | 49 | cardano-cli transaction build-raw --$ERA-era --ttl 100000000 --tx-in $UTXO --tx-out $ADDR+$(( $ADDR_AMOUNT - $FEE )) 
--update-proposal-file $UPDATE_PROPOSAL --out-file $UPDATE_PROPOSAL.txbody --fee $FEE 50 | 51 | cardano-cli transaction sign --tx-body-file $UPDATE_PROPOSAL.txbody --out-file $UPDATE_PROPOSAL.tx --signing-key-file $PAYMENT_KEY_PREFIX.skey $( for d in keys/delegate-keys/delegate?.skey; do echo " --signing-key-file $d"; done) 52 | 53 | echo "Press enter to submit update proposal" 54 | read -n 1 55 | 56 | cardano-cli transaction submit --tx-file $UPDATE_PROPOSAL.tx --testnet-magic $NETWORK_MAGIC 57 | -------------------------------------------------------------------------------- /topologies/shelley-qa.nix: -------------------------------------------------------------------------------- 1 | pkgs: with pkgs; with lib; with topology-lib; 2 | let 3 | 4 | regions = { 5 | a = { name = "eu-central-1"; /* Europe (Frankfurt) */ }; 6 | b = { name = "us-east-2"; /* US East (Ohio) */ }; 7 | c = { name = "ap-southeast-1"; /* Asia Pacific (Singapore) */ }; 8 | d = { name = "eu-west-2"; /* Europe (London) */ }; 9 | }; 10 | 11 | stakingPoolNodes = fullyConnectNodes [ 12 | (mkStakingPool "a" 1 "IOHK1" { nodeId = 1; }) 13 | (mkStakingPool "b" 1 "IOHK2" { nodeId = 2; }) 14 | (mkStakingPool "c" 1 "IOHK3" { nodeId = 3; }) 15 | ]; 16 | 17 | coreNodes = map (withAutoRestartEvery 6) stakingPoolNodes; 18 | 19 | relayNodes = map (composeAll [ 20 | (withAutoRestartEvery 6) 21 | #(withProfiling "time" ["rel-a-1"]) 22 | ]) (mkRelayTopology { 23 | inherit regions coreNodes; 24 | autoscaling = false; 25 | maxProducersPerNode = 20; 26 | maxInRegionPeers = 5; 27 | }); 28 | 29 | in { 30 | 31 | inherit coreNodes relayNodes regions; 32 | 33 | monitoring = { 34 | services.monitoring-services.publicGrafana = false; 35 | }; 36 | 37 | 38 | "${globals.faucetHostname}" = { 39 | services.cardano-faucet = { 40 | anonymousAccess = false; 41 | faucetLogLevel = "DEBUG"; 42 | secondsBetweenRequestsAnonymous = 86400; 43 | secondsBetweenRequestsApiKeyAuth = 86400; 44 | lovelacesToGiveAnonymous = 1000000000; 45 | 
lovelacesToGiveApiKeyAuth = 10000000000; 46 | useByronWallet = false; 47 | }; 48 | services.cardano-node = { 49 | package = mkForce cardano-node; 50 | }; 51 | }; 52 | 53 | 54 | explorer = { 55 | containers = mapAttrs (b: _: { 56 | config = { 57 | services.nginx.virtualHosts.explorer.locations."/p" = lib.mkIf (__pathExists ../static/pool-metadata) { 58 | root = ../static/pool-metadata; 59 | }; 60 | services.cardano-graphql = { 61 | allowListPath = mkForce null; 62 | allowIntrospection = true; 63 | }; 64 | services.cardano-db-sync = lib.mkIf (b == "a") { 65 | #takeSnapshot = "once"; 66 | #restoreSnapshot = "db-sync-snapshot-schema-10-block-1254641-x86_64.tgz"; 67 | }; 68 | }; 69 | }) globals.explorerBackends; 70 | }; 71 | } 72 | -------------------------------------------------------------------------------- /bench/lib-profile.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # shellcheck disable=2086 3 | 4 | ## Profile JQ 5 | profjq() { 6 | local prof=$1 q=$2; shift 2 7 | rparmjq "del(.meta) 8 | | if has(\"$prof\") then (.\"$prof\" | $q) 9 | else error(\"Can't query unknown profile $prof using $q\") end 10 | " "$@" 11 | } 12 | 13 | profgenjq() 14 | { 15 | local prof=$1 q=$2; shift 2 16 | profjq "$prof" ".genesis | ($q)" "$@" 17 | } 18 | 19 | profile_deploy() { 20 | local batch=$1 prof=${2:-default} include=() 21 | prof=$(params resolve-profile "$prof") 22 | 23 | mkdir -p runs/deploy-logs 24 | deploylog=runs/deploy-logs/$(timestamp).$batch.$prof.log 25 | 26 | mkdir -p "$(dirname "$deploylog")" 27 | echo >"$deploylog" 28 | ln -sf "$deploylog" 'last-deploy.log' 29 | 30 | watcher_pid= 31 | if test -n "${watch_deploy}" 32 | then { sleep 0.3; tail -f "$deploylog"; } & 33 | watcher_pid=$!; fi 34 | 35 | if test -n "$watcher_pid" 36 | then kill "$watcher_pid" >/dev/null 2>&1 || true; fi 37 | 38 | local genesis_timestamp=$(timestamp) 39 | 40 | if test -z "$no_prebuild" 41 | then oprint "prebuilding:" 42 | ## 0. 
Prebuild: 43 | ensure_genesis "$prof" "$genesis_timestamp" 44 | time deploy_build_only "$prof" "$deploylog" "$watcher_pid"; fi 45 | 46 | ensure_genesis "$prof" "$genesis_timestamp" 47 | 48 | include="explorer $(params producers)" 49 | 50 | if test -z "$no_deploy" 51 | then deploystate_deploy_profile "$prof" "$include" "$deploylog" 52 | else oprint "skippin' deploy, because: CLI override" 53 | ln -sf "$deploylog" 'last-deploy.log' 54 | fi 55 | } 56 | 57 | ### 58 | ### Aux 59 | ### 60 | goggles_fn='cat' 61 | 62 | goggles_ip() { 63 | sed "$(jq --raw-output '. 64 | | .local_ip as $local_ip 65 | | .public_ip as $public_ip 66 | | ($local_ip | map ("s_\(.local_ip | gsub ("\\."; "."; "x"))_HOST-\(.hostname)_g")) + 67 | ($public_ip | map ("s_\(.public_ip | gsub ("\\."; "."; "x"))_HOST-\(.hostname)_g")) 68 | | join("; ") 69 | ' last-meta.json)" 70 | } 71 | 72 | goggles() { 73 | ${goggles_fn} 74 | } 75 | export -f goggles goggles_ip 76 | -------------------------------------------------------------------------------- /topologies/staging.nix: -------------------------------------------------------------------------------- 1 | pkgs: with pkgs; with lib; with topology-lib; 2 | let 3 | 4 | regions = { 5 | a = { name = "eu-central-1"; # Europe (Frankfurt); 6 | minRelays = 3; 7 | }; 8 | b = { name = "us-east-2"; # US East (Ohio) 9 | minRelays = 2; 10 | }; 11 | c = { name = "ap-southeast-1"; # Asia Pacific (Singapore) 12 | minRelays = 1; 13 | }; 14 | d = { name = "eu-west-2"; # Europe (London) 15 | minRelays = 1; 16 | }; 17 | e = { name = "us-west-1"; # US West (N. 
California) 18 | minRelays = 1; 19 | }; 20 | f = { name = "ap-northeast-1"; # Asia Pacific (Tokyo) 21 | minRelays = 1; 22 | }; 23 | }; 24 | 25 | bftCoreNodes = regionalConnectGroupWith (reverseList stakingPoolNodes) (fullyConnectNodes [ 26 | # OBFT centralized nodes recovery nodes 27 | (mkBftCoreNode "a" 1 { 28 | org = "IOHK"; 29 | nodeId = 1; 30 | }) 31 | (mkBftCoreNode "b" 1 { 32 | org = "IOHK"; 33 | nodeId = 2; 34 | }) 35 | (mkBftCoreNode "c" 1 { 36 | org = "Emurgo"; 37 | nodeId = 3; 38 | }) 39 | (mkBftCoreNode "d" 1 { 40 | org = "Emurgo"; 41 | nodeId = 4; 42 | }) 43 | (mkBftCoreNode "e" 1 { 44 | org = "CF"; 45 | nodeId = 5; 46 | }) 47 | (mkBftCoreNode "f" 1 { 48 | org = "CF"; 49 | nodeId = 6; 50 | }) 51 | (mkBftCoreNode "a" 2 { 52 | org = "IOHK"; 53 | nodeId = 7; 54 | }) 55 | ]); 56 | 57 | stakingPoolNodes = regionalConnectGroupWith bftCoreNodes 58 | (fullyConnectNodes [ 59 | (mkStakingPool "a" 1 "IOGS1" { nodeId = 8; }) 60 | (mkStakingPool "b" 1 "IOGS2" { nodeId = 9; }) 61 | (mkStakingPool "c" 1 "IOGS3" { nodeId = 10; }) 62 | (mkStakingPool "d" 1 "IOGS4" { nodeId = 11; }) 63 | (mkStakingPool "e" 1 "IOGS5" { nodeId = 12; }) 64 | (mkStakingPool "f" 1 "IOGS6" { nodeId = 13; }) 65 | (mkStakingPool "a" 2 "IOGS7" { nodeId = 14; }) 66 | ]); 67 | 68 | coreNodes = map (composeAll [ 69 | (withAutoRestartEvery 6) 70 | #(withProfiling "time" ["bft-c-1" "bft-a-1"]) 71 | ]) (bftCoreNodes ++ stakingPoolNodes); 72 | 73 | relayNodes = map (withAutoRestartEvery 6) (mkRelayTopology { 74 | inherit regions coreNodes; 75 | autoscaling = false; 76 | }); 77 | 78 | in { 79 | 80 | inherit coreNodes relayNodes regions; 81 | 82 | "${globals.faucetHostname}" = { 83 | services.cardano-faucet = { 84 | anonymousAccess = false; 85 | faucetLogLevel = "DEBUG"; 86 | secondsBetweenRequestsAnonymous = 86400; 87 | secondsBetweenRequestsApiKeyAuth = 86400; 88 | lovelacesToGiveAnonymous = 1000000000; 89 | lovelacesToGiveApiKeyAuth = 10000000000; 90 | useByronWallet = false; 91 | }; 92 | }; 93 | } 94 
| -------------------------------------------------------------------------------- /bench/lib-genesis-byron.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # shellcheck disable=1091,2016 3 | 4 | profile_byron_genesis_protocol_params() { 5 | jq ' 6 | { heavyDelThd: "300000" 7 | , maxBlockSize: "641000" 8 | , maxHeaderSize: "200000" 9 | , maxProposalSize: "700" 10 | , maxTxSize: "4096" 11 | , mpcThd: "200000" 12 | , scriptVersion: 0 13 | , slotDuration: "20000" 14 | , softforkRule: 15 | { initThd: "900000" 16 | , minThd: "600000" 17 | , thdDecrement: "100000" 18 | } 19 | , txFeePolicy: 20 | { multiplier: "439460" 21 | , summand: "155381" 22 | } 23 | , unlockStakeEpoch: "184467" 24 | , updateImplicit: "10000" 25 | , updateProposalThd: "100000" 26 | , updateVoteThd: "100000" 27 | } 28 | ' --null-input 29 | } 30 | 31 | profile_byron_genesis_cli_args() { 32 | jq ' 33 | def byron_genesis_cli_args: 34 | [ "--k", 10 35 | , "--protocol-magic", 42 36 | , "--secret-seed", 2718281828 37 | , "--total-balance", 2718281828 38 | 39 | , "--n-poor-addresses", 128 40 | , "--n-delegate-addresses", 1 41 | , "--delegate-share", 0.8 42 | , "--avvm-entry-count", 0 43 | , "--avvm-entry-balance", 0 44 | ]; 45 | 46 | byron_genesis_cli_args 47 | | join(" ") 48 | ' --null-input --raw-output 49 | } 50 | 51 | profile_genesis_byron() { 52 | local prof=${1:-default} 53 | local target_dir=${2:-./keys/byron} 54 | 55 | local byron_params_tmpfile 56 | byron_params_tmpfile=$(mktemp --tmpdir) 57 | profile_byron_genesis_protocol_params >"$byron_params_tmpfile" 58 | 59 | mkdir -p "$target_dir" 60 | rm -rf -- ./"$target_dir" 61 | 62 | genesis_cli_args=( 63 | --genesis-output-dir "$target_dir" 64 | --protocol-parameters-file "$byron_params_tmpfile" 65 | --start-time 1 66 | $(profile_byron_genesis_cli_args)) 67 | 68 | cardano-cli byron genesis genesis "${genesis_cli_args[@]}" 69 | rm -f "$byron_params_tmpfile" 70 | } 71 | 72 | 
genesis_update_starttime_byron() { 73 | local start_timestamp=$1 genesis_dir=${2:-./keys/byron} 74 | 75 | json_file_append "$genesis_dir"/genesis.json " 76 | { startTime: $start_timestamp }" <<<0 77 | } 78 | 79 | genesis_hash_byron() { 80 | local genesis_dir=${1:-./keys/byron} 81 | 82 | cardano-cli byron genesis print-genesis-hash --genesis-json "${genesis_dir}"/genesis.json | 83 | tail -1 84 | } 85 | -------------------------------------------------------------------------------- /nix/cardano.nix: -------------------------------------------------------------------------------- 1 | self: super: with self; let 2 | 3 | getCardanoNodePackages = src: let 4 | inherit (import (src + "/nix") { gitrev = src.rev; }) cardanoNodeProject; 5 | cardanoNodeHaskellPackages = lib.mapAttrsRecursiveCond (v: !(lib.isDerivation v)) 6 | (path: value: 7 | if (lib.isAttrs value) then 8 | lib.recursiveUpdate value 9 | { 10 | passthru = { 11 | profiled = lib.getAttrFromPath path profiledProject.hsPkgs; 12 | asserted = lib.getAttrFromPath path assertedProject.hsPkgs; 13 | eventlogged = lib.getAttrFromPath path eventloggedProject.hsPkgs; 14 | }; 15 | } else value) 16 | cardanoNodeProject.hsPkgs; 17 | profiledProject = cardanoNodeProject.appendModule { 18 | modules = [{ 19 | enableLibraryProfiling = true; 20 | packages.cardano-node.components.exes.cardano-node.enableProfiling = true; 21 | packages.tx-generator.components.exes.tx-generator.enableProfiling = true; 22 | packages.locli.components.exes.locli.enableProfiling = true; 23 | }]; 24 | }; 25 | assertedProject = cardanoNodeProject.appendModule { 26 | modules = [{ 27 | packages = lib.genAttrs [ 28 | "ouroboros-consensus" 29 | "ouroboros-consensus-cardano" 30 | "ouroboros-consensus-byron" 31 | "ouroboros-consensus-shelley" 32 | "ouroboros-network" 33 | "network-mux" 34 | ] 35 | (name: { flags.asserts = true; }); 36 | }]; 37 | }; 38 | eventloggedProject = cardanoNodeProject.appendModule 39 | { 40 | modules = [{ 41 | packages = lib.genAttrs [ 
"cardano-node" ] 42 | (name: { configureFlags = [ "--ghc-option=-eventlog" ]; }); 43 | }]; 44 | }; 45 | 46 | in { 47 | inherit cardanoNodeHaskellPackages; 48 | inherit (cardanoNodeHaskellPackages.cardano-cli.components.exes) cardano-cli; 49 | inherit (cardanoNodeHaskellPackages.cardano-submit-api.components.exes) cardano-submit-api; 50 | inherit (cardanoNodeHaskellPackages.cardano-node.components.exes) cardano-node; 51 | inherit (cardanoNodeHaskellPackages.locli.components.exes) locli; 52 | inherit (cardanoNodeHaskellPackages.tx-generator.components.exes) tx-generator; 53 | 54 | cardano-node-profiled = cardano-node.passthru.profiled; 55 | cardano-node-eventlogged = cardano-node.passthru.eventlogged; 56 | cardano-node-asserted = cardano-node.passthru.asserted; 57 | }; 58 | 59 | cardanoNodePackages = getCardanoNodePackages sourcePaths.cardano-node; 60 | 61 | in cardanoNodePackages // { 62 | inherit getCardanoNodePackages cardanoNodePackages; 63 | inherit (import (sourcePaths.cardano-db-sync + "/nix") {}) cardanoDbSyncHaskellPackages; 64 | cardano-node-services-def = (sourcePaths.cardano-node-service or sourcePaths.cardano-node) + "/nix/nixos"; 65 | } 66 | -------------------------------------------------------------------------------- /topologies/alonzo-purple.nix: -------------------------------------------------------------------------------- 1 | pkgs: with pkgs; with lib; with topology-lib; 2 | let 3 | 4 | regions = { 5 | a = { name = "eu-central-1"; # Europe (Frankfurt); 6 | }; 7 | b = { name = "us-east-2"; # US East (Ohio) 8 | }; 9 | c = { name = "ap-southeast-1"; # Asia Pacific (Singapore) 10 | }; 11 | d = { name = "eu-west-2"; # Europe (London) 12 | }; 13 | e = { name = "us-west-1"; # US West (N. 
California) 14 | }; 15 | f = { name = "ap-northeast-1"; # Asia Pacific (Tokyo) 16 | }; 17 | }; 18 | 19 | bftNodes = [ 20 | (mkBftCoreNode "a" 1 { org = "IOHK"; nodeId = 1; }) 21 | ]; 22 | 23 | nodes = with regions; map (composeAll [ 24 | (withAutoRestartEvery 6) 25 | ]) (concatLists [ 26 | (mkStakingPoolNodes "a" 1 "d" "IOGA1" { org = "IOHK"; nodeId = 2; }) 27 | (mkStakingPoolNodes "b" 2 "e" "IOGA2" { org = "IOHK"; nodeId = 3; }) 28 | (mkStakingPoolNodes "c" 3 "f" "IOGA3" { org = "IOHK"; nodeId = 4; }) 29 | ] ++ bftNodes); 30 | 31 | test-node = { 32 | name = "test-node"; 33 | nodeId = 99; 34 | org = "IOHK"; 35 | region = "eu-central-1"; 36 | producers = [ 37 | "ioga1.relays.alonzo-purple.dev.cardano.org" "ioga1.relays.alonzo-white.dev.cardano.org" 38 | "ioga2.relays.alonzo-purple.dev.cardano.org" "ioga2.relays.alonzo-white.dev.cardano.org" 39 | "ioga3.relays.alonzo-purple.dev.cardano.org" "ioga3.relays.alonzo-white.dev.cardano.org" 40 | ]; 41 | stakePool = false; 42 | public = false; 43 | }; 44 | 45 | relayNodes = (composeAll [ 46 | connectWithThirdPartyRelays 47 | (regionalConnectGroupWith bftNodes) 48 | fullyConnectNodes 49 | ] (filter (n: !(n ? stakePool)) nodes)) ++ [ 50 | test-node 51 | ]; 52 | 53 | coreNodes = filter (n: n ? 
stakePool) nodes; 54 | 55 | in { 56 | 57 | inherit coreNodes relayNodes regions; 58 | 59 | explorer = { 60 | containers = mapAttrs (b: _: { 61 | config = { 62 | services.cardano-graphql = { 63 | allowListPath = mkForce null; 64 | allowIntrospection = true; 65 | }; 66 | }; 67 | }) globals.explorerBackends; 68 | }; 69 | 70 | smash = { 71 | services.cardano-node = { 72 | package = mkForce cardano-node; 73 | }; 74 | }; 75 | 76 | "${globals.faucetHostname}" = { 77 | services.cardano-faucet = { 78 | anonymousAccess = false; 79 | faucetLogLevel = "DEBUG"; 80 | secondsBetweenRequestsAnonymous = 86400; 81 | secondsBetweenRequestsApiKeyAuth = 86400; 82 | lovelacesToGiveAnonymous = 1000000000; 83 | lovelacesToGiveApiKeyAuth = 10000000000; 84 | useByronWallet = false; 85 | }; 86 | services.cardano-node = { 87 | package = mkForce cardano-node; 88 | }; 89 | }; 90 | 91 | monitoring = { 92 | services.monitoring-services.publicGrafana = true; 93 | services.nginx.virtualHosts."monitoring.${globals.domain}".locations."/p" = { 94 | root = ../static/pool-metadata; 95 | }; 96 | }; 97 | 98 | } 99 | -------------------------------------------------------------------------------- /topologies/testnet.nix: -------------------------------------------------------------------------------- 1 | pkgs: with pkgs; with lib; with topology-lib; 2 | let 3 | 4 | regions = { 5 | a = { name = "eu-central-1"; # Europe (Frankfurt); 6 | minRelays = 3; 7 | }; 8 | b = { name = "us-east-2"; # US East (Ohio) 9 | minRelays = 2; 10 | }; 11 | c = { name = "ap-southeast-1"; # Asia Pacific (Singapore) 12 | minRelays = 1; 13 | }; 14 | d = { name = "eu-west-2"; # Europe (London) 15 | minRelays = 2; 16 | }; 17 | e = { name = "us-west-1"; # US West (N. 
California) 18 | minRelays = 2; 19 | }; 20 | f = { name = "ap-northeast-1"; # Asia Pacific (Tokyo) 21 | minRelays = 1; 22 | }; 23 | }; 24 | 25 | stakingPoolNodes = fullyConnectNodes [ 26 | (mkStakingPool "a" 1 "" { nodeId = 1; }) 27 | (mkStakingPool "b" 1 "" { nodeId = 2; }) 28 | (mkStakingPool "c" 1 "" { nodeId = 3; }) 29 | (mkStakingPool "d" 1 "" { nodeId = 4; }) 30 | (mkStakingPool "e" 1 "" { nodeId = 5; }) 31 | (mkStakingPool "f" 1 "" { nodeId = 6; }) 32 | (mkStakingPool "a" 2 "" { nodeId = 7; }) 33 | ]; 34 | 35 | coreNodes = stakingPoolNodes; 36 | 37 | relayNodes = map (composeAll [ 38 | (forNodes { 39 | services.cardano-node = { 40 | extraNodeInstanceConfig = i: optionalAttrs (i == 0) { 41 | TraceMempool = true; 42 | }; 43 | }; 44 | } [ "rel-a-1" "rel-b-1" "rel-c-1" "rel-d-1" "rel-e-1" "rel-f-1" ]) 45 | ]) (mkRelayTopology { 46 | inherit regions coreNodes; 47 | autoscaling = false; 48 | maxProducersPerNode = 20; 49 | maxInRegionPeers = 5; 50 | }); 51 | 52 | in { 53 | inherit coreNodes relayNodes regions; 54 | 55 | monitoring = { 56 | services.monitoring-services = { 57 | publicGrafana = true; 58 | prometheus.basicAuthFile = writeText "prometheus.htpasswd" globals.static.prometheusHtpasswd; 59 | }; 60 | }; 61 | 62 | "${globals.faucetHostname}" = { 63 | services.cardano-faucet = { 64 | anonymousAccess = true; 65 | anonymousAccessAssets = true; 66 | faucetLogLevel = "DEBUG"; 67 | secondsBetweenRequestsAnonymous = 86400; 68 | secondsBetweenRequestsAnonymousAssets = 86400; 69 | secondsBetweenRequestsApiKeyAuth = 86400; 70 | lovelacesToGiveAnonymous = 1000000000; 71 | assetsToGiveAnonymous = 2; 72 | lovelacesToGiveApiKeyAuth = 1000000000000; 73 | useByronWallet = false; 74 | faucetFrontendUrl = "https://developers.cardano.org/en/testnets/cardano/tools/faucet/"; 75 | }; 76 | }; 77 | } // (mapAttrs' (b: _: nameValuePair "explorer-${b}" { 78 | services.nginx.virtualHosts.explorer.locations."/p" = lib.mkIf (__pathExists ../static/pool-metadata) { 79 | root = 
../static/pool-metadata; 80 | }; 81 | services.cardano-db-sync.restoreSnapshot = { 82 | a = "https://updates-cardano-testnet.s3.amazonaws.com/cardano-db-sync/13/db-sync-snapshot-schema-13-block-3673999-x86_64.tgz"; 83 | b = "https://updates-cardano-testnet.s3.amazonaws.com/cardano-db-sync/13/db-sync-snapshot-schema-13-block-3673999-x86_64.tgz"; 84 | }.${b}; 85 | }) globals.explorerBackends) 86 | -------------------------------------------------------------------------------- /modules/tcpdump.nix: -------------------------------------------------------------------------------- 1 | { 2 | config, 3 | lib, 4 | pkgs, 5 | ... 6 | }: let 7 | cfg = config.services.tcpdump; 8 | in { 9 | options.services.tcpdump = { 10 | enable = lib.mkEnableOption "tcpdump capture and upload"; 11 | 12 | bucketName = lib.mkOption { 13 | type = lib.types.str; 14 | }; 15 | 16 | ports = lib.mkOption { 17 | type = lib.types.listOf lib.types.port; 18 | default = [3001]; 19 | }; 20 | 21 | rotateSeconds = lib.mkOption { 22 | type = lib.types.ints.positive; 23 | default = 60 * 10; 24 | }; 25 | 26 | s3ExpirationDays = lib.mkOption { 27 | type = lib.types.ints.positive; 28 | default = 7; 29 | }; 30 | }; 31 | 32 | config = lib.mkIf cfg.enable { 33 | deployment.keys.pcap-upload = { 34 | keyFile = ../static/pcap-upload; 35 | destDir = "/var/lib/keys"; 36 | }; 37 | 38 | systemd.services = ( 39 | lib.listToAttrs (map ( 40 | port: { 41 | name = "tcpdump-${toString port}"; 42 | value = { 43 | description = "capture packets on port ${toString port}"; 44 | wantedBy = ["multi-user.target"]; 45 | after = ["network-online.target"]; 46 | startLimitIntervalSec = 10; 47 | startLimitBurst = 10; 48 | serviceConfig = { 49 | Restart = "always"; 50 | StateDirectory = "tcpdump"; 51 | WorkingDirectory = "/var/lib/tcpdump"; 52 | }; 53 | path = [pkgs.tcpdump pkgs.inetutils]; 54 | 55 | script = '' 56 | set -exuo pipefail 57 | 58 | dir="$(hostname)_${toString port}" 59 | mkdir -p "$dir" 60 | cd "$dir" 61 | 62 | tcpdump \ 63 | 
-i any \ 64 | -w '%Y-%m-%d_%H:%M:%S.pcap' \ 65 | -G ${toString cfg.rotateSeconds} \ 66 | -n 'port ${toString port}' 67 | ''; 68 | }; 69 | } 70 | ) 71 | cfg.ports) 72 | // { 73 | tcpdump-upload = { 74 | wantedBy = ["multi-user.target"]; 75 | after = ["tcpdump.service"]; 76 | startLimitIntervalSec = 10; 77 | startLimitBurst = 10; 78 | serviceConfig = { 79 | Restart = "always"; 80 | StateDirectory = "tcpdump"; 81 | WorkingDirectory = "/var/lib/tcpdump"; 82 | }; 83 | path = [pkgs.awscli2 pkgs.fd]; 84 | environment.HOME = "/var/lib/tcpdump"; 85 | 86 | # NOTE: the format in 's3://${cfg.bucketName}/{//}/{/}' may be subject to change in future versions of `fd`. 87 | script = '' 88 | set -exuo pipefail 89 | 90 | mkdir -p .aws 91 | cp /var/lib/keys/pcap-upload .aws/credentials 92 | 93 | while true; do 94 | fd -e pcap --changed-before='${toString (cfg.rotateSeconds * 2)} seconds' -j 1 -x \ 95 | aws s3 mv '{}' 's3://${cfg.bucketName}/{//}/{/}' 96 | sleep 60 97 | done 98 | ''; 99 | }; 100 | } 101 | ); 102 | }; 103 | } 104 | -------------------------------------------------------------------------------- /bench/lib-analysis.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # shellcheck disable=1091,2016 3 | 4 | 5 | logs_of_nodes() { 6 | local dir=$1; shift 7 | local machines=("$@") 8 | 9 | for mach in ${machines[*]} 10 | do ls -- "$dir"/analysis/logs-"$mach"/node-*.json; done 11 | } 12 | 13 | collect_jsonlog_inventory() { 14 | local dir=$1; shift 15 | local constituents=("$@") 16 | 17 | for mach in ${constituents[*]} 18 | do jsons=($(ls -- "$dir"/logs-"$mach"/node-*.json)) 19 | jsonlog_inventory "$mach" "${jsons[@]}"; done 20 | jsonlog_inventory "generator" "$dir"/logs-explorer/generator-*.json 21 | } 22 | 23 | analysis_append() { 24 | local dir=$1 expr=$2; shift 2 25 | json_file_append "$dir"/analysis.json ' 26 | $meta[0] as $meta 27 | | $analysis[0] as $analysis 28 | | '"$expr 29 | " --slurpfile meta 
"$dir/meta.json" \ 30 | --slurpfile analysis "$dir/analysis.json" \ 31 | "$@" 32 | } 33 | 34 | analysis_prepend() { 35 | local dir=$1 expr=$2; shift 2 36 | json_file_prepend "$dir"/analysis.json ' 37 | $meta[0] as $meta 38 | | $analysis[0] as $analysis 39 | | '"$expr 40 | " --slurpfile meta "$dir/meta.json" \ 41 | --slurpfile analysis "$dir/analysis.json" \ 42 | "$@" 43 | } 44 | 45 | ### 46 | ### 47 | 48 | analyse_run() { 49 | while test $# -ge 1 50 | do case "$1" in 51 | --list ) echo ${analysis_list[*]}; return;; 52 | * ) break;; esac; shift; done 53 | 54 | local dir=${1:-.} tag meta 55 | dir=$(realpath "$dir") 56 | 57 | if test ! -d "$dir" 58 | then fail "run directory doesn't exist: $dir"; fi 59 | if test ! -f "$dir/meta.json" 60 | then fail "run directory doesn't has no metafile: $dir"; fi 61 | 62 | machines=($(jq '.machine_info | keys | join(" ") 63 | ' --raw-output <"$dir/deployment-explorer.json")) 64 | meta=$(jq .meta "$dir/meta.json") 65 | tag=$(jq .tag <<<$meta --raw-output) 66 | 67 | echo "--( processing logs in: $(basename "$dir")" 68 | 69 | for a in "${analysis_list[@]}" 70 | do echo -n " $a" | sed 's/analysis_//' 71 | $a "$dir" "${machines[@]}"; done 72 | 73 | # patch_run "$dir" 74 | 75 | # rm -rf "$dir"/analysis/{analysis,logs-node-*,logs-explorer,startup} 76 | 77 | oprint "analysed tag: ${tag}" 78 | } 79 | 80 | runs_in() { 81 | local dir=${1:-.} 82 | dir=$(realpath $dir) 83 | find "$dir" -maxdepth 2 -mindepth 2 -name meta.json -type f | cut -d/ -f$(($(tr -cd / <<<$dir | wc -c) + 2)) 84 | } 85 | 86 | mass_analyse() { 87 | local parallel= 88 | while test $# -ge 1 89 | do case "$1" in 90 | --parallel ) parallel=t;; 91 | * ) break;; esac; shift; done 92 | 93 | local dir=${1:-.} runs 94 | runs=($(runs_in "$dir")) 95 | 96 | oprint "analysing runs: ${runs[*]}" 97 | 98 | for run in "${runs[@]}" 99 | do if test -n "$parallel" 100 | then analyse_run "$dir/$run" & 101 | else analyse_run "$dir/$run"; fi; done 102 | } 103 | 
-------------------------------------------------------------------------------- /topologies/bench-distrib-12.nix: -------------------------------------------------------------------------------- 1 | { 2 | coreNodes = [ 3 | { 4 | name = "node-0"; 5 | nodeId = 0; 6 | org = "IOHK"; 7 | region = "eu-central-1"; 8 | producers = ["node-1" "node-2" "node-3" "node-4" "node-5" "node-6" "node-7" "node-8" "node-9" "node-10" "node-11"]; 9 | pools = 1; 10 | } 11 | { 12 | name = "node-1"; 13 | nodeId = 1; 14 | org = "IOHK"; 15 | region = "eu-central-1"; 16 | producers = ["node-0" "node-2" "node-3" "node-4" "node-5" "node-6" "node-7" "node-8" "node-9" "node-10" "node-11"]; 17 | pools = 1; 18 | } 19 | { 20 | name = "node-2"; 21 | nodeId = 2; 22 | org = "IOHK"; 23 | region = "eu-central-1"; 24 | producers = ["node-0" "node-1" "node-3" "node-4" "node-5" "node-6" "node-7" "node-8" "node-9" "node-10" "node-11"]; 25 | } 26 | { 27 | name = "node-3"; 28 | nodeId = 3; 29 | org = "IOHK"; 30 | region = "eu-central-1"; 31 | producers = ["node-0" "node-1" "node-2" "node-4" "node-5" "node-6" "node-7" "node-8" "node-9" "node-10" "node-11"]; 32 | pools = 1; 33 | } 34 | { 35 | name = "node-4"; 36 | nodeId = 4; 37 | org = "IOHK"; 38 | region = "ap-southeast-2"; 39 | producers = ["node-0" "node-1" "node-2" "node-3" "node-5" "node-6" "node-7" "node-8" "node-9" "node-10" "node-11"]; 40 | pools = 1; 41 | } 42 | { 43 | name = "node-5"; 44 | nodeId = 5; 45 | org = "IOHK"; 46 | region = "ap-southeast-2"; 47 | producers = ["node-0" "node-1" "node-2" "node-3" "node-4" "node-6" "node-7" "node-8" "node-9" "node-10" "node-11"]; 48 | } 49 | { 50 | name = "node-6"; 51 | nodeId = 6; 52 | org = "IOHK"; 53 | region = "ap-southeast-2"; 54 | producers = ["node-0" "node-1" "node-2" "node-3" "node-4" "node-5" "node-7" "node-8" "node-9" "node-10" "node-11"]; 55 | pools = 1; 56 | } 57 | { 58 | name = "node-7"; 59 | nodeId = 7; 60 | org = "IOHK"; 61 | region = "ap-southeast-2"; 62 | producers = ["node-0" "node-1" 
"node-2" "node-3" "node-4" "node-5" "node-6" "node-8" "node-9" "node-10" "node-11"]; 63 | pools = 1; 64 | } 65 | 66 | { 67 | name = "node-8"; 68 | nodeId = 8; 69 | org = "IOHK"; 70 | region = "us-east-1"; 71 | producers = ["node-0" "node-1" "node-2" "node-3" "node-4" "node-5" "node-6" "node-7" "node-9" "node-10" "node-11"]; 72 | } 73 | { 74 | name = "node-9"; 75 | nodeId = 9; 76 | org = "IOHK"; 77 | region = "us-east-1"; 78 | producers = ["node-0" "node-1" "node-2" "node-3" "node-4" "node-5" "node-6" "node-7" "node-8" "node-10" "node-11"]; 79 | pools = 1; 80 | } 81 | { 82 | name = "node-10"; 83 | nodeId = 10; 84 | org = "IOHK"; 85 | region = "us-east-1"; 86 | producers = ["node-0" "node-1" "node-2" "node-3" "node-4" "node-5" "node-6" "node-7" "node-8" "node-9" "node-11"]; 87 | pools = 1; 88 | } 89 | { 90 | name = "node-11"; 91 | nodeId = 11; 92 | org = "IOHK"; 93 | region = "us-east-1"; 94 | producers = ["node-0" "node-1" "node-2" "node-3" "node-4" "node-5" "node-6" "node-7" "node-8" "node-9" "node-10"]; 95 | } 96 | ]; 97 | 98 | relayNodes = []; 99 | } 100 | -------------------------------------------------------------------------------- /bench/sync-to.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eo pipefail 4 | 5 | this_repo=$(git rev-parse --show-toplevel) 6 | 7 | stage_changes= 8 | 9 | while test -n "$1" 10 | do case "$1" in 11 | --stage | --stage-changes ) 12 | stage_changes=t;; 13 | --help ) usage; exit 1;; 14 | * ) break;; esac; shift; done 15 | set -u 16 | 17 | repos=( 18 | cardano-node 19 | ) 20 | 21 | repo_path() { 22 | echo -n "../$1" 23 | } 24 | 25 | repo_project() { 26 | echo -n "../$1/cabal.project" 27 | } 28 | 29 | dir_nix_hash() { 30 | local dir=$1 31 | local commit=$2 32 | pushd "${dir}" >/dev/null || return 33 | nix-prefetch-git "file://$(realpath "${dir}")" "${commit}" 2>/dev/null \ 34 | | jq '.sha256' | xargs echo 35 | popd >/dev/null || return 36 | } 37 | 38 | 
cabal_project_current_commit() { 39 | local project_file=$1 40 | local repo_name=$2 41 | grep "^[ ]*location: .*/${repo_name}\$" "${project_file}" -A1 \ 42 | | tail -n-1 | sed 's/^.* tag: //' 43 | } 44 | 45 | cabal_project_current_hash() { 46 | local project_file=$1 47 | local repo_name=$2 48 | grep "^[ ]*location: .*/${repo_name}\$" "${project_file}" -A2 \ 49 | | tail -n-1 | sed 's/^.* --sha256: //' 50 | } 51 | 52 | fail() { 53 | echo "$*" >&2 54 | exit 1 55 | } 56 | 57 | # test -r "$other_project" || 58 | # fail "Usage: $(basename "$0") [SYNC-FROM-REPO=../${other_name}]" 59 | 60 | declare -A repo_commit 61 | declare -A repo_hash 62 | for r in ${repos[*]} 63 | do repo_commit[$r]=$(git -C "$(repo_path "$r")" rev-parse HEAD) 64 | test -n "${repo_commit[$r]}" || \ 65 | fail "Repository ${r} doesn't have a valid git state." 66 | 67 | repo_hash[$r]=$(dir_nix_hash "$(repo_path "$r")" "${repo_commit[$r]}") 68 | test -n "${repo_hash[$r]}" || \ 69 | fail "Failed to 'nix-prefetch-git' in $r" 70 | echo "--( $r: git ${repo_commit[$r]} / sha256 ${repo_hash[$r]}" 71 | done 72 | 73 | repo_sources_pin_commit() { 74 | local repo=$1 sources=$2 pin=$3 75 | jq --arg pin "$pin" '.[$pin].rev' "$repo"/nix/${sources}.json -r 76 | } 77 | 78 | repo_sources_pin_hash() { 79 | local repo=$1 sources=$2 pin=$3 80 | jq --arg pin "$pin" '.[$pin].sha256' "$repo"/nix/${sources}.json -r 81 | } 82 | 83 | update_sources_pin() { 84 | local repo=$1 sources=$2 pin=$3 commit=$4 hash=$5 oldcommit oldhash 85 | 86 | oldcommit=$(repo_sources_pin_commit "$repo" "$sources" "$pin") 87 | oldhash=$(repo_sources_pin_hash "$repo" "$sources" "$pin") 88 | if test "$oldcommit" != "$commit" -o "$oldhash" != "$hash" 89 | then sed -i "s/${oldcommit}/${commit}/" "${repo}"/nix/${sources}.json 90 | sed -i "s/${oldhash}/${hash}/" "${repo}"/nix/${sources}.json 91 | cat < ${commit} 94 | ${oldhash} -> ${hash} 95 | EOF 96 | fi 97 | } 98 | 99 | update_sources_pin "$this_repo" 'sources.bench' 'cardano-node' \ 100 | 
"${repo_commit['cardano-node']}" \ 101 | "${repo_hash['cardano-node']}" 102 | 103 | if test -n "$stage_changes" 104 | then git add "$this_repo"/nix/sources.json "$this_repo"/nix/sources.bench.json 105 | fi 106 | -------------------------------------------------------------------------------- /topologies/bench-dense-12.nix: -------------------------------------------------------------------------------- 1 | { 2 | coreNodes = [ 3 | { 4 | name = "node-0"; 5 | nodeId = 0; 6 | org = "IOHK"; 7 | region = "eu-central-1"; 8 | producers = ["node-1" "node-2" "node-3" "node-4" "node-5" "node-6" "node-7" "node-8" "node-9" "node-10" "node-11"]; 9 | } 10 | { 11 | name = "node-1"; 12 | nodeId = 1; 13 | org = "IOHK"; 14 | region = "eu-central-1"; 15 | producers = ["node-0" "node-2" "node-3" "node-4" "node-5" "node-6" "node-7" "node-8" "node-9" "node-10" "node-11"]; 16 | pools = 100; 17 | } 18 | { 19 | name = "node-2"; 20 | nodeId = 2; 21 | org = "IOHK"; 22 | region = "eu-central-1"; 23 | producers = ["node-0" "node-1" "node-3" "node-4" "node-5" "node-6" "node-7" "node-8" "node-9" "node-10" "node-11"]; 24 | pools = 100; 25 | } 26 | { 27 | name = "node-3"; 28 | nodeId = 3; 29 | org = "IOHK"; 30 | region = "eu-central-1"; 31 | producers = ["node-0" "node-1" "node-2" "node-4" "node-5" "node-6" "node-7" "node-8" "node-9" "node-10" "node-11"]; 32 | pools = 100; 33 | } 34 | { 35 | name = "node-4"; 36 | nodeId = 4; 37 | org = "IOHK"; 38 | region = "ap-southeast-2"; 39 | producers = ["node-0" "node-1" "node-2" "node-3" "node-5" "node-6" "node-7" "node-8" "node-9" "node-10" "node-11"]; 40 | pools = 100; 41 | } 42 | { 43 | name = "node-5"; 44 | nodeId = 5; 45 | org = "IOHK"; 46 | region = "ap-southeast-2"; 47 | producers = ["node-0" "node-1" "node-2" "node-3" "node-4" "node-6" "node-7" "node-8" "node-9" "node-10" "node-11"]; 48 | pools = 100; 49 | } 50 | { 51 | name = "node-6"; 52 | nodeId = 6; 53 | org = "IOHK"; 54 | region = "ap-southeast-2"; 55 | producers = ["node-0" "node-1" "node-2" 
"node-3" "node-4" "node-5" "node-7" "node-8" "node-9" "node-10" "node-11"]; 56 | pools = 100; 57 | } 58 | { 59 | name = "node-7"; 60 | nodeId = 7; 61 | org = "IOHK"; 62 | region = "ap-southeast-2"; 63 | producers = ["node-0" "node-1" "node-2" "node-3" "node-4" "node-5" "node-6" "node-8" "node-9" "node-10" "node-11"]; 64 | pools = 100; 65 | } 66 | 67 | { 68 | name = "node-8"; 69 | nodeId = 8; 70 | org = "IOHK"; 71 | region = "us-east-1"; 72 | producers = ["node-0" "node-1" "node-2" "node-3" "node-4" "node-5" "node-6" "node-7" "node-9" "node-10" "node-11"]; 73 | pools = 100; 74 | } 75 | { 76 | name = "node-9"; 77 | nodeId = 9; 78 | org = "IOHK"; 79 | region = "us-east-1"; 80 | producers = ["node-0" "node-1" "node-2" "node-3" "node-4" "node-5" "node-6" "node-7" "node-8" "node-10" "node-11"]; 81 | pools = 100; 82 | } 83 | { 84 | name = "node-10"; 85 | nodeId = 10; 86 | org = "IOHK"; 87 | region = "us-east-1"; 88 | producers = ["node-0" "node-1" "node-2" "node-3" "node-4" "node-5" "node-6" "node-7" "node-8" "node-9" "node-11"]; 89 | pools = 100; 90 | } 91 | { 92 | name = "node-11"; 93 | nodeId = 11; 94 | org = "IOHK"; 95 | region = "us-east-1"; 96 | producers = ["node-0" "node-1" "node-2" "node-3" "node-4" "node-5" "node-6" "node-7" "node-8" "node-9" "node-10"]; 97 | pools = 1; 98 | } 99 | ]; 100 | 101 | relayNodes = []; 102 | } 103 | -------------------------------------------------------------------------------- /modules/db-sync.nix: -------------------------------------------------------------------------------- 1 | pkgs: { 2 | dbSyncPkgs, 3 | cardanoNodePackages, 4 | additionalDbUsers ? [] 5 | } : { name, config, options, ... 
}: 6 | with pkgs; 7 | 8 | let 9 | cfg = config.services.cardano-db-sync; 10 | nodeCfg = config.services.cardano-node; 11 | 12 | cardanoNodeConfigPath = builtins.toFile "cardano-node-config.json" (builtins.toJSON nodeCfg.nodeConfig); 13 | 14 | inherit (dbSyncPkgs) cardanoDbSyncHaskellPackages; 15 | package = cardanoDbSyncHaskellPackages.cardano-db-sync-extended.components.exes.cardano-db-sync-extended 16 | # No more extended from 13.x onward: 17 | or cardanoDbSyncHaskellPackages.cardano-db-sync.components.exes.cardano-db-sync; 18 | inherit (cardanoDbSyncHaskellPackages.cardano-db-tool.components.exes) cardano-db-tool; 19 | in { 20 | 21 | imports = [ 22 | cardano-ops.modules.cardano-postgres 23 | cardano-ops.modules.base-service 24 | (sourcePaths.cardano-db-sync-service + "/nix/nixos") 25 | ]; 26 | 27 | environment.systemPackages = with pkgs; [ 28 | bat fd lsof netcat ncdu ripgrep tree vim dnsutils 29 | cardano-db-tool 30 | ]; 31 | 32 | services.cardano-postgres.enable = true; 33 | services.postgresql = { 34 | ensureDatabases = [ "cexplorer" "cgql" ]; 35 | initialScript = builtins.toFile "enable-pgcrypto.sql" '' 36 | \connect template1 37 | CREATE EXTENSION IF NOT EXISTS pgcrypto SCHEMA pg_catalog; 38 | ''; 39 | ensureUsers = [ 40 | { 41 | name = "cexplorer"; 42 | ensurePermissions = { 43 | "DATABASE cexplorer" = "ALL PRIVILEGES"; 44 | "DATABASE cgql" = "ALL PRIVILEGES"; 45 | "ALL TABLES IN SCHEMA information_schema" = "SELECT"; 46 | "ALL TABLES IN SCHEMA pg_catalog" = "SELECT"; 47 | }; 48 | } 49 | ]; 50 | identMap = '' 51 | explorer-users postgres postgres 52 | ${lib.concatMapStrings (user: '' 53 | explorer-users ${user} cexplorer 54 | '') (["root" "cardano-db-sync" ] ++ additionalDbUsers)}''; 55 | authentication = '' 56 | local all all ident map=explorer-users 57 | ''; 58 | }; 59 | 60 | services.cardano-node = { 61 | inherit cardanoNodePackages; 62 | allProducers = if (globals.topology.relayNodes != [] || (globals.deploymentName != globals.environmentName)) 63 | 
then [ (topology-lib.envRegionalRelaysProducer config.deployment.ec2.region 2) ] 64 | else if (globals.topology.coreNodes != []) 65 | then (map (n: n.name) globals.topology.coreNodes) 66 | else [ (topology-lib.envRegionalRelaysProducer config.deployment.ec2.region 2) ]; 67 | totalMaxHeapSizeMbytes = 0.25 * config.node.memory * 1024; 68 | }; 69 | 70 | services.cardano-db-sync = { 71 | enable = true; 72 | inherit package; 73 | cluster = globals.environmentName; 74 | environment = globals.environmentConfig; 75 | # inherit (cfg.environment) explorerConfig; 76 | socketPath = nodeCfg.socketPath 0; 77 | logConfig = iohkNix.cardanoLib.defaultExplorerLogConfig // { PrometheusPort = globals.cardanoExplorerPrometheusExporterPort; }; 78 | inherit dbSyncPkgs; 79 | postgres = { 80 | database = "cexplorer"; 81 | }; 82 | }; 83 | 84 | systemd.services.cardano-db-sync.serviceConfig = { 85 | # FIXME: https://github.com/input-output-hk/cardano-db-sync/issues/102 86 | Restart = "always"; 87 | RestartSec = "30s"; 88 | }; 89 | 90 | services.monitoring-exporters.extraPrometheusExporters = [ 91 | # TODO: remove once explorer exports metrics at path `/metrics` 92 | { 93 | job_name = "explorer-exporter"; 94 | scrape_interval = "10s"; 95 | port = globals.cardanoExplorerPrometheusExporterPort; 96 | metrics_path = "/"; 97 | labels = { alias = "${name}-exporter"; }; 98 | } 99 | ]; 100 | } 101 | -------------------------------------------------------------------------------- /bench/lib.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | msg() { 4 | echo "$*" >&2 5 | } 6 | 7 | msg_ne() { 8 | echo -ne "$*" >&2 9 | } 10 | 11 | fail() { 12 | msg "$*" 13 | exit 1 14 | } 15 | 16 | failusage() { 17 | msg "USAGE: $(basename "$0") $*" 18 | exit 1 19 | } 20 | 21 | oprint() { 22 | msg "--( $*" 23 | } 24 | export -f oprint 25 | oprint_ne() { 26 | msg_ne "--( $*" 27 | } 28 | 29 | oprint_top() { 30 | ## This only prints if ran from the top-level 
shell process. 31 | if test -z "${lib_recursing}"; then oprint "$@"; fi 32 | } 33 | export -f oprint_top 34 | 35 | vprint() { 36 | if test -n "${verbose}${debug}"; then msg "-- $*"; fi 37 | } 38 | export -f vprint 39 | vprint_top() { 40 | ## This only prints if either in debug mode, 41 | ## or ran from the top-level shell process. 42 | if test -z "${lib_recursing}" -o -n "${debug}"; then vprint "$@"; fi 43 | } 44 | export -f vprint_top 45 | 46 | dprint() { 47 | if test -n "${debug}"; then msg "-- $*"; fi 48 | } 49 | export -f dprint 50 | 51 | errprint() { 52 | msg_ne "***\n*** ERROR: $*\n***\n" 53 | } 54 | 55 | fprint() { 56 | msg "-- FATAL: $*" 57 | } 58 | export -f fprint 59 | 60 | jqtest() { 61 | jq --exit-status "$@" > /dev/null 62 | } 63 | 64 | ## Null input jq test 65 | njqtest() { 66 | jqtest --null-input "$@" 67 | } 68 | 69 | ## Reverse JQ -- essentially flips its two first args 70 | rjq() { 71 | local f="$1"; q="$2"; shift 2 72 | jq "$q" "$f" "$@" 73 | } 74 | 75 | ## Raw Reverse JQ -- as "rjq", but also --raw-output, for shell convenience. 76 | rrjq() { 77 | local f="$1"; q="$2"; shift 2 78 | jq "$q" "$f" --raw-output "$@" 79 | } 80 | 81 | ## Reverse JQ TEST -- as "rjq", but with --exit-status, for shell convenience. 
82 | rjqtest() { 83 | local f="$1"; q="$2"; shift 2 84 | jq --exit-status "$q" "$f" "$@" >/dev/null 85 | } 86 | 87 | timestamp() { 88 | date +'%Y''%m''%d''%H''%M' 89 | } 90 | 91 | words_to_lines() { 92 | sed 's_ _\n_g' 93 | } 94 | 95 | json_file_append() { 96 | local f=$1 extra=$2 tmp; shift 2 97 | tmp=$(mktemp --tmpdir) 98 | 99 | test -f "$f" || echo "{}" > "$f" 100 | jq ' $origf[0] as $orig 101 | | $orig * ('"$extra"') 102 | ' --slurpfile origf "$f" "$@" > "$tmp" 103 | mv "$tmp" "$f" 104 | } 105 | 106 | json_file_prepend() { 107 | local f=$1 extra=$2 tmp; shift 2 108 | tmp=$(mktemp --tmpdir) 109 | 110 | test -f "$f" || echo "{}" > "$f" 111 | jq ' $origf[0] as $orig 112 | | ('"$extra"') * $orig 113 | ' --slurpfile origf "$f" "$@" > "$tmp" 114 | mv "$tmp" "$f" 115 | } 116 | 117 | shell_list_to_json() { 118 | words_to_lines | jq --raw-input | jq --slurp --compact-output 119 | } 120 | 121 | generate_mnemonic() 122 | { 123 | local mnemonic timestamp commit status 124 | mnemonic=$(nix-shell -p diceware --run 'diceware --no-caps --num 2 --wordlist en_eff -d-') 125 | # local timestamp=$(date +%s) 126 | timestamp=$(timestamp) 127 | commit=$(git rev-parse HEAD | cut -c-8) 128 | status='' 129 | 130 | if git diff --quiet --exit-code 131 | then status= 132 | else status=+ 133 | fi 134 | 135 | echo "${timestamp}.${commit}${status}.${mnemonic}" 136 | } 137 | 138 | maybe_local_repo_branch() { 139 | local local_repo_path=$1 rev=$2 140 | git -C "$local_repo_path" describe --all "$rev" | 141 | sed 's_^\(.*/\|\)\([^/]*\)$_\2_' 142 | ## This needs a shallow clone to be practical. 
143 | } 144 | 145 | min() { 146 | if test "$1" -gt "$2"; then echo -n "$2"; else echo -n "$1"; fi 147 | } 148 | 149 | max() { 150 | if test "$1" -lt "$2"; then echo -n "$2"; else echo -n "$1"; fi 151 | } 152 | -------------------------------------------------------------------------------- /topologies/p2p.nix: -------------------------------------------------------------------------------- 1 | pkgs: with pkgs; with lib; with topology-lib; 2 | let 3 | 4 | regions = { 5 | a = { name = "eu-central-1"; # Europe (Frankfurt); 6 | }; 7 | b = { name = "us-east-2"; # US East (Ohio) 8 | }; 9 | c = { name = "ap-southeast-1"; # Asia Pacific (Singapore) 10 | }; 11 | d = { name = "eu-west-2"; # Europe (London) 12 | }; 13 | e = { name = "us-west-1"; # US West (N. California) 14 | }; 15 | f = { name = "ap-northeast-1"; # Asia Pacific (Tokyo) 16 | }; 17 | }; 18 | 19 | bftNodes = [ 20 | (mkBftCoreNode "a" 1 { org = "IOHK"; nodeId = 1; }) 21 | ]; 22 | 23 | nodes = with regions; map (composeAll [ 24 | (withAutoRestartEvery 6) 25 | (withModule { 26 | services.cardano-node = { 27 | asserts = true; 28 | systemdSocketActivation = mkForce false; 29 | }; 30 | }) 31 | ]) (concatLists [ 32 | (mkStakingPoolNodes "a" 1 "d" "P2P1" { org = "IOHK"; nodeId = 2; }) 33 | (mkStakingPoolNodes "b" 2 "e" "P2P2" { org = "IOHK"; nodeId = 3; }) 34 | (mkStakingPoolNodes "c" 3 "f" "P2P3" { org = "IOHK"; nodeId = 4; }) 35 | (mkStakingPoolNodes "d" 4 "a" "P2P4" { org = "IOHK"; nodeId = 5; }) 36 | (mkStakingPoolNodes "e" 5 "b" "P2P5" { org = "IOHK"; nodeId = 6; }) 37 | (mkStakingPoolNodes "f" 6 "c" "P2P6" { org = "IOHK"; nodeId = 7; }) 38 | (mkStakingPoolNodes "a" 7 "d" "P2P7" { org = "IOHK"; nodeId = 8; }) 39 | (mkStakingPoolNodes "b" 8 "e" "P2P8" { org = "IOHK"; nodeId = 9; }) 40 | (mkStakingPoolNodes "c" 9 "f" "P2P9" { org = "IOHK"; nodeId = 10; }) 41 | (mkStakingPoolNodes "d" 10 "a" "P2P10" { org = "IOHK"; nodeId = 11; }) 42 | (mkStakingPoolNodes "e" 11 "b" "P2P11" { org = "IOHK"; nodeId = 12; }) 43 | 
(mkStakingPoolNodes "f" 12 "c" "P2P12" { org = "IOHK"; nodeId = 13; }) 44 | (mkStakingPoolNodes "a" 13 "d" "P2P13" { org = "IOHK"; nodeId = 14; }) 45 | (mkStakingPoolNodes "b" 14 "e" "P2P14" { org = "IOHK"; nodeId = 15; }) 46 | (mkStakingPoolNodes "c" 15 "f" "P2P15" { org = "IOHK"; nodeId = 16; }) 47 | (mkStakingPoolNodes "d" 16 "a" "P2P16" { org = "IOHK"; nodeId = 17; }) 48 | (mkStakingPoolNodes "e" 17 "b" "P2P17" { org = "IOHK"; nodeId = 18; }) 49 | (mkStakingPoolNodes "f" 18 "c" "P2P18" { org = "IOHK"; nodeId = 19; }) 50 | (mkStakingPoolNodes "a" 19 "d" "P2P19" { org = "IOHK"; nodeId = 20; }) 51 | (mkStakingPoolNodes "b" 20 "e" "P2P20" { org = "IOHK"; nodeId = 21; }) 52 | ] ++ bftNodes); 53 | 54 | relayNodes = regionalConnectGroupWith bftNodes 55 | (filter (n: !(n ? stakePool)) nodes); 56 | 57 | coreNodes = filter (n: n ? stakePool) nodes; 58 | 59 | in { 60 | 61 | inherit coreNodes relayNodes regions; 62 | 63 | explorer = { 64 | services.cardano-node = { 65 | package = mkForce cardano-node; 66 | systemdSocketActivation = mkForce false; 67 | }; 68 | containers = mapAttrs (b: _: { 69 | config = { 70 | services.cardano-graphql = { 71 | allowListPath = mkForce null; 72 | allowIntrospection = true; 73 | }; 74 | services.cardano-node = { 75 | package = mkForce cardano-node; 76 | systemdSocketActivation = mkForce false; 77 | }; 78 | }; 79 | }) globals.explorerBackends; 80 | }; 81 | 82 | smash = { 83 | services.cardano-node = { 84 | package = mkForce cardano-node; 85 | systemdSocketActivation = mkForce false; 86 | }; 87 | }; 88 | 89 | "${globals.faucetHostname}" = { 90 | services.cardano-faucet = { 91 | anonymousAccess = false; 92 | faucetLogLevel = "DEBUG"; 93 | secondsBetweenRequestsAnonymous = 86400; 94 | secondsBetweenRequestsApiKeyAuth = 86400; 95 | lovelacesToGiveAnonymous = 1000000000; 96 | lovelacesToGiveApiKeyAuth = 10000000000; 97 | useByronWallet = false; 98 | }; 99 | services.cardano-node = { 100 | package = mkForce cardano-node; 101 | 
systemdSocketActivation = mkForce false; 102 | }; 103 | }; 104 | 105 | monitoring = { 106 | services.monitoring-services.publicGrafana = false; 107 | services.nginx.virtualHosts."monitoring.${globals.domain}".locations."/p" = { 108 | root = ../static/pool-metadata; 109 | }; 110 | }; 111 | 112 | } 113 | -------------------------------------------------------------------------------- /examples/shelley-testnet/scripts/submit-update-proposal.sh: -------------------------------------------------------------------------------- 1 | set -euo pipefail 2 | # TODO: Wait till the network starts 3 | # SLOT_NO=`cardano-cli query tip --testnet-magic 42 | jq ".slotNo"` 4 | # while [ ] 5 | 6 | # Payment key to pay for the different transactions in this script. 7 | UTXO=keys/utxo 8 | DELEGATE=keys/delegate 9 | FEE=0 10 | TTL=1000000 11 | 12 | PROPOSAL_FILE=update.proposal 13 | 14 | # Wait till the beginning of a new epoch and set that epoch as $PROPOSAL_EPOCH 15 | SLOT_NO=`cardano-cli query tip --testnet-magic 42 | jq ".slotNo"` 16 | while [ -z "${SLOT_NO-}" ]; do 17 | SLOT_NO=`cardano-cli query tip --testnet-magic 42 | jq ".slotNo"` 18 | sleep 10 19 | done 20 | 21 | [ -z "${SLOT_NO=:-}" ] && echo "SLOT_NO is not set or is empty" || echo "SLOT_NO is set to $SLOT_NO" 22 | 23 | CURRENT_EPOCH=$((SLOT_NO / EPOCH_LENGTH)) 24 | PROPOSAL_EPOCH=$((CURRENT_EPOCH + 1)) 25 | echo "Waiting on epoch change for submitting the proposal" 26 | echo "Current epoch: $CURRENT_EPOCH, proposal epoch: $PROPOSAL_EPOCH" 27 | while [ "$CURRENT_EPOCH" -lt "$PROPOSAL_EPOCH" ]; do 28 | sleep 5 29 | SLOT_NO=`cardano-cli query tip --testnet-magic 42 | jq ".slotNo"` 30 | CURRENT_EPOCH=$((SLOT_NO / EPOCH_LENGTH)) 31 | echo -ne "Current slot: $SLOT_NO, epoch will change on slot $((EPOCH_LENGTH*PROPOSAL_EPOCH))\r" 32 | done 33 | echo 34 | 35 | GENESIS=keys/genesis 36 | D_PARAM=0.59 37 | cardano-cli governance create-update-proposal \ 38 | --epoch $PROPOSAL_EPOCH \ 39 | --decentralization-parameter $D_PARAM \ 40 | 
--out-file $PROPOSAL_FILE \ 41 | --genesis-verification-key-file $GENESIS.vkey 42 | 43 | 44 | INITIAL_ADDR=initial.addr 45 | # Get the initial address, which will be used as input by the transaction that 46 | # submits the update proposal. 47 | cardano-cli genesis initial-addr \ 48 | --testnet-magic 42 \ 49 | --verification-key-file $UTXO.vkey > $INITIAL_ADDR 50 | 51 | TX_INFO=/tmp/tx-info.json 52 | # Build a transaction that contains the update proposal 53 | cardano-cli query utxo --testnet-magic 42 --shelley-mode\ 54 | --address $(cat $INITIAL_ADDR) \ 55 | --out-file $TX_INFO 56 | TX_IN=`grep -oP '"\K[^"]+' -m 1 $TX_INFO | head -1 | tr -d '\n'` 57 | 58 | cardano-cli query utxo --testnet-magic 42 --shelley-mode \ 59 | --address $(cat initial.addr) \ 60 | --out-file /tmp/balance.json 61 | BALANCE=`jq '.[].amount' /tmp/balance.json | xargs printf '%.0f\n'` 62 | CHANGE=`expr $BALANCE - $FEE` 63 | 64 | cardano-cli transaction build-raw \ 65 | --tx-in $TX_IN \ 66 | --tx-out $(cat $INITIAL_ADDR)+$CHANGE \ 67 | --ttl $TTL \ 68 | --fee $FEE \ 69 | --update-proposal-file $PROPOSAL_FILE \ 70 | --out-file tx.raw 71 | cardano-cli transaction sign \ 72 | --tx-body-file tx.raw \ 73 | --signing-key-file $UTXO.skey \ 74 | --signing-key-file $DELEGATE.skey \ 75 | --testnet-magic 42 \ 76 | --out-file tx.signed 77 | cardano-cli transaction submit \ 78 | --tx-file tx.signed \ 79 | --testnet-magic 42 \ 80 | --shelley-mode 81 | 82 | SLOT_NO=`cardano-cli query tip --testnet-magic 42 | jq ".slotNo"` 83 | CURRENT_EPOCH=`expr $SLOT_NO / $EPOCH_LENGTH` 84 | ACTIVATION_EPOCH=`expr $PROPOSAL_EPOCH + 1` 85 | echo "Waiting for the proposal to become active." 
86 | echo "Current epoch: $CURRENT_EPOCH, proposal active on epoch $ACTIVATION_EPOCH" 87 | while [ "$CURRENT_EPOCH" -lt "$ACTIVATION_EPOCH" ]; do 88 | sleep 1 89 | SLOT_NO=`cardano-cli query tip --testnet-magic 42 | jq ".slotNo"` 90 | CURRENT_EPOCH=`expr $SLOT_NO / $EPOCH_LENGTH` 91 | echo -ne "Current slot: $SLOT_NO, epoch will change on slot $((EPOCH_LENGTH*(CURRENT_EPOCH+1)))\r" 92 | done 93 | 94 | echo 95 | 96 | CURRENT_D_PARAM=`cardano-cli query protocol-parameters --testnet-magic 42 --shelley-mode | jq '.decentralisationParam'` 97 | 98 | if [ "$CURRENT_D_PARAM" = "$D_PARAM" ]; 99 | then 100 | echo "Decentralization parameter successfully changed." 101 | else 102 | echo "Decentralization parameter was not changed." 103 | echo "Current decentralization parameter: $CURRENT_D_PARAM" 104 | echo "Expected decentralization parameter: $D_PARAM " 105 | exit 1 106 | fi 107 | -------------------------------------------------------------------------------- /globals-mainnet.nix: -------------------------------------------------------------------------------- 1 | pkgs: { 2 | 3 | deploymentName = "mainnet"; 4 | 5 | dnsZone = "${pkgs.globals.domain}"; 6 | 7 | domain = "cardano-mainnet.iohk.io"; 8 | 9 | # Override relaysNew with the legacy FQDN since the 8.7.2 10 | # iohk-nix pin contains an updated backbone CNAME reference 11 | relaysNew = "relays-new.cardano-mainnet.iohk.io"; 12 | 13 | # Base line number of cardano-node instance per relay. 14 | # Override the default machine sized calculation with a fixed value of 1 15 | # for mainnet legacy relay scale down. 
16 | nbInstancesPerRelay = 1; 17 | 18 | # Explorer gateway and backends have moved to ci-world 19 | withExplorer = false; 20 | explorerHostName = "explorer.cardano.org"; 21 | explorerForceSSL = true; 22 | explorerAliases = [ "explorer.mainnet.cardano.org" "explorer.${pkgs.globals.domain}" ]; 23 | explorerBackends = { 24 | a = pkgs.globals.explorer13; 25 | b = pkgs.globals.explorer13; 26 | c = pkgs.globals.explorer13; 27 | }; 28 | 29 | explorerActiveBackends = [ 30 | "a" 31 | "b" 32 | "c" 33 | ]; 34 | 35 | withHighCapacityMonitoring = true; 36 | withHighCapacityExplorer = true; 37 | withHighLoadRelays = true; 38 | withSmash = true; 39 | withSnapshots = false; 40 | 41 | withMetadata = false; 42 | metadataHostName = "tokens.cardano.org"; 43 | 44 | initialPythonExplorerDBSyncDone = true; 45 | 46 | environmentName = "mainnet"; 47 | 48 | topology = import ./topologies/mainnet.nix pkgs; 49 | 50 | maxRulesPerSg = { 51 | IOHK = 61; 52 | Emurgo = 36; 53 | CF = 36; 54 | }; 55 | 56 | minMemoryPerInstance = 15; 57 | 58 | # GB per node instance 59 | nodeDbDiskAllocationSize = 140; 60 | 61 | ec2 = { 62 | credentials = { 63 | accessKeyIds = { 64 | IOHK = "mainnet-iohk"; 65 | Emurgo = "mainnet-emurgo"; 66 | CF = "mainnet-cf"; 67 | dns = "mainnet-iohk"; 68 | }; 69 | }; 70 | instances = with pkgs.iohk-ops-lib.physical.aws; { 71 | core-node = r5-large; 72 | }; 73 | }; 74 | 75 | relayUpdateArgs = "-m 2400 --maxNodes 11 -s -e devops@iohk.io"; 76 | # Trigger relay topology refresh 12 hours before next epoch 77 | relayUpdateHoursBeforeNextEpoch = 12; 78 | 79 | snapshotStatesArgs = "-e devops@iohk.io"; 80 | 81 | alertChainDensityLow = "85"; 82 | 83 | snapshotStatesS3Bucket = "update-cardano-mainnet.iohk.io"; 84 | 85 | smashDelistedPools = [ 86 | "413b0496a93ff4ef5d7436828e9764d37778d74d60a62451cfbed057" 87 | "ce2e5bbae0caa514670d63cfdad3123a5d32cf7c37df87add5a0f75f" 88 | "2b830258888a09e846b63474c642ad4e18aecd08dafb1f2a4d653e80" 89 | 
"027a08f49ad5ece08e3a1575fb9cd8e8d7cf3b7815807a20b1a715f1" 90 | "4eb1fac09251f8af19ad6b7e06b71cbad09dbe896b481e4670fe565d" 91 | "bf44d3187cbdd8874dca1f714a6107beea642753228490bc02c8e038" 92 | "00429f0a3e8c48d644a9b45babd09b86c367efe745a35b31f10e859f" 93 | "8bc067247b8a85500d40d7bb78afd4de6a5fed2cfcc82c9b9c2fa8a2" 94 | "e7e18f2050fa307fc9405f1d517760e894f8fbdf41a9b1b280571b38" 95 | "27f4e3c309659f824026893b811dd6e70332881867cb2cba4974191c" 96 | "c73186434c6fc6676bd67304d34518fc6fd7d5eaddaf78641b1e7dcf" 97 | "2064da38531dad327135edd98003032cefa059c4c8c50c2b0440c63d" 98 | "d9df218f8099261e019bdd304b9a40228070ce61272af835ea13d161" 99 | "d7d56e1703630780176cf944a77b7829b4ba97888fa9a32468011985" 100 | "82e5cb6e4b443c36b087e6218a5629291585d35083ce2cb625506e1f" 101 | "0e76c44520b9d7f2e211eccd82de49350288368802c7aaa72a13c3fa" 102 | "d471e981d54a7f60496f9239d2d706db7a71df8517025f478c112e3e" 103 | "f537b3a5ac2ecdc854a535a15f7732632375a0bf2af17dccbe5b422d" 104 | "033fa1cdc17193fa3d549e795591999621e749fd7ef48f7380468d14" 105 | "47e694d52e08b1a65636c07911e7dd4282afbc555bccfda22c3c52f0" 106 | "8e2f5c1e8ca0f70f00a9b17de911af716678f9e2b653728f356c7ef6" 107 | "57ff19460990690bc6a2edfae4dbaaa56ce2fedbcec9b37334d33a1c" 108 | "2d6765748cc86efe862f5abeb0c0271f91d368d300123ecedc078ef2" 109 | "58b5c7e14957e2f32e988440b0e353ff939dc1597f5c8a4674b53d47" 110 | "93932f14ee3117ba4ac119f9f7722f1143d02760cd5328955804ea36" 111 | "40efc97d08315ff9be5898f24af5b8b120669b43027662c3499dd785" 112 | "ae8dbaaa4ebfdba74618653a619d28d58232638ac83ccb5d66edee36" 113 | "78e07590b2a28ca0e90602fb9319e11770b516ec34387c663ebda287" 114 | "718268428577002a004ef37ef62933c21d82e41cd7816da381716291" 115 | ]; 116 | } 117 | -------------------------------------------------------------------------------- /modules/grafana/cardano/systemd-service-restarts.json: -------------------------------------------------------------------------------- 1 | { 2 | "annotations": { 3 | "list": [ 4 | { 5 | "builtIn": 1, 6 | "datasource": 
"-- Grafana --", 7 | "enable": true, 8 | "hide": true, 9 | "iconColor": "rgba(0, 211, 255, 1)", 10 | "name": "Annotations & Alerts", 11 | "type": "dashboard" 12 | } 13 | ] 14 | }, 15 | "description": "Shows systemd service restarts for a selected node", 16 | "editable": true, 17 | "gnetId": null, 18 | "graphTooltip": 0, 19 | "id": 29, 20 | "iteration": 1580327126583, 21 | "links": [], 22 | "panels": [ 23 | { 24 | "aliasColors": {}, 25 | "bars": false, 26 | "dashLength": 10, 27 | "dashes": false, 28 | "datasource": "prometheus", 29 | "description": "Shows how many times cardano related services have been restarted within a 1 hour lookback window", 30 | "fill": 1, 31 | "fillGradient": 0, 32 | "gridPos": { 33 | "h": 15, 34 | "w": 24, 35 | "x": 0, 36 | "y": 0 37 | }, 38 | "id": 2, 39 | "interval": "", 40 | "legend": { 41 | "avg": false, 42 | "current": false, 43 | "max": false, 44 | "min": false, 45 | "show": true, 46 | "total": false, 47 | "values": false 48 | }, 49 | "lines": true, 50 | "linewidth": 1, 51 | "nullPointMode": "null", 52 | "options": { 53 | "dataLinks": [] 54 | }, 55 | "percentage": false, 56 | "pointradius": 2, 57 | "points": false, 58 | "renderer": "flot", 59 | "seriesOverrides": [], 60 | "spaceLength": 10, 61 | "stack": false, 62 | "steppedLine": false, 63 | "targets": [ 64 | { 65 | "expr": "round(increase(node_systemd_unit_state{alias=\"$ALIAS\",state=\"active\"}[1h:10s])) > 0", 66 | "hide": false, 67 | "interval": "", 68 | "legendFormat": "{{alias}}-{{name}}", 69 | "refId": "A" 70 | } 71 | ], 72 | "thresholds": [], 73 | "timeFrom": null, 74 | "timeRegions": [], 75 | "timeShift": null, 76 | "title": "Systemd Service Restarts Per Hour", 77 | "tooltip": { 78 | "shared": true, 79 | "sort": 0, 80 | "value_type": "individual" 81 | }, 82 | "type": "graph", 83 | "xaxis": { 84 | "buckets": null, 85 | "mode": "time", 86 | "name": null, 87 | "show": true, 88 | "values": [] 89 | }, 90 | "yaxes": [ 91 | { 92 | "format": "none", 93 | "label": null, 94 | 
"logBase": 1, 95 | "max": null, 96 | "min": null, 97 | "show": true 98 | }, 99 | { 100 | "format": "short", 101 | "label": null, 102 | "logBase": 1, 103 | "max": null, 104 | "min": null, 105 | "show": true 106 | } 107 | ], 108 | "yaxis": { 109 | "align": false, 110 | "alignLevel": null 111 | } 112 | } 113 | ], 114 | "schemaVersion": 20, 115 | "style": "dark", 116 | "tags": [], 117 | "templating": { 118 | "list": [ 119 | { 120 | "allValue": null, 121 | "current": { 122 | "text": "c-a-1-ip", 123 | "value": "c-a-1-ip" 124 | }, 125 | "datasource": "prometheus", 126 | "definition": "up", 127 | "hide": 0, 128 | "includeAll": false, 129 | "label": null, 130 | "multi": false, 131 | "name": "ALIAS", 132 | "options": [], 133 | "query": "up", 134 | "refresh": 2, 135 | "regex": "/.*alias=\"([^\"]*).*/", 136 | "skipUrlSync": false, 137 | "sort": 5, 138 | "tagValuesQuery": "", 139 | "tags": [], 140 | "tagsQuery": "", 141 | "type": "query", 142 | "useTags": false 143 | } 144 | ] 145 | }, 146 | "time": { 147 | "from": "now-6h", 148 | "to": "now" 149 | }, 150 | "timepicker": { 151 | "refresh_intervals": [ 152 | "5s", 153 | "10s", 154 | "30s", 155 | "1m", 156 | "5m", 157 | "15m", 158 | "30m", 159 | "1h", 160 | "2h", 161 | "1d" 162 | ] 163 | }, 164 | "timezone": "", 165 | "title": "Systemd Service Restarts", 166 | "uid": "3V_B7ryWk", 167 | "version": 1 168 | } 169 | -------------------------------------------------------------------------------- /physical/mock.nix: -------------------------------------------------------------------------------- 1 | { 2 | node-0 = { config, lib, pkgs, ... 
}: { 3 | config = { 4 | boot.kernelModules = []; 5 | networking = { 6 | extraHosts = '' 7 | 1.1.1.1 node-0 node-0-ip 8 | 1.1.1.1 node-1 node-1-ip 9 | 1.1.1.1 node-2 node-2-ip 10 | 1.1.1.1 explorer explorer-ip 11 | ''; 12 | firewall.trustedInterfaces = []; 13 | privateIPv4 = "1.1.1.1"; 14 | publicIPv4 = "1.1.1.1"; 15 | vpnPublicKey = ""; 16 | }; 17 | services.openssh.knownHosts = { 18 | explorer = { hostNames = ["explorer"]; publicKey = ""; }; 19 | }; 20 | system.stateVersion = ( lib.mkDefault "19.09" ); 21 | }; 22 | imports = [ 23 | { 24 | deployment.ec2 = { 25 | blockDeviceMapping = {}; 26 | instanceId = ""; 27 | }; 28 | ec2.hvm = true; 29 | imports = [ ]; 30 | } 31 | ]; 32 | }; 33 | node-1 = { config, lib, pkgs, ... }: { 34 | config = { 35 | boot.kernelModules = []; 36 | networking = { 37 | extraHosts = '' 38 | 1.1.1.1 node-0 node-0-ip 39 | 1.1.1.1 node-1 node-1-ip 40 | 1.1.1.1 node-2 node-2-ip 41 | 1.1.1.1 explorer explorer-ip 42 | ''; 43 | firewall.trustedInterfaces = []; 44 | privateIPv4 = "1.1.1.1"; 45 | publicIPv4 = "1.1.1.1"; 46 | vpnPublicKey = ""; 47 | }; 48 | services.openssh.knownHosts = { 49 | explorer = { hostNames = ["explorer"]; publicKey = ""; }; 50 | }; 51 | system.stateVersion = ( lib.mkDefault "19.09" ); 52 | }; 53 | imports = [ 54 | { 55 | deployment.ec2 = { 56 | blockDeviceMapping = {}; 57 | instanceId = ""; 58 | }; 59 | ec2.hvm = true; 60 | imports = [ ]; 61 | } 62 | ]; 63 | }; 64 | node-2 = { config, lib, pkgs, ... 
}: { 65 | config = { 66 | boot.kernelModules = []; 67 | networking = { 68 | extraHosts = '' 69 | 1.1.1.1 node-0 node-0-ip 70 | 1.1.1.1 node-1 node-1-ip 71 | 1.1.1.1 node-2 node-2-ip 72 | 1.1.1.1 explorer explorer-ip 73 | ''; 74 | firewall.trustedInterfaces = []; 75 | privateIPv4 = "1.1.1.1"; 76 | publicIPv4 = "1.1.1.1"; 77 | vpnPublicKey = ""; 78 | }; 79 | services.openssh.knownHosts = { 80 | explorer = { hostNames = ["explorer"]; publicKey = ""; }; 81 | }; 82 | system.stateVersion = ( lib.mkDefault "19.09" ); 83 | }; 84 | imports = [ 85 | { 86 | deployment.ec2 = { 87 | blockDeviceMapping = {}; 88 | instanceId = ""; 89 | }; 90 | ec2.hvm = true; 91 | imports = [ ]; 92 | } 93 | ]; 94 | }; 95 | explorer = { config, lib, pkgs, ... }: { 96 | config = { 97 | boot.kernelModules = []; 98 | networking = { 99 | extraHosts = '' 100 | 1.1.1.1 node-0 node-0-ip 101 | 1.1.1.1 node-1 node-1-ip 102 | 1.1.1.1 node-2 node-2-ip 103 | 1.1.1.1 explorer explorer-ip 104 | ''; 105 | firewall.trustedInterfaces = []; 106 | privateIPv4 = "1.1.1.1"; 107 | publicIPv4 = "1.1.1.1"; 108 | vpnPublicKey = ""; 109 | }; 110 | services.openssh.knownHosts = { 111 | explorer = { hostNames = ["explorer"]; publicKey = ""; }; 112 | }; 113 | system.stateVersion = ( lib.mkDefault "21.09" ); 114 | }; 115 | imports = [ 116 | { 117 | deployment.ec2 = { 118 | blockDeviceMapping = {}; 119 | instanceId = ""; 120 | }; 121 | ec2.hvm = true; 122 | imports = [ ]; 123 | } 124 | ]; 125 | }; 126 | resources = { 127 | ec2SecurityGroups = { 128 | "allow-deployer-ssh-ap-southeast-2-IOHK" = { config, lib, pkgs, ... }: { 129 | config = {}; 130 | imports = [ { groupId = ""; } ]; 131 | }; 132 | }; 133 | elasticIPs = { 134 | "node-0-ip" = { config, lib, pkgs, ... }: { 135 | config = {}; 136 | imports = [ { address = "1.1.1.1"; } ]; 137 | }; 138 | "node-1-ip" = { config, lib, pkgs, ... }: { 139 | config = {}; 140 | imports = [ { address = "1.1.1.1"; } ]; 141 | }; 142 | "node-2-ip" = { config, lib, pkgs, ... 
}: { 143 | config = {}; 144 | imports = [ { address = "1.1.1.1"; } ]; 145 | }; 146 | "explorer-ip" = { config, lib, pkgs, ... }: { 147 | config = {}; 148 | imports = [ { address = "1.1.1.1"; } ]; 149 | }; 150 | }; 151 | }; 152 | } 153 | -------------------------------------------------------------------------------- /roles/core.nix: -------------------------------------------------------------------------------- 1 | 2 | pkgs: nodeId: {config, name, ...}: 3 | with pkgs; 4 | let 5 | 6 | signingKey = ../keys/byron/delegate-keys + ".${leftPad (nodeId - 1) 3}.key"; 7 | delegationCertificate = ../keys/byron/delegation-cert + ".${leftPad (nodeId - 1) 3}.json"; 8 | 9 | vrfKey = ../keys/node-keys/node-vrf + "${toString nodeId}.skey"; 10 | kesKey = ../keys/node-keys/node-kes + "${toString nodeId}.skey"; 11 | operationalCertificate = ../keys/node-keys/node + "${toString nodeId}.opcert"; 12 | bulkCredentials = ../keys/node-keys/bulk + "${toString nodeId}.creds"; 13 | 14 | keysConfig = rec { 15 | RealPBFT = { 16 | _file = ./core.nix; 17 | services.cardano-node = { 18 | signingKey = "/var/lib/keys/cardano-node-signing"; 19 | delegationCertificate = "/var/lib/keys/cardano-node-delegation-cert"; 20 | }; 21 | systemd.services."cardano-node" = { 22 | after = [ "cardano-node-signing-key.service" "cardano-node-delegation-cert-key.service" ]; 23 | wants = [ "cardano-node-signing-key.service" "cardano-node-delegation-cert-key.service" ]; 24 | }; 25 | deployment.keys = { 26 | "cardano-node-signing" = builtins.trace ("${name}: using " + (toString signingKey)) { 27 | keyFile = signingKey; 28 | user = "cardano-node"; 29 | group = "cardano-node"; 30 | destDir = "/var/lib/keys"; 31 | }; 32 | "cardano-node-delegation-cert" = builtins.trace ("${name}: using " + (toString delegationCertificate)) { 33 | keyFile = delegationCertificate; 34 | user = "cardano-node"; 35 | group = "cardano-node"; 36 | destDir = "/var/lib/keys"; 37 | }; 38 | }; 39 | }; 40 | TPraos = 41 | { 42 | _file = ./core.nix; 43 | 
44 | services.cardano-node = 45 | if config.node.roles.isCardanoDensePool 46 | then { 47 | extraArgs = [ "--bulk-credentials-file" "/var/lib/keys/cardano-node-bulk-credentials" ]; 48 | } 49 | else { 50 | kesKey = "/var/lib/keys/cardano-node-kes-signing"; 51 | vrfKey = "/var/lib/keys/cardano-node-vrf-signing"; 52 | operationalCertificate = "/var/lib/keys/cardano-node-operational-cert"; 53 | }; 54 | 55 | systemd.services."cardano-node" = 56 | if config.node.roles.isCardanoDensePool 57 | then { 58 | after = [ "cardano-node-bulk-credentials-key.service" ]; 59 | wants = [ "cardano-node-bulk-credentials-key.service" ]; 60 | partOf = [ "cardano-node-bulk-credentials-key.service" ]; 61 | } 62 | else { 63 | after = [ "cardano-node-vrf-signing-key.service" "cardano-node-kes-signing-key.service" "cardano-node-operational-cert-key.service" ]; 64 | wants = [ "cardano-node-vrf-signing-key.service" "cardano-node-kes-signing-key.service" "cardano-node-operational-cert-key.service" ]; 65 | partOf = [ "cardano-node-vrf-signing-key.service" "cardano-node-kes-signing-key.service" "cardano-node-operational-cert-key.service" ]; 66 | }; 67 | 68 | deployment.keys = 69 | if config.node.roles.isCardanoDensePool 70 | then { 71 | "cardano-node-bulk-credentials" = builtins.trace ("${name}: using " + (toString bulkCredentials)) { 72 | keyFile = bulkCredentials; 73 | user = "cardano-node"; 74 | group = "cardano-node"; 75 | destDir = "/var/lib/keys"; 76 | }; 77 | } 78 | else { 79 | "cardano-node-vrf-signing" = builtins.trace ("${name}: using " + (toString vrfKey)) { 80 | keyFile = vrfKey; 81 | user = "cardano-node"; 82 | group = "cardano-node"; 83 | destDir = "/var/lib/keys"; 84 | }; 85 | "cardano-node-kes-signing" = builtins.trace ("${name}: using " + (toString kesKey)) { 86 | keyFile = kesKey; 87 | user = "cardano-node"; 88 | group = "cardano-node"; 89 | destDir = "/var/lib/keys"; 90 | }; 91 | "cardano-node-operational-cert" = builtins.trace ("${name}: using " + (toString 
operationalCertificate)) { 92 | keyFile = operationalCertificate; 93 | user = "cardano-node"; 94 | group = "cardano-node"; 95 | destDir = "/var/lib/keys"; 96 | }; 97 | }; 98 | }; 99 | Cardano = 100 | if !(builtins.pathExists signingKey) then TPraos 101 | else if !(builtins.pathExists vrfKey) then RealPBFT 102 | else lib.recursiveUpdate TPraos RealPBFT; 103 | }; 104 | 105 | in { 106 | 107 | imports = [ 108 | cardano-ops.modules.base-service 109 | keysConfig.${globals.environmentConfig.nodeConfig.Protocol} 110 | ]; 111 | 112 | users.users.cardano-node.extraGroups = [ "keys" ]; 113 | 114 | deployment.ec2.ebsInitialRootDiskSize = globals.systemDiskAllocationSize 115 | + globals.nodeDbDiskAllocationSize; 116 | 117 | } 118 | -------------------------------------------------------------------------------- /roles/faucet.nix: -------------------------------------------------------------------------------- 1 | pkgs: { name, config, nodes, resources, ... }: 2 | with pkgs; 3 | let 4 | faucetPkgs = (import (sourcePaths.cardano-faucet + "/nix") {}); 5 | walletPackages = import sourcePaths.cardano-wallet { gitrev = sourcePaths.cardano-wallet.rev; }; 6 | inherit (walletPackages) cardano-wallet; 7 | cardanoNodePackages = getCardanoNodePackages walletPackages.private.project.hsPkgs.cardano-node.src; 8 | inherit (pkgs.lib) mkIf; 9 | in { 10 | 11 | imports = [ 12 | cardano-ops.modules.base-service 13 | 14 | # Cardano faucet needs to pair a compatible version of wallet with node 15 | # The following service import will do this: 16 | (sourcePaths.cardano-faucet + "/nix/nixos/cardano-faucet-service.nix") 17 | ]; 18 | 19 | networking.firewall.allowedTCPPorts = [ 20 | 80 21 | 443 22 | ]; 23 | 24 | environment.systemPackages = with pkgs; [ 25 | sqlite-interactive 26 | ]; 27 | 28 | services.cardano-faucet = { 29 | enable = true; 30 | cardanoEnv = globals.environmentName; 31 | cardanoEnvAttrs = globals.environmentConfig; 32 | walletPackage = cardano-wallet; 33 | }; 34 | 35 | 
services.cardano-node = { 36 | inherit cardanoNodePackages; 37 | allProducers = if (globals.topology.relayNodes != []) 38 | then [ globals.relaysNew ] 39 | else (map (n: n.name) globals.topology.coreNodes); 40 | topology = lib.mkForce null; 41 | totalMaxHeapSizeMbytes = 0.6 * config.node.memory * 1024; 42 | }; 43 | 44 | deployment.keys = { 45 | "faucet.mnemonic" = { 46 | keyFile = ../static + "/faucet.mnemonic"; 47 | destDir = "/var/lib/keys/"; 48 | user = "cardano-node"; 49 | permissions = "0400"; 50 | }; 51 | 52 | "faucet.passphrase" = { 53 | keyFile = ../static + "/faucet.passphrase"; 54 | destDir = "/var/lib/keys/"; 55 | user = "cardano-node"; 56 | permissions = "0400"; 57 | }; 58 | 59 | "faucet.recaptcha" = { 60 | keyFile = ../static + "/faucet.recaptcha"; 61 | destDir = "/var/lib/keys/"; 62 | user = "cardano-node"; 63 | permissions = "0400"; 64 | }; 65 | 66 | "faucet.apikey" = { 67 | keyFile = ../static + "/faucet.apikey"; 68 | destDir = "/var/lib/keys/"; 69 | user = "cardano-node"; 70 | permissions = "0400"; 71 | }; 72 | }; 73 | users.users.cardano-node.extraGroups = [ "keys" ]; 74 | 75 | security.acme = mkIf (config.deployment.targetEnv != "libvirtd") { 76 | email = "devops@iohk.io"; 77 | acceptTerms = true; # https://letsencrypt.org/repository/ 78 | }; 79 | services.nginx = { 80 | enable = true; 81 | recommendedTlsSettings = true; 82 | recommendedOptimisation = true; 83 | recommendedGzipSettings = true; 84 | recommendedProxySettings = true; 85 | serverTokens = false; 86 | mapHashBucketSize = 128; 87 | 88 | commonHttpConfig = '' 89 | log_format x-fwd '$remote_addr - $remote_user [$time_local] ' 90 | '"$request" $status $body_bytes_sent ' 91 | '"$http_referer" "$http_user_agent" "$http_x_forwarded_for"'; 92 | 93 | access_log syslog:server=unix:/dev/log x-fwd; 94 | 95 | limit_req_zone $binary_remote_addr zone=faucetPerIP:100m rate=1r/s; 96 | limit_req_status 429; 97 | server_names_hash_bucket_size 128; 98 | 99 | map $http_origin $origin_allowed { 100 | 
default 0; 101 | https://testnets.cardano.org 1; 102 | https://developers.cardano.org 1; 103 | https://staging-testnets-cardano.netlify.app 1; 104 | http://localhost:8000 1; 105 | } 106 | 107 | map $origin_allowed $origin { 108 | default ""; 109 | 1 $http_origin; 110 | } 111 | ''; 112 | 113 | virtualHosts = { 114 | "${name}.${globals.domain}" = { 115 | forceSSL = config.deployment.targetEnv != "libvirtd"; 116 | enableACME = config.deployment.targetEnv != "libvirtd"; 117 | 118 | locations."/" = { 119 | extraConfig = let 120 | headers = '' 121 | add_header 'Vary' 'Origin' always; 122 | add_header 'Access-Control-Allow-Origin' $origin always; 123 | add_header 'Access-Control-Allow-Methods' 'POST, OPTIONS' always; 124 | add_header 'Access-Control-Allow-Headers' 'User-Agent,X-Requested-With,Content-Type' always; 125 | ''; 126 | in '' 127 | limit_req zone=faucetPerIP; 128 | 129 | if ($request_method = OPTIONS) { 130 | ${headers} 131 | add_header 'Access-Control-Max-Age' 1728000; 132 | add_header 'Content-Type' 'text/plain; charset=utf-8'; 133 | add_header 'Content-Length' 0; 134 | return 204; 135 | break; 136 | } 137 | 138 | if ($request_method = POST) { 139 | ${headers} 140 | } 141 | 142 | proxy_pass http://127.0.0.1:${ 143 | toString config.services.cardano-faucet.faucetListenPort 144 | }; 145 | proxy_set_header Host $host:$server_port; 146 | proxy_set_header X-Real-IP $remote_addr; 147 | ''; 148 | }; 149 | }; 150 | }; 151 | }; 152 | } 153 | -------------------------------------------------------------------------------- /examples/shelley-testnet/scripts/register-stake-pool.sh: -------------------------------------------------------------------------------- 1 | set -euo pipefail 2 | 3 | # A stakepool node needs: 4 | # 1. A cold key pair 5 | # 2. A VRF key pair 6 | # 3. A KES key pair 7 | # 4. 
An operational certificate 8 | 9 | UTXO=keys/utxo 10 | STAKE=stake 11 | PAYMENT_ADDR=payment.addr 12 | COLD=keys/cold 13 | VRF=keys/node-vrf 14 | 15 | # This script assumes the fee to be 0. We might want to check the protocol 16 | # parameters to make sure that this is indeed the case. 17 | FEE=0 18 | # We use a large time-to-live to keep the script simple. 19 | TTL=1000000 20 | 21 | # Create a new stake key pair 22 | cardano-cli stake-address key-gen \ 23 | --verification-key-file $STAKE.vkey \ 24 | --signing-key-file $STAKE.skey 25 | # Use these keys to create a payment address. This key should have funds 26 | # associated to it if we want the stakepool to have stake delegated to it. 27 | cardano-cli address build \ 28 | --payment-verification-key-file $UTXO.vkey \ 29 | --stake-verification-key-file $STAKE.vkey \ 30 | --out-file $PAYMENT_ADDR \ 31 | --testnet-magic 42 32 | 33 | # Register the stake address on the blockchain 34 | cardano-cli stake-address registration-certificate \ 35 | --stake-verification-key-file $STAKE.vkey \ 36 | --out-file $STAKE.cert 37 | INITIAL_ADDR=initial.addr 38 | # Get the initial address from which we will transfer the funds 39 | cardano-cli genesis initial-addr \ 40 | --testnet-magic 42 \ 41 | --verification-key-file $UTXO.vkey > $INITIAL_ADDR 42 | # Check the balance on the initial address so that we can submit different 43 | # transactions. 
44 | TX_INFO=/tmp/tx-info.json 45 | cardano-cli query utxo --testnet-magic 42 --shelley-mode \ 46 | --address $(cat $INITIAL_ADDR) \ 47 | --out-file $TX_INFO 48 | BALANCE=`jq '.[].amount' $TX_INFO | xargs printf '%.0f\n'` 49 | TX_IN=`grep -oP '"\K[^"]+' -m 1 $TX_INFO | head -1 | tr -d '\n'` 50 | CHANGE=`expr $BALANCE - $FEE` 51 | 52 | cardano-cli transaction build-raw \ 53 | --tx-in $TX_IN \ 54 | --tx-out $(cat $INITIAL_ADDR)+$CHANGE \ 55 | --ttl $TTL \ 56 | --fee $FEE \ 57 | --out-file tx.raw \ 58 | --certificate-file $STAKE.cert 59 | cardano-cli transaction sign \ 60 | --tx-body-file tx.raw \ 61 | --signing-key-file $UTXO.skey \ 62 | --signing-key-file $STAKE.skey \ 63 | --testnet-magic 42 \ 64 | --out-file tx.signed 65 | cardano-cli transaction submit \ 66 | --tx-file tx.signed \ 67 | --testnet-magic 42 \ 68 | --shelley-mode 69 | 70 | METADATA_FILE=pool-metadata.json 71 | echo '{ 72 | "name": "PriviPool", 73 | "description": "Priviledge Pool", 74 | "ticker": "TEST", 75 | "homepage": "https://ppp" 76 | }' > $METADATA_FILE 77 | # Get the hash of the file: 78 | METADATA_HASH=`cardano-cli stake-pool metadata-hash --pool-metadata-file pool-metadata.json` 79 | 80 | # Pledge amount in Lovelace 81 | PLEDGE=1000000 82 | # Pool cost per-epoch in Lovelace 83 | COST=1000 84 | # Pool cost per epoch in percentage 85 | MARGIN=0.1 86 | POOL_REGISTRATION_CERT=pool-registration.cert 87 | # Create the registration certificate 88 | cardano-cli stake-pool registration-certificate \ 89 | --cold-verification-key-file $COLD.vkey \ 90 | --vrf-verification-key-file $VRF.vkey \ 91 | --pool-pledge $PLEDGE \ 92 | --pool-cost $COST \ 93 | --pool-margin $MARGIN \ 94 | --pool-reward-account-verification-key-file $STAKE.vkey \ 95 | --pool-owner-stake-verification-key-file $STAKE.vkey \ 96 | --testnet-magic 42 \ 97 | --metadata-url file://$METADATA_FILE \ 98 | --metadata-hash $METADATA_HASH \ 99 | --out-file $POOL_REGISTRATION_CERT 100 | 101 | # Generate a delegation certificate pledge 102 | 
DELEGATION_CERT=delegation.cert 103 | cardano-cli stake-address delegation-certificate \ 104 | --stake-verification-key-file $STAKE.vkey \ 105 | --cold-verification-key-file $COLD.vkey \ 106 | --out-file $DELEGATION_CERT 107 | 108 | # Wait a bit before querying the UTxO set... 109 | sleep 5 110 | 111 | # Registering a stake pool requires a deposit, which is specified in the 112 | # genesis file. Here we assume the deposit is 0. 113 | POOL_DEPOSIT=0 114 | cardano-cli query utxo --testnet-magic 42 --shelley-mode \ 115 | --address $(cat $INITIAL_ADDR) \ 116 | --out-file $TX_INFO 117 | BALANCE=`jq '.[].amount' $TX_INFO | xargs printf '%.0f\n'` 118 | TX_IN=`grep -oP '"\K[^"]+' -m 1 $TX_INFO | head -1 | tr -d '\n'` 119 | CHANGE=`expr $BALANCE - $POOL_DEPOSIT - $FEE` 120 | 121 | # Create, sign, and submit the transaction 122 | cardano-cli transaction build-raw \ 123 | --tx-in $TX_IN \ 124 | --tx-out $(cat $PAYMENT_ADDR)+$CHANGE \ 125 | --ttl $TTL \ 126 | --fee $FEE \ 127 | --out-file tx.raw \ 128 | --certificate-file $POOL_REGISTRATION_CERT \ 129 | --certificate-file $DELEGATION_CERT 130 | cardano-cli transaction sign \ 131 | --tx-body-file tx.raw \ 132 | --signing-key-file $UTXO.skey \ 133 | --signing-key-file $STAKE.skey \ 134 | --signing-key-file $COLD.skey \ 135 | --testnet-magic 42 \ 136 | --out-file tx.signed 137 | cardano-cli transaction submit \ 138 | --tx-file tx.signed \ 139 | --testnet-magic 42 \ 140 | --shelley-mode 141 | -------------------------------------------------------------------------------- /examples/shelley-testnet/topology.nix: -------------------------------------------------------------------------------- 1 | # Topology file for a simple testnet consisting of BFT nodes and stakepool nodes. 2 | # 3 | # See attributes `bftNodeRegionNames` and `poolRegionNames` to understand how 4 | # to customize the number of nodes in the network, and the regions in which 5 | # they are deployed. 
6 | # 7 | # * Debugging the topology 8 | # 9 | # You can use `nix eval` to query the different attributes of the topology and 10 | # check that their values match your expectations. 11 | # 12 | # > nix eval '(with import ./nix {}; with lib; map (x: x.name) globals.topology.coreNodes)' 13 | # 14 | pkgs: with pkgs; with lib; with topology-lib; 15 | let 16 | regions = { 17 | a = { name = "eu-central-1"; /* Europe (Frankfurt) */ }; 18 | b = { name = "us-east-2"; /* US East (Ohio) */ }; 19 | c = { name = "ap-southeast-1"; /* Asia Pacific (Singapore) */ }; 20 | d = { name = "eu-west-2"; /* Europe (London) */ }; 21 | }; 22 | bftCoreNodes = 23 | let 24 | # The region names will determine the number of BFT nodes. These names 25 | # should belong to `attrNames regions`. 26 | bftNodeRegionNames = [ "a" ]; 27 | # BFT node specifications, which will be used to create BFT nodes. 28 | bftNodeSpecs = 29 | genList 30 | (i: { region = builtins.elemAt bftNodeRegionNames i; 31 | org = "IOHK"; 32 | nodeId = i + 1; 33 | } 34 | ) 35 | (length bftNodeRegionNames); 36 | bftNodes = 37 | map defineKeys 38 | (fullyConnectNodes 39 | (map ({ region, org, nodeId}: mkBftCoreNode region 1 { inherit org nodeId;}) 40 | bftNodeSpecs 41 | )); 42 | defineKeys = x : 43 | x // { 44 | imports = [{ 45 | deployment.keys = { 46 | "utxo.vkey" = { 47 | keyFile = ../keys/utxo-keys + "/utxo${toString x.nodeId}.vkey"; 48 | destDir = "/root/keys"; 49 | }; 50 | "utxo.skey" = { 51 | keyFile = ../keys/utxo-keys + "/utxo${toString x.nodeId}.skey"; 52 | destDir = "/root/keys"; 53 | }; 54 | "delegate.vkey" = { 55 | keyFile = ../keys/delegate-keys + "/delegate${toString x.nodeId}.vkey"; 56 | destDir = "/root/keys"; 57 | }; 58 | "delegate.skey" = { 59 | keyFile = ../keys/delegate-keys + "/delegate${toString x.nodeId}.skey"; 60 | destDir = "/root/keys"; 61 | }; 62 | "genesis.vkey" = { 63 | keyFile = ../keys/genesis-keys + "/genesis${toString x.nodeId}.vkey"; 64 | destDir = "/root/keys"; 65 | }; 66 | "genesis.skey" = { 67 | 
keyFile = ../keys/genesis-keys + "/genesis${toString x.nodeId}.skey"; 68 | destDir = "/root/keys"; 69 | }; 70 | }; 71 | }]; 72 | }; 73 | in connectGroupWith (reverseList stakePoolNodes) bftNodes; 74 | 75 | relayNodes = []; 76 | 77 | stakePoolNodes = 78 | let 79 | # The region names determine the number of stake pools. These names 80 | # should belong to `attrNames regions`. 81 | poolRegionNames = [ "b" "c" "d" ]; 82 | # Stake pool specifications, which will be used to create stake pools. 83 | poolSpecs = 84 | genList 85 | (i: { region = builtins.elemAt poolRegionNames i; 86 | nodeName = "IOHK" + toString ((length bftCoreNodes) + i + 1); 87 | nodeId = (length bftCoreNodes) + i + 1; 88 | } 89 | ) 90 | (length poolRegionNames); 91 | pools = 92 | map defineKeys 93 | (fullyConnectNodes 94 | (map ({ region, nodeName, nodeId}: mkStakingPool region 1 nodeName { nodeId = nodeId;}) 95 | poolSpecs 96 | )); 97 | # We need to have the keys available in the node to be able to perform 98 | # tests. This is OK for tests, but do not store keys in the node in 99 | # production. 
100 | defineKeys = x : 101 | x // { 102 | imports = [{ 103 | deployment.keys = { 104 | "utxo.vkey" = { 105 | keyFile = ../keys/utxo-keys + "/utxo${toString x.nodeId}.vkey"; 106 | destDir = "/root/keys"; 107 | }; 108 | "utxo.skey" = { 109 | keyFile = ../keys/utxo-keys + "/utxo${toString x.nodeId}.skey"; 110 | destDir = "/root/keys"; 111 | }; 112 | "cold.vkey" = { 113 | keyFile = ../keys/pool-keys + "/node${toString x.nodeId}-cold.vkey"; 114 | destDir = "/root/keys"; 115 | }; 116 | "cold.skey" = { 117 | keyFile = ../keys/pool-keys + "/node${toString x.nodeId}-cold.skey"; 118 | destDir = "/root/keys"; 119 | }; 120 | "node-vrf.vkey" = { 121 | keyFile = ../keys/node-keys + "/node-vrf${toString x.nodeId}.vkey"; 122 | destDir = "/root/keys"; 123 | }; 124 | "node-vrf.skey" = { 125 | keyFile = ../keys/node-keys + "/node-vrf${toString x.nodeId}.skey"; 126 | destDir = "/root/keys"; 127 | }; 128 | }; 129 | }]; 130 | }; 131 | in 132 | connectGroupWith bftCoreNodes pools; 133 | 134 | coreNodes = bftCoreNodes ++ stakePoolNodes; 135 | in { 136 | inherit bftCoreNodes stakePoolNodes coreNodes relayNodes regions; 137 | } 138 | -------------------------------------------------------------------------------- /nix/default.nix: -------------------------------------------------------------------------------- 1 | { system ? builtins.currentSystem 2 | , crossSystem ? null 3 | , config ? {} 4 | }: 5 | let 6 | defaultSourcePaths = import ./sources.nix { inherit pkgs; }; 7 | 8 | # use our own nixpkgs if it exists in our sources, 9 | # otherwise use iohkNix default nixpkgs. 10 | defaultNixpkgs = if (defaultSourcePaths ? 
nixpkgs) 11 | then defaultSourcePaths.nixpkgs 12 | else (import defaultSourcePaths.iohk-nix {}).nixpkgs; 13 | 14 | inherit (import defaultNixpkgs { inherit system; overlays = [globalsOverlay]; }) globals; 15 | 16 | sourcesOverride = let sourcesFile = globals.sourcesJsonOverride; in 17 | if (builtins.pathExists sourcesFile) 18 | then import ./sources.nix { inherit pkgs sourcesFile; } 19 | else {}; 20 | 21 | sourcePaths = defaultSourcePaths // sourcesOverride; 22 | 23 | iohkNix = import sourcePaths.iohk-nix {}; 24 | 25 | nixpkgs = if (sourcesOverride ? nixpkgs) then sourcesOverride.nixpkgs else defaultNixpkgs; 26 | 27 | # overlays from ops-lib (include ops-lib sourcePaths): 28 | ops-lib-overlays = (import sourcePaths.ops-lib { withRustOverlays = false; }).overlays; 29 | nginx-overlay = self: super: let 30 | acceptLanguage = { 31 | src = self.fetchFromGitHub { 32 | name = "nginx_accept_language_module"; 33 | owner = "giom"; 34 | repo = "nginx_accept_language_module"; 35 | rev = "2f69842f83dac77f7d98b41a2b31b13b87aeaba7"; 36 | sha256 = "1hjysrl15kh5233w7apq298cc2bp4q1z5mvaqcka9pdl90m0vhbw"; 37 | }; 38 | }; 39 | in rec { 40 | luajit = super.luajit.withPackages (ps: with ps; [cjson]); 41 | nginxExplorer = super.nginxStable.override (oldAttrs: { 42 | modules = oldAttrs.modules ++ [ 43 | acceptLanguage 44 | self.nginxModules.develkit 45 | self.nginxModules.lua 46 | ]; 47 | }); 48 | nginxSmash = super.nginxStable.override (oldAttrs: { 49 | modules = oldAttrs.modules ++ [ 50 | self.nginxModules.develkit 51 | self.nginxModules.lua 52 | ]; 53 | }); 54 | nginxMetadataServer = nginxSmash; 55 | }; 56 | 57 | varnish-overlay = self: super: { 58 | varnish70Packages = super.varnish70Packages // { 59 | varnish = super.varnish70Packages.varnish.overrideAttrs (oA: { 60 | # Work-around excessive malloc overhead (https://github.com/varnishcache/varnish-cache/issues/3511#issuecomment-773889001) 61 | buildInputs = oA.buildInputs ++ [ self.jemalloc ]; 62 | buildFlags = oA.buildFlags ++ [ 
"JEMALLOC_LDADD=${self.jemalloc}/lib/libjemalloc.so" ]; 63 | }); 64 | modules = (self.callPackages ../pkgs/varnish/modules.nix { varnish = self.varnish70Packages.varnish; }).modules19; 65 | }; 66 | varnish60Packages = super.varnish60Packages // { 67 | varnish = super.varnish60Packages.varnish.overrideAttrs (oA: { 68 | buildInputs = oA.buildInputs ++ [ self.jemalloc ]; 69 | buildFlags = oA.buildFlags ++ [ "JEMALLOC_LDADD=${self.jemalloc}/lib/libjemalloc.so" ]; 70 | }); 71 | modules = (self.callPackages ../pkgs/varnish/modules.nix { varnish = self.varnish60Packages.varnish; }).modules15; 72 | }; 73 | prometheus-varnish-exporter = super.prometheus-varnish-exporter.override { 74 | buildGoModule = args: self.buildGoModule (args // rec { 75 | version = "1.6.1"; 76 | src = self.fetchFromGitHub { 77 | owner = "jonnenauha"; 78 | repo = "prometheus_varnish_exporter"; 79 | rev = version; 80 | sha256 = "15w2ijz621caink2imlp1666j0ih5pmlj62cbzggyb34ncl37ifn"; 81 | }; 82 | vendorSha256 = "sha256-P2fR0U2O0Y4Mci9jkAMb05WR+PrpuQ59vbLMG5b9KQI="; 83 | }); 84 | }; 85 | }; 86 | 87 | traefik-overlay = self: super: { 88 | traefik = super.traefik.override { 89 | buildGoModule = args: self.buildGoModule (args // rec { 90 | version = "2.5.6"; 91 | src = self.fetchzip { 92 | url = "https://github.com/traefik/traefik/releases/download/v${version}/traefik-v${version}.src.tar.gz"; 93 | sha256 = "sha256-HHJTfAigUH7C0VuKUeGypqFlQwVdy05Ki/aTxDsl+tg="; 94 | stripRoot = false; 95 | }; 96 | vendorSha256 = "sha256-DqjqJPyoFlCjIIaHYS5jrROQWDxZk+RGfccC2jYZ8LE="; 97 | }); 98 | }; 99 | }; 100 | 101 | # our own overlays: 102 | local-overlays = [ 103 | (import ./cardano.nix) 104 | (import ./packages.nix) 105 | ]; 106 | 107 | globalsOverlay = 108 | if builtins.pathExists ../globals.nix 109 | then (pkgs: _: with pkgs.lib; let 110 | globalsDefault = import ../globals-defaults.nix pkgs; 111 | globalsSpecific = import ../globals.nix pkgs; 112 | in { 113 | globals = globalsDefault // (recursiveUpdate { 114 | 
inherit (globalsDefault) ec2 libvirtd environmentVariables; 115 | } globalsSpecific); 116 | }) 117 | else builtins.trace "globals.nix missing, please add symlink" (pkgs: _: { 118 | globals = import ../globals-defaults.nix pkgs; 119 | }); 120 | 121 | # merge upstream sources with our own: 122 | upstream-overlay = self: super: { 123 | inherit iohkNix; 124 | cardano-ops = { 125 | inherit overlays; 126 | modules = self.importWithPkgs ../modules; 127 | roles = self.importWithPkgs ../roles; 128 | }; 129 | sourcePaths = (super.sourcePaths or {}) // sourcePaths; 130 | }; 131 | 132 | overlays = 133 | ops-lib-overlays ++ 134 | local-overlays ++ 135 | [ 136 | upstream-overlay 137 | nginx-overlay 138 | varnish-overlay 139 | traefik-overlay 140 | globalsOverlay 141 | globals.overlay 142 | ]; 143 | 144 | pkgs = import nixpkgs { 145 | inherit system crossSystem config overlays; 146 | }; 147 | in 148 | pkgs 149 | -------------------------------------------------------------------------------- /roles/explorer-gateway.nix: -------------------------------------------------------------------------------- 1 | pkgs: { config, nodes, ... 
}: 2 | with pkgs; 3 | let backendAddr = let 4 | suffix = { 5 | ec2 = "-ip"; 6 | libvirtd = ""; 7 | packet = ""; 8 | }.${config.deployment.targetEnv}; 9 | in name: if (globals.explorerBackendsInContainers) 10 | then "${name}.containers" 11 | else "explorer-${name}${suffix}"; 12 | in { 13 | 14 | imports = [ 15 | cardano-ops.modules.common 16 | ]; 17 | 18 | environment.systemPackages = with pkgs; [ 19 | bat fd lsof netcat ncdu ripgrep tree vim dnsutils 20 | ]; 21 | 22 | networking.firewall.allowedTCPPorts = [ 80 443 8080 ]; 23 | 24 | services.traefik = { 25 | enable = true; 26 | 27 | staticConfigOptions = { 28 | api.dashboard = false; 29 | metrics.prometheus = { 30 | entryPoint = "metrics"; 31 | }; 32 | entryPoints = { 33 | web = { 34 | address = ":80"; 35 | http = { 36 | redirections = { 37 | entryPoint = { 38 | to = "websecure"; 39 | scheme = "https"; 40 | }; 41 | }; 42 | }; 43 | }; 44 | websecure = { 45 | address = ":443"; 46 | }; 47 | metrics = { 48 | address = ":${toString globals.cardanoExplorerGwPrometheusExporterPort}"; 49 | }; 50 | }; 51 | certificatesResolvers.default.acme = { 52 | email = "devops@iohk.io"; 53 | storage = "/var/lib/traefik/acme.json"; 54 | httpChallenge = { 55 | entryPoint = "web"; 56 | }; 57 | }; 58 | }; 59 | dynamicConfigOptions = { 60 | http = { 61 | routers = { 62 | #traefik = { 63 | # rule = (lib.concatStringsSep " || " 64 | # (map (a: "Host(`${a}`)") ([globals.explorerHostName] ++ globals.explorerAliases))) + " && (PathPrefix(`/api`) || PathPrefix(`/dashboard`))"; 65 | # service = "api@internal"; 66 | # tls.certResolver = "default"; 67 | #}; 68 | rosetta = { 69 | rule = (lib.concatStringsSep " || " 70 | (map (a: "Host(`${a}`)") ([globals.explorerHostName] ++ globals.explorerAliases))) + " && PathPrefix(`/rosetta`)"; 71 | service = "rosetta"; 72 | tls.certResolver = "default"; 73 | }; 74 | explorer = { 75 | rule = lib.concatStringsSep " || " 76 | (map (a: "Host(`${a}`)") ([globals.explorerHostName] ++ globals.explorerAliases)); 77 | 
service = "explorer"; 78 | tls.certResolver = "default"; 79 | }; 80 | smash = { 81 | rule = "Host(`smash.${globals.domain}`)"; 82 | service = "smash"; 83 | tls.certResolver = "default"; 84 | }; 85 | }; 86 | services = { 87 | rosetta = { 88 | loadBalancer = { 89 | servers = map (b: { 90 | url = "http://${backendAddr b}"; 91 | }) globals.explorerRosettaActiveBackends; 92 | }; 93 | }; 94 | explorer = { 95 | loadBalancer = { 96 | servers = map (b: { 97 | url = "http://${backendAddr b}"; 98 | }) globals.explorerActiveBackends; 99 | }; 100 | }; 101 | smash = { 102 | loadBalancer = { 103 | servers = map (b: { 104 | url = "http://${backendAddr b}:81"; 105 | }) globals.explorerActiveBackends; 106 | }; 107 | }; 108 | }; 109 | }; 110 | }; 111 | }; 112 | 113 | # Reduce default interval to allow for more restarts (5 in 1 min) before it fails 114 | systemd.services.traefik.startLimitIntervalSec = lib.mkForce 60; 115 | 116 | services.monitoring-exporters.extraPrometheusExporters = [ 117 | { 118 | job_name = "explorer-gateway-exporter"; 119 | scrape_interval = "10s"; 120 | metrics_path = "/metrics"; 121 | port = globals.cardanoExplorerGwPrometheusExporterPort; 122 | } 123 | ]; 124 | 125 | services.dnsmasq.enable = true; 126 | 127 | networking.nat = { 128 | enable = globals.explorerBackendsInContainers; 129 | internalInterfaces = [ "ve-+" ]; 130 | externalInterface = "ens5"; 131 | }; 132 | networking.firewall.trustedInterfaces = config.networking.nat.internalInterfaces; 133 | 134 | containers = lib.optionalAttrs globals.explorerBackendsInContainers (let 135 | indexes = lib.listToAttrs (lib.imap1 (lib.flip lib.nameValuePair) (lib.attrNames globals.explorerBackends)); 136 | in 137 | lib.mapAttrs (z: variant: let 138 | hostAddress = "192.168.100.${toString indexes.${z}}0"; 139 | localAddress = "192.168.100.${toString indexes.${z}}1"; 140 | in { 141 | privateNetwork = true; 142 | autoStart = true; 143 | inherit hostAddress localAddress; 144 | config = { 145 | _module.args = { 146 | 
name = "explorer-${z}"; 147 | nodes = nodes // { 148 | "explorer-${z}" = { 149 | config.networking.privateIPv4 = localAddress; 150 | options.networking.privateIPv4.isDefined = true; 151 | options.networking.publicIPv4.isDefined = false; 152 | }; 153 | }; 154 | resources = {}; 155 | }; 156 | nixpkgs.pkgs = pkgs; 157 | imports = [(cardano-ops.roles.explorer variant)]; 158 | networking.nameservers = [ hostAddress ]; 159 | node = { 160 | nodeId = config.node.nodeId + 1; 161 | memory = config.node.memory / (lib.length (lib.attrNames globals.explorerBackends)); 162 | }; 163 | }; 164 | }) globals.explorerBackends 165 | ); 166 | } 167 | -------------------------------------------------------------------------------- /modules/cardano-postgres.nix: -------------------------------------------------------------------------------- 1 | pkgs: { config, ... }: 2 | with pkgs; 3 | 4 | let 5 | inherit (lib) mkForce mkIf mkEnableOption mkOption types; 6 | cfg = config.services.cardano-postgres; 7 | in { 8 | options = { 9 | services.cardano-postgres = { 10 | enable = mkEnableOption "Cardano Postgres"; 11 | postgresqlSocketPath = mkOption { 12 | description = "The postgresql socket path to use, typically `/run/postgresql`."; 13 | type = types.str; 14 | default = "/run/postgresql"; 15 | }; 16 | postgresqlDataDir = mkOption { 17 | description = "The directory for postgresql data. 
If null, this parameter is not configured."; 18 | type = types.nullOr types.str; 19 | default = null; 20 | }; 21 | withHighCapacityPostgres = mkOption { 22 | description = "Configure postgresql to use additional resources to support high RAM and connection requirements."; 23 | type = types.bool; 24 | default = globals.withHighCapacityExplorer; 25 | }; 26 | }; 27 | }; 28 | config = mkIf cfg.enable { 29 | services.postgresql = { 30 | enable = true; 31 | package = postgresql_12; 32 | dataDir = mkIf (cfg.postgresqlDataDir != null) cfg.postgresqlDataDir; 33 | enableTCPIP = false; 34 | } // (lib.optionalAttrs (!(lib.hasPrefix "20.03" lib.version)) { 35 | settings = if cfg.withHighCapacityPostgres then { 36 | # Optimized for: 37 | # DB Version: 12 38 | # OS Type: linux 39 | # DB Type: web 40 | # Total Memory (RAM): 24 GB (75% the RAM of high capacity explorer) 41 | # CPUs num: 8 (high capacity explorer vCPUs) 42 | # Connections num: 2000 43 | # Data Storage: ssd 44 | # Suggested optimization for 45 | # other configurations can be 46 | # found at: 47 | # https://pgtune.leopard.in.ua/ 48 | max_connections = 2000; 49 | shared_buffers = "6GB"; 50 | effective_cache_size = "18GB"; 51 | maintenance_work_mem = "1536MB"; 52 | checkpoint_completion_target = 0.7; 53 | wal_buffers = "16MB"; 54 | default_statistics_target = 100; 55 | random_page_cost = 1.1; 56 | effective_io_concurrency = 200; 57 | work_mem = "5242kB"; 58 | min_wal_size = "1GB"; 59 | max_wal_size = "4GB"; 60 | max_worker_processes = 8; 61 | max_parallel_workers_per_gather = 4; 62 | max_parallel_workers = 8; 63 | max_parallel_maintenance_workers = 4; 64 | shared_preload_libraries = "pg_stat_statements"; 65 | "pg_stat_statements.track" = "all"; 66 | } else { 67 | # DB Version: 12 68 | # OS Type: linux 69 | # DB Type: web 70 | # Total Memory (RAM): 8 GB (half the RAM of regular explorer) 71 | # CPUs num: 4 (explorer vCPUs) 72 | # Connections num: 200 73 | # Data Storage: ssd 74 | max_connections = 200; 75 | 
shared_buffers = "2GB"; 76 | effective_cache_size = "6GB"; 77 | maintenance_work_mem = "512MB"; 78 | checkpoint_completion_target = 0.7; 79 | wal_buffers = "16MB"; 80 | default_statistics_target = 100; 81 | random_page_cost = 1.1; 82 | effective_io_concurrency = 200; 83 | work_mem = "5242kB"; 84 | min_wal_size = "1GB"; 85 | max_wal_size = "4GB"; 86 | max_worker_processes = 4; 87 | max_parallel_workers_per_gather = 2; 88 | max_parallel_workers = 4; 89 | max_parallel_maintenance_workers = 2; 90 | shared_preload_libraries = "pg_stat_statements"; 91 | "pg_stat_statements.track" = "all"; 92 | }; 93 | }) // (lib.optionalAttrs (lib.hasPrefix "20.03" lib.version) { 94 | extraConfig = if cfg.withHighCapacityPostgres then '' 95 | # Optimized for: 96 | # DB Version: 12 97 | # OS Type: linux 98 | # DB Type: web 99 | # Total Memory (RAM): 24 GB (75% the RAM of high capacity explorer) 100 | # CPUs num: 8 (high capacity explorer vCPUs) 101 | # Connections num: 2000 102 | # Data Storage: ssd 103 | # Suggested optimization for 104 | # other configurations can be 105 | # found at: 106 | # https://pgtune.leopard.in.ua/ 107 | max_connections = 2000 108 | shared_buffers = 6GB 109 | effective_cache_size = 18GB 110 | maintenance_work_mem = 1536MB 111 | checkpoint_completion_target = 0.7 112 | wal_buffers = 16MB 113 | default_statistics_target = 100 114 | random_page_cost = 1.1 115 | effective_io_concurrency = 200 116 | work_mem = 5242kB 117 | min_wal_size = 1GB 118 | max_wal_size = 4GB 119 | max_worker_processes = 8 120 | max_parallel_workers_per_gather = 4 121 | max_parallel_workers = 8 122 | max_parallel_maintenance_workers = 4 123 | shared_preload_libraries = 'pg_stat_statements' 124 | pg_stat_statements.track = all 125 | '' else '' 126 | # DB Version: 12 127 | # OS Type: linux 128 | # DB Type: web 129 | # Total Memory (RAM): 8 GB (half the RAM of regular explorer) 130 | # CPUs num: 4 (explorer vCPUs) 131 | # Connections num: 200 132 | # Data Storage: ssd 133 | max_connections = 200 
134 | shared_buffers = 2GB 135 | effective_cache_size = 6GB 136 | maintenance_work_mem = 512MB 137 | checkpoint_completion_target = 0.7 138 | wal_buffers = 16MB 139 | default_statistics_target = 100 140 | random_page_cost = 1.1 141 | effective_io_concurrency = 200 142 | work_mem = 5242kB 143 | min_wal_size = 1GB 144 | max_wal_size = 4GB 145 | max_worker_processes = 4 146 | max_parallel_workers_per_gather = 2 147 | max_parallel_workers = 4 148 | max_parallel_maintenance_workers = 2 149 | shared_preload_libraries = 'pg_stat_statements' 150 | pg_stat_statements.track = all 151 | ''; 152 | }); 153 | }; 154 | } 155 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # cardano-ops 2 | 3 | NixOps deployment configuration for IOHK/Cardano devops. 4 | 5 | For examples on how you can deploy your own testnet please refer to the 6 | [README](examples/shelley-testnet/README.md) of the Shelley testnet example. 7 | 8 | ## Explorer upgrades 9 | 10 | The explorer is composed of a traefik frontend (`explorer`) and two backends (`explorer-a` and `explorer-b`). The backends are dedicated machines, unless `globals.explorerBackendsInContainers = true;` in which case the backends are packed together (as containers) with the traefik frontend. 11 | 12 | We upgrade one backend after the other to achieve zero-downtime upgrades. 13 | 14 | On `testnet` and `mainnet`, after the upgrade is completed, a snapshot of the cardano-db-sync database is done and uploaded to s3 buckets: 15 | - for `testnet`: `updates-cardano-testnet` (`https://updates-cardano-testnet.s3.amazonaws.com`) 16 | - for `mainnet`: `update-cardano-mainnet.iohk.io` (`https://update-cardano-mainnet.iohk.io`) 17 | 18 | 19 | ### Process 20 | 21 | 1. 
add pins for a set of new versions of explorer services (that work together), eg.: 22 | 23 | ```sh 24 | niv add input-output-hk/cardano-db-sync -n cardano-db-sync-10 -b refs/tags/10.0.0 25 | niv add input-output-hk/cardano-graphql -n cardano-graphql-next -b chore/cardano-db-sync-10-compat 26 | niv add input-output-hk/cardano-explorer-app -n cardano-explorer-app-next -b chore/cardano-graphql-5.0.0-compat 27 | ``` 28 | 29 | 2. Create a set for those new versions in `globals-default.nix` 30 | 31 | Set one of the backends (`a`) to use this new set (`explorer10`), and keep only the untouched backend in the active set: 32 | ```nix 33 | explorerBackends = { 34 | # explorer-a is updated to use the new set: 35 | a = globals.explorer10; 36 | b = globals.explorer9; 37 | }; 38 | 39 | # explorer-a is being upgraded: we remove it from the traefik load-balancer: 40 | explorerActiveBackends = ["b"]; 41 | 42 | # new set of versions (to be updated with final tags before mainnet release) 43 | explorer10 = { 44 | cardano-db-sync = sourcePaths.cardano-db-sync-10; 45 | cardano-explorer-app = sourcePaths.cardano-explorer-app-next; 46 | cardano-graphql = sourcePaths.cardano-graphql-next; 47 | }; 48 | ``` 49 | 50 | Commit this change to a new branch and deploy it (`explorer` on `shelley-qa` or `explorer-a` on `staging`/`testnet`). 51 | 52 | If this is a major upgrade, the database on `explorer-a` needs to be deleted: 53 | `systemctl stop postgresql.service && rm -rf /var/lib/postgresql/12 && systemctl start postgresql.service && systemctl restart cardano-db-sync`. 54 | 55 | 2. Take a snapshot on explorer-a: 56 | 57 | First we need to wait until `cardano-db-sync` is fully synced. Then we modify the topology file to include this bit: 58 | 59 | ```nix 60 | explorer-a.services.cardano-db-sync.takeSnapshot = "once"; 61 | ``` 62 | 63 | 3. 
Switch the frontend to the updated backend (`explorer-a`) and prepare the upgrade of `explorer-b`: 64 | 65 | Edit `globals-default.nix` so that `explorer-a` uses the new version, and the traefik frontend uses the new version on `explorer-b`. 66 | 67 | ```nix 68 | explorerBackends = { 69 | a = globals.explorer10; 70 | # we now update explorer-b: 71 | b = globals.explorer10; 72 | }; 73 | 74 | # explorer-a is now fully synced and ready to serve requests: 75 | explorerActiveBackends = ["a"]; 76 | ``` 77 | Deploy frontend: 78 | ```sh 79 | $ nixops deploy --include explorer 80 | ``` 81 | At this point please check the explorer web UI and roll back this last change if there is any issue. 82 | 83 | 4. Upgrade `explorer-b` using the snapshot 84 | 85 | ```sh 86 | $ nixops ssh explorer-a -- ls /var/lib/cexplorer/*tgz 87 | /var/lib/cexplorer/db-sync-snapshot-schema-10-block-5886057-x86_64.tgz 88 | 89 | $ nixops scp --from explorer-a /var/lib/cexplorer/db-sync-snapshot-schema-10-block-5886057-x86_64.tgz ./ 90 | $ nixops scp --to explorer-b db-sync-snapshot-schema-10-block-5886057-x86_64.tgz /var/lib/cexplorer/ 91 | 92 | $ nixops deploy --include explorer-b 93 | ``` 94 | Then wait for `explorer-b` to be fully synced. 95 | 96 | 5. Update the frontend to use both backends 97 | 98 | Edit `globals-default.nix` to activate both backends: 99 | 100 | ```nix 101 | explorerBackends = { 102 | a = globals.explorer10; 103 | b = globals.explorer10; 104 | }; 105 | explorerActiveBackends = ["a" "b"]; 106 | ``` 107 | Push this change to the branch and merge it to master. 108 | 109 | ```sh 110 | $ nixops deploy --include explorer 111 | ``` 112 | 113 | 6. 
Upload snapshot to S3 114 | 115 | On testnet: 116 | ``` 117 | source ../proposal-ui/static/proposal-ui-testnet.sh 118 | ./scripts/checksum-sign-upload.sh db-sync-snapshot-schema-10-block-2700107-x86_64.tgz updates-cardano-testnet cardano-db-sync 119 | ``` 120 | 121 | On mainnet: 122 | ``` 123 | source ../proposal-ui/static/proposal-ui-mainnet.sh 124 | ./scripts/checksum-sign-upload.sh db-sync-snapshot-schema-10-block-2700107-x86_64.tgz update-cardano-mainnet.iohk.io cardano-db-sync 125 | ``` 126 | 127 | ## Accessing Prometheus ## 128 | 129 | 130 | It is possible to query [Prometheus instances](https://monitoring.cardano-mainnet.iohk.io/prometheus "cardano-mainnet") directly (rather than via [Grafana](https://monitoring.cardano-mainnet.iohk.io/grafana/ "cardano-mainnet")) using the Prometheus [query language](https://prometheus.io/docs/prometheus/latest/querying/basics/), for example 131 | 132 | ``` 133 | cardano_node_metrics_utxoSize_int{hostname="stk-a-1-IOG1-ip"}[5m] 134 | ``` 135 | 136 | For larger queries, replacing `5m` (minutes) by `5d` (days), the GUI is 137 | inconvenient and it is better to use a programming environment to 138 | submit an HTTP request and parse the response. One way to do this is 139 | to use Firefox as described 140 | [here](https://daniel.haxx.se/blog/2015/11/23/copy-as-curl/). 141 | 142 | Using this may give you several possible HTTP requests: 143 | 144 | ![](FirefoxDebugExample.png "Obtaining the HTTP request") 145 | 146 | Choose the one that corresponds to the required query and then copy as 147 | `cURL` and execute it at the command line. It should also be possible 148 | to use this in a programming language such as Python. 149 | 150 | # DB Sync Best Practices 151 | The database used in Cardano DB Sync is a PostgreSQL database. Like all databases, it needs to be set up in the best way for the intended environment. 
For best performance, the `db-sync` process needs to run on the same machine as the PostgreSQL server and the PostgreSQL database. For more information, see the [recommendation from the developers](https://docs.google.com/document/d/1dzINT5a-FSF4apgx8-VXSaqPTHMZmtJ1rPvY0MKS6Ak/edit?usp=sharing). -------------------------------------------------------------------------------- /deployments/cardano-aws.nix: -------------------------------------------------------------------------------- 1 | with import ../nix {}; 2 | let 3 | inherit (pkgs.lib) 4 | attrValues attrNames filter filterAttrs flatten foldl' hasAttrByPath listToAttrs 5 | mapAttrs' mapAttrs nameValuePair recursiveUpdate unique optional any concatMap 6 | getAttrs optionalString hasPrefix take drop length concatStringsSep head toLower 7 | elem; 8 | 9 | inherit (globals.topology) coreNodes relayNodes; 10 | privateRelayNodes = globals.topology.privateRelayNodes or []; 11 | inherit (globals.ec2.credentials) accessKeyIds; 12 | inherit (iohk-ops-lib.physical) aws; 13 | 14 | cluster = import ../clusters/cardano.nix { 15 | inherit pkgs; 16 | inherit (globals.ec2) instances; 17 | }; 18 | 19 | nodes = filterAttrs (name: node: 20 | ((node.deployment.targetEnv or null) == "ec2") 21 | && ((node.deployment.ec2.region or null) != null)) cluster; 22 | 23 | doMonitoring = any (n: n.node.roles.isMonitor or false) (attrValues nodes); 24 | 25 | regions = 26 | unique (map (node: node.deployment.ec2.region) (attrValues nodes)); 27 | 28 | orgs = 29 | unique (map (node: node.node.org) (attrValues nodes)); 30 | 31 | securityGroups = with aws.security-groups; [ 32 | { 33 | nodes = getAttrs (map (n: n.name) (coreNodes ++ privateRelayNodes)) nodes; 34 | groups = [ (import ../physical/aws/security-groups/allow-peers.nix) ]; 35 | } 36 | { 37 | nodes = getAttrs (map (n: n.name) relayNodes) nodes; 38 | groups = [ (import ../physical/aws/security-groups/allow-public.nix) ]; 39 | } 40 | { 41 | nodes = filterAttrs (_: n: n.node.roles.isMonitor 
or false) nodes; 42 | groups = [ 43 | allow-public-www-https 44 | allow-graylog 45 | ]; 46 | } 47 | { 48 | nodes = (filterAttrs (_: n: n.node.roles.isExplorer or false) nodes); 49 | groups = [ allow-public-www-https ]; 50 | } 51 | { 52 | nodes = (filterAttrs (_: n: n.node.roles.isExplorerBackend or false) nodes); 53 | groups = [ (import ../physical/aws/security-groups/allow-explorer-gw.nix) ]; 54 | } 55 | { 56 | nodes = (filterAttrs (_: n: n.node.roles.isMetadata or false) nodes); 57 | groups = [ allow-public-www-https ]; 58 | } 59 | { 60 | nodes = (filterAttrs (_: n: n.node.roles.isFaucet or false) nodes); 61 | groups = [ allow-public-www-https ]; 62 | } 63 | { 64 | nodes = (filterAttrs (_: n: n.node.roles.isPublicSsh or false) nodes); 65 | groups = [ allow-ssh ]; 66 | } 67 | { 68 | inherit nodes; 69 | groups = [ allow-deployer-ssh ] 70 | ++ optional doMonitoring 71 | allow-monitoring-collection; 72 | } 73 | ]; 74 | 75 | importSecurityGroup = node: securityGroup: 76 | securityGroup { 77 | inherit pkgs lib nodes; 78 | region = node.deployment.ec2.region; 79 | org = node.node.org; 80 | accessKeyId = pkgs.globals.ec2.credentials.accessKeyIds.${node.node.org}; 81 | }; 82 | 83 | 84 | importSecurityGroups = {nodes, groups}: 85 | mapAttrs 86 | (_: n: foldl' recursiveUpdate {} (map (importSecurityGroup n) groups)) 87 | nodes; 88 | 89 | securityGroupsByNode = 90 | foldl' recursiveUpdate {} (map importSecurityGroups securityGroups); 91 | 92 | settings = { 93 | resources = { 94 | ec2SecurityGroups = 95 | foldl' recursiveUpdate {} (attrValues securityGroupsByNode); 96 | 97 | elasticIPs = mapAttrs' (name: node: 98 | nameValuePair "${name}-ip" { 99 | accessKeyId = accessKeyIds.${node.node.org}; 100 | inherit (node.deployment.ec2) region; 101 | }) nodes; 102 | 103 | ec2KeyPairs = listToAttrs (concatMap (region: 104 | map (org: 105 | nameValuePair "cardano-keypair-${org}-${region}" { 106 | inherit region; 107 | accessKeyId = pkgs.globals.ec2.credentials.accessKeyIds.${org}; 108 | 
} 109 | ) orgs) 110 | regions); 111 | 112 | route53RecordSets = lib.optionalAttrs globals.withSmash { 113 | "smash-explorer-alias" = { resources, ... }: { 114 | zoneName = "${pkgs.globals.dnsZone}."; 115 | domainName = "smash.${globals.domain}."; 116 | recordValues = [ resources.machines.explorer ]; 117 | recordType = "A"; 118 | accessKeyId = pkgs.globals.ec2.credentials.accessKeyIds.dns; 119 | }; 120 | } // ( 121 | let mkRelayRecords = prefix: let 122 | relaysNewPrefix = "${prefix}${optionalString (prefix != "") "-"}relays-new"; 123 | in relayFilter: listToAttrs (map (relay: 124 | nameValuePair "${relaysNewPrefix}-${relay.name}" ( 125 | { resources, ... }: { 126 | zoneName = "${pkgs.globals.dnsZone}."; 127 | domainName = "${prefix}${optionalString (prefix != "") "."}${pkgs.globals.relaysNew}."; 128 | recordValues = [ resources.machines.${relay.name} ]; 129 | recordType = "A"; 130 | setIdentifier = relay.name; 131 | routingPolicy = "multivalue"; 132 | accessKeyId = pkgs.globals.ec2.credentials.accessKeyIds.dns; 133 | }) 134 | # AWS records are limited to 200 values: 135 | ) (let relays = filter (r: (r.public or true) && relayFilter r) relayNodes; 136 | numberOfRelays = length relays; 137 | in if (numberOfRelays > 200) then builtins.trace 138 | "WARNING: Getting over the 200 values limit for ${relaysNewPrefix} dns entry (${toString numberOfRelays} relays). Excluding ${concatStringsSep " " (map (r: r.name) (drop 200 relays))}." 139 | (take 200 relays) 140 | else relays)); 141 | in mkRelayRecords "" (_: true) 142 | // mkRelayRecords "asia-pacific" (n: hasPrefix "ap" n.region) 143 | // mkRelayRecords "north-america" (n: hasPrefix "us" n.region) 144 | // mkRelayRecords "europe" (n: hasPrefix "eu" n.region) 145 | // ( 146 | let records = map (coreNode: if coreNode ? 
ticker 147 | then mkRelayRecords (toLower coreNode.ticker) (r: elem coreNode.name r.producers) 148 | else {} 149 | ) coreNodes; 150 | in foldl' (a: b: a // b) {} records)); 151 | 152 | }; 153 | defaults = { name, resources, config, ... }: { 154 | deployment.ec2 = { 155 | keyPair = resources.ec2KeyPairs."cardano-keypair-${config.node.org}-${config.deployment.ec2.region}"; 156 | securityGroups = map (sgName: resources.ec2SecurityGroups.${sgName}) 157 | (attrNames (securityGroupsByNode.${name} or {})); 158 | }; 159 | }; 160 | }; 161 | in 162 | cluster // settings 163 | -------------------------------------------------------------------------------- /roles/tx-generator.nix: -------------------------------------------------------------------------------- 1 | pkgs: { config, name, lib, nodes, resources, ... }: 2 | with pkgs; with pkgs.lib; 3 | 4 | let 5 | inherit (globals.environmentConfig.networkConfig) Protocol; 6 | 7 | # We need a signing key with access to funds 8 | # to be able to run tx generator and sign generated transactions. 9 | signingKey = 10 | { Cardano = ../keys/utxo-keys/utxo1.skey; 11 | TPraos = ../keys/utxo-keys/utxo1.skey; 12 | RealPBft = ../keys/delegate-keys.000.key; 13 | }."${Protocol}" 14 | or (abort "Unsupported protocol: ${Protocol}"); 15 | 16 | cardanoNodes = filterAttrs 17 | (_: node: node.config.services.cardano-node.enable or false) 18 | nodes; 19 | poolNodes = filterAttrs 20 | (name: node: name != "explorer" && name != "node-0") 21 | cardanoNodes; 22 | 23 | node-src = sourcePaths.cardano-node; 24 | node-cfg = config.services.cardano-node; 25 | mayFetchNodeCfgAttr = attr: 26 | optionalAttrs (hasAttr attr (node-cfg.nodeConfig)) { ${attr} = node-cfg.nodeConfig.${attr}; }; 27 | in { 28 | imports = [ 29 | (import (node-src + "/nix/nixos/tx-generator-service.nix") 30 | ## XXX: ugly -- svclib should really move to iohk-nix. 
31 | (pkgs 32 | // 33 | { commonLib = import (node-src + "/nix/svclib.nix") { inherit pkgs; }; })) 34 | ]; 35 | 36 | services.tx-generator = rec { 37 | enable = true; 38 | targetNodes = __mapAttrs 39 | (name: node: 40 | { ip = let ip = getPublicIp resources nodes name; 41 | in __trace "generator target: ${name}/${ip}" ip; 42 | port = node.config.services.cardano-node.port; 43 | }) 44 | poolNodes; 45 | 46 | ## nodeConfig of the locally running observer node. 47 | localNodeConf = node-cfg; 48 | localNodeSocketPath = node-cfg.socketPath; 49 | sigKey = "/var/lib/keys/cardano-node-signing"; 50 | 51 | ## The nodeConfig of the Tx generator itself. 52 | nodeConfig = { 53 | TurnOnLogging = true; 54 | TurnOnLogMetrics = false; 55 | minSeverity = "Debug"; 56 | TracingVerbosity = "MaximalVerbosity"; 57 | defaultBackends = [ 58 | "KatipBK" 59 | ]; 60 | setupBackends = [ 61 | "KatipBK" 62 | ]; 63 | defaultScribes = [ 64 | [ "StdoutSK" "stdout" ] 65 | [ "FileSK" "logs/generator.json" ] 66 | ]; 67 | setupScribes = [ 68 | { scKind = "StdoutSK"; scName = "stdout"; scFormat = "ScJson"; } 69 | { scKind = "FileSK"; scName = "logs/generator.json"; scFormat = "ScJson"; 70 | scRotation = { 71 | rpLogLimitBytes = 300000000; 72 | rpMaxAgeHours = 24; 73 | rpKeepFilesNum = 20; 74 | }; } 75 | ]; 76 | options = { 77 | }; 78 | } // __foldl' (x: y: x // y) {} 79 | (map mayFetchNodeCfgAttr 80 | [ "ByronGenesisFile" 81 | "ShelleyGenesisFile" 82 | "AlonzoGenesisFile" 83 | "Protocol" 84 | "LastKnownBlockVersion-Major" 85 | "LastKnownBlockVersion-Minor" 86 | "LastKnownBlockVersion-Alt" 87 | "TestEnableDevelopmentHardForkEras" 88 | "TestEnableDevelopmentNetworkProtocols" 89 | "TestShelleyHardForkAtEpoch" 90 | "TestAllegraHardForkAtEpoch" 91 | "TestMaryHardForkAtEpoch" 92 | "TestAlonzoHardForkAtEpoch" ]); 93 | nodeConfigFile = __toFile "generator-config.json" (__toJSON nodeConfig); 94 | 95 | dsmPassthrough = { 96 | # rtsOpts = ["-xc"]; 97 | }; 98 | } // globals.environmentConfig.generatorConfig; 99 | 
100 | services.cardano-node = { 101 | instances = 1; 102 | 103 | socketPath = "/var/lib/cardano-node/node.socket"; 104 | systemdSocketActivation = mkForce false; 105 | 106 | nodeConfig = mkForce (globals.environmentConfig.nodeConfig // { 107 | defaultScribes = [ 108 | [ "StdoutSK" "stdout" ] 109 | [ "FileSK" "logs/node.json" ] 110 | ]; 111 | setupScribes = [ 112 | { scKind = "StdoutSK"; scName = "stdout"; scFormat = "ScJson"; } 113 | { scKind = "FileSK"; scName = "logs/node.json"; scFormat = "ScJson"; 114 | scRotation = { 115 | rpLogLimitBytes = 300000000; 116 | rpMaxAgeHours = 24; 117 | rpKeepFilesNum = 20; 118 | }; } 119 | ]; 120 | minSeverity = "Debug"; 121 | TracingVerbosity = "NormalVerbosity"; 122 | 123 | TestEnableDevelopmentHardForkEras = true; 124 | TestEnableDevelopmentNetworkProtocols = true; 125 | 126 | TraceAcceptPolicy = false; 127 | TraceBlockFetchClient = true; 128 | TraceBlockFetchDecisions = false; 129 | TraceBlockFetchProtocol = true; 130 | TraceBlockFetchProtocolSerialised = false; 131 | TraceBlockFetchServer = false; 132 | TraceBlockchainTime = false; 133 | TraceChainDB = true; 134 | TraceChainSyncBlockServer = false; 135 | TraceChainSyncClient = true; 136 | TraceChainSyncHeaderServer = false; 137 | TraceChainSyncProtocol = false; 138 | TraceDiffusionInitialization = false; 139 | TraceDnsResolver = false; 140 | TraceDnsSubscription = false; 141 | TraceErrorPolicy = true; 142 | TraceForge = false; 143 | TraceForgeStateInfo = false; 144 | TraceHandshake = false; 145 | TraceIpSubscription = false; 146 | TraceKeepAliveClient = false; 147 | TraceLocalChainSyncProtocol = false; 148 | TraceLocalErrorPolicy = false; 149 | TraceLocalHandshake = false; 150 | TraceLocalStateQueryProtocol = false; 151 | TraceLocalTxSubmissionProtocol = true; 152 | TraceLocalTxSubmissionServer = true; 153 | TraceMempool = true; 154 | TraceTxInbound = true; 155 | TraceTxOutbound = true; 156 | TraceTxSubmissionProtocol = true; 157 | TraceTxSubmission2Protocol = true; 158 | 
159 | TurnOnLogMetrics = true; 160 | options = { 161 | mapBackends = { 162 | "cardano.node.resources" = [ "KatipBK" ]; 163 | }; 164 | }; 165 | } // 166 | ({ 167 | shelley = 168 | { TestShelleyHardForkAtEpoch = 0; 169 | }; 170 | allegra = 171 | { TestShelleyHardForkAtEpoch = 0; 172 | TestAllegraHardForkAtEpoch = 0; 173 | }; 174 | mary = 175 | { TestShelleyHardForkAtEpoch = 0; 176 | TestAllegraHardForkAtEpoch = 0; 177 | TestMaryHardForkAtEpoch = 0; 178 | }; 179 | }).${globals.environmentConfig.generatorConfig.era} 180 | // (globals.benchmarkingProfile.node.extra_config or {})); 181 | }; 182 | 183 | deployment.keys = { 184 | "cardano-node-signing" = { 185 | keyFile = signingKey; 186 | user = "cardano-node"; 187 | group = "cardano-node"; 188 | destDir = "/var/lib/keys"; 189 | }; 190 | }; 191 | 192 | users.users.cardano-node.extraGroups = [ "keys" ]; 193 | } 194 | -------------------------------------------------------------------------------- /bench/make-topology.hs: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env nix-shell 2 | #! 
nix-shell -p "haskellPackages.ghcWithPackages (pkgs: with pkgs; [aeson graphviz optparse-applicative split])" -i runhaskell 3 | 4 | {-# LANGUAGE DeriveGeneric #-} 5 | {-# LANGUAGE LambdaCase #-} 6 | {-# LANGUAGE RecordWildCards #-} 7 | {-# LANGUAGE TupleSections #-} 8 | 9 | import Prelude hiding (id) 10 | 11 | import Control.Monad 12 | import Data.Aeson 13 | import qualified Data.GraphViz as G 14 | import qualified Data.GraphViz.Attributes as G 15 | import qualified Data.GraphViz.Attributes.Complete as G 16 | import qualified Data.GraphViz.Attributes.Colors as G 17 | import qualified Data.GraphViz.Printing as G 18 | import qualified Data.GraphViz.Types.Graph as GV 19 | import Data.List (tails) 20 | import qualified Data.List.Split as List 21 | import qualified Data.Map as Map 22 | import Data.Map (Map) 23 | import Data.Semigroup ((<>)) 24 | import qualified Data.Text.Lazy as T 25 | import qualified Data.Text.Lazy.IO as T 26 | import qualified Data.ByteString.Lazy.Char8 as LBS 27 | import GHC.Generics 28 | 29 | import Options.Applicative 30 | 31 | import qualified System.IO as IO 32 | 33 | 34 | data TopoParams = TopoParams 35 | { tpSize :: Int 36 | , tpLocations :: [Location] 37 | , tpIdPools :: Int -> Maybe Int 38 | } 39 | 40 | data Spec = Spec 41 | { id :: Int 42 | , loc :: Location 43 | , mpools :: Maybe Int 44 | , links :: [Int] 45 | } 46 | deriving (Generic, Show) 47 | 48 | data Location 49 | = AP | EU | US 50 | deriving (Bounded, Eq, Enum, Ord, Read, Show) 51 | 52 | mkTopology :: TopoParams -> [Spec] 53 | mkTopology TopoParams{..} = 54 | concat phase4 55 | where 56 | phase0 = zipWith mkInitial specIds specLocs 57 | phase1 = [ filter ((== l) . 
loc) phase0 58 | | l <- tpLocations ] 59 | phase2 = intraConnect <$> phase1 60 | phase4 = interConnect phase2 61 | 62 | interConnect :: [[Spec]] -> [[Spec]] 63 | interConnect xss = 64 | take nlocs $ 65 | fmap linker (tails $ cycle xss) 66 | where 67 | nlocs = length xss 68 | linker (xs:xss') = 69 | [ x { links = ids <> links x } 70 | | (x, i) <- zip xs [0..] 71 | , let ids = idOf i <$> rings ] 72 | where rings = take (nlocs - 1) $ cycle <$> xss' 73 | idOf n xs = id (xs !! n) 74 | 75 | intraConnect :: [Spec] -> [Spec] 76 | intraConnect xs = connect 1 -- next 77 | $ connect (len - 1) -- prev 78 | $ connect (len `div` 3) -- chord 1 79 | $ connect ((len * 2) `div` 3) xs -- chord 2 80 | where 81 | len = length xs 82 | connect :: Int -> [Spec] -> [Spec] 83 | connect offt xs = 84 | take (length xs) $ 85 | fmap linker (tails ring) 86 | where linker (x:xs) = 87 | x { links = idOf (offt - 1) xs 88 | : links x } 89 | ring = cycle xs 90 | idOf n xs = id (xs !! n) 91 | 92 | mkInitial :: Int -> Location -> Spec 93 | mkInitial id loc = 94 | Spec{ links = [] 95 | , mpools = tpIdPools id 96 | , ..} 97 | specIds = [0..(tpSize - 1)] 98 | specLocs = take tpSize $ cycle $ tpLocations 99 | 100 | main :: IO () 101 | main = do 102 | (topoParams, topoJson, topoDot) <- execParser opts 103 | 104 | let topoSpec = mkTopology topoParams 105 | topo = mkNode <$> topoSpec 106 | 107 | writeTopo topo topoJson 108 | maybe (pure ()) (writeDot topoSpec) topoDot 109 | where 110 | cliParser :: Parser (TopoParams, FilePath, Maybe FilePath) 111 | cliParser = 112 | (,,) <$> topoParamsParser 113 | <*> strOption 114 | ( long "topology-output" 115 | <> help "Topology file to write" 116 | <> metavar "OUTFILE" ) 117 | <*> optional 118 | (strOption 119 | ( long "dot-output" 120 | <> help "Dot file to write" 121 | <> metavar "OUTFILE" )) 122 | 123 | topoParamsParser = TopoParams 124 | <$> option auto 125 | ( long "size" 126 | <> metavar "SIZE" 127 | <> help "Node count" ) 128 | <*> some 129 | (option auto 130 | ( 
long "loc" 131 | <> help "location" 132 | <> metavar "INT" )) 133 | <*> pure defaultRoleSelector 134 | 135 | defaultRoleSelector = \case 136 | 0 -> Nothing -- BFT node has no pools 137 | 1 -> Just 1 -- Regular pools have just 1 pool 138 | _ -> Just 2 -- Dense pools have any amount >1 as marker 139 | 140 | opts = info (cliParser <**> helper) 141 | ( fullDesc 142 | <> progDesc "Cardano topology generator" 143 | <> header "make-topology - generate Cardano node topologies" ) 144 | 145 | --- * To JSON topology 146 | --- 147 | writeTopo :: [Node] -> FilePath -> IO () 148 | writeTopo topo f = 149 | IO.withFile f IO.WriteMode $ \hnd -> 150 | LBS.hPutStrLn hnd . encode $ Topology topo [] 151 | 152 | mkNode :: Spec -> Node 153 | mkNode Spec{..} = Node{..} 154 | where 155 | name = idName nodeId 156 | org = "IOHK" 157 | nodeId = id 158 | pools = mpools 159 | region = locationRegion loc 160 | producers = idName <$> links 161 | 162 | data Topology = Topology 163 | { coreNodes :: [Node] 164 | , relayNodes :: [Node] 165 | } 166 | deriving (Generic, Show) 167 | 168 | data Node = Node 169 | { name :: String 170 | , org :: String 171 | , region :: String 172 | , producers :: [String] 173 | , nodeId :: Int 174 | , pools :: Maybe Int 175 | } 176 | deriving (Generic, Show) 177 | 178 | instance ToJSON Topology 179 | instance ToJSON Node where 180 | toEncoding = genericToEncoding defaultOptions { omitNothingFields = True } 181 | 182 | --- * To Graphviz 183 | --- 184 | writeDot :: [Spec] -> FilePath -> IO () 185 | writeDot topo f = 186 | IO.withFile f IO.WriteMode $ \hnd -> 187 | T.hPutStrLn hnd $ 188 | G.renderDot $ G.toDot $ 189 | uncurry (G.graphElemsToDot params) (toGV topo) 190 | where 191 | params = G.nonClusteredParams 192 | { G.globalAttributes = 193 | [ G.GraphAttrs 194 | [G.Scale $ G.DVal 5] 195 | ] 196 | , G.fmtNode = 197 | \(_, Spec{..})-> 198 | [ G.FillColor . G.toColorList . 
(:[]) $ 199 | case id of 200 | 0 -> G.RGB 250 250 150 201 | 1 -> G.RGB 150 250 250 202 | n -> locationColor loc 203 | , G.Style [G.SItem G.Filled []] 204 | ] 205 | } 206 | 207 | toGV :: [Spec] -> ([(String, Spec)], [(String, String, String)]) 208 | toGV xs = 209 | (,) ((\s@Spec{..} -> (("node-" <> show id), s)) <$> xs) 210 | (concat $ 211 | (\Spec{..} -> (("node-" <> show id, , "") 212 | . ("node-" <>) 213 | . show <$> links)) <$> xs) 214 | 215 | --- * Aux 216 | --- 217 | idName :: Int -> String 218 | idName = ("node-" <>) . show 219 | 220 | locationRegion :: Location -> String 221 | locationRegion = \case 222 | EU -> "eu-central-1" 223 | AP -> "ap-southeast-2" 224 | US -> "us-east-1" 225 | 226 | locationColor :: Location -> G.Color 227 | locationColor = \case 228 | AP -> G.RGB 250 200 200 229 | EU -> G.RGB 200 200 250 230 | US -> G.RGB 200 250 200 231 | -------------------------------------------------------------------------------- /nix/sources.nix: -------------------------------------------------------------------------------- 1 | # This file has been generated by Niv. 2 | 3 | let 4 | 5 | # 6 | # The fetchers. fetch_ fetches specs of type . 7 | # 8 | 9 | fetch_file = pkgs: name: spec: 10 | let 11 | name' = sanitizeName name + "-src"; 12 | in 13 | if spec.builtin or true then 14 | builtins_fetchurl { inherit (spec) url sha256; name = name'; } 15 | else 16 | pkgs.fetchurl { inherit (spec) url sha256; name = name'; }; 17 | 18 | fetch_tarball = pkgs: name: spec: 19 | let 20 | name' = sanitizeName name + "-src"; 21 | in 22 | if spec.builtin or true then 23 | builtins_fetchTarball { name = name'; inherit (spec) url sha256; } 24 | else 25 | pkgs.fetchzip { name = name'; inherit (spec) url sha256; }; 26 | 27 | fetch_git = name: spec: 28 | let 29 | ref = 30 | if spec ? ref then spec.ref else 31 | if spec ? branch then "refs/heads/${spec.branch}" else 32 | if spec ? 
tag then "refs/tags/${spec.tag}" else 33 | abort "In git source '${name}': Please specify `ref`, `tag` or `branch`!"; 34 | in 35 | builtins.fetchGit { url = spec.repo; inherit (spec) rev; inherit ref; }; 36 | 37 | fetch_local = spec: spec.path; 38 | 39 | fetch_builtin-tarball = name: throw 40 | ''[${name}] The niv type "builtin-tarball" is deprecated. You should instead use `builtin = true`. 41 | $ niv modify ${name} -a type=tarball -a builtin=true''; 42 | 43 | fetch_builtin-url = name: throw 44 | ''[${name}] The niv type "builtin-url" will soon be deprecated. You should instead use `builtin = true`. 45 | $ niv modify ${name} -a type=file -a builtin=true''; 46 | 47 | # 48 | # Various helpers 49 | # 50 | 51 | # https://github.com/NixOS/nixpkgs/pull/83241/files#diff-c6f540a4f3bfa4b0e8b6bafd4cd54e8bR695 52 | sanitizeName = name: 53 | ( 54 | concatMapStrings (s: if builtins.isList s then "-" else s) 55 | ( 56 | builtins.split "[^[:alnum:]+._?=-]+" 57 | ((x: builtins.elemAt (builtins.match "\\.*(.*)" x) 0) name) 58 | ) 59 | ); 60 | 61 | # The set of packages used when specs are fetched using non-builtins. 62 | mkPkgs = sources: system: 63 | let 64 | sourcesNixpkgs = 65 | import (builtins_fetchTarball { inherit (sources.nixpkgs) url sha256; }) { inherit system; }; 66 | hasNixpkgsPath = builtins.any (x: x.prefix == "nixpkgs") builtins.nixPath; 67 | hasThisAsNixpkgsPath = == ./.; 68 | in 69 | if builtins.hasAttr "nixpkgs" sources 70 | then sourcesNixpkgs 71 | else if hasNixpkgsPath && ! hasThisAsNixpkgsPath then 72 | import {} 73 | else 74 | abort 75 | '' 76 | Please specify either (through -I or NIX_PATH=nixpkgs=...) or 77 | add a package called "nixpkgs" to your sources.json. 78 | ''; 79 | 80 | # The actual fetching function. 81 | fetch = pkgs: name: spec: 82 | 83 | if ! 
builtins.hasAttr "type" spec then 84 | abort "ERROR: niv spec ${name} does not have a 'type' attribute" 85 | else if spec.type == "file" then fetch_file pkgs name spec 86 | else if spec.type == "tarball" then fetch_tarball pkgs name spec 87 | else if spec.type == "git" then fetch_git name spec 88 | else if spec.type == "local" then fetch_local spec 89 | else if spec.type == "builtin-tarball" then fetch_builtin-tarball name 90 | else if spec.type == "builtin-url" then fetch_builtin-url name 91 | else 92 | abort "ERROR: niv spec ${name} has unknown type ${builtins.toJSON spec.type}"; 93 | 94 | # If the environment variable NIV_OVERRIDE_${name} is set, then use 95 | # the path directly as opposed to the fetched source. 96 | replace = name: drv: 97 | let 98 | saneName = stringAsChars (c: if isNull (builtins.match "[a-zA-Z0-9]" c) then "_" else c) name; 99 | ersatz = builtins.getEnv "NIV_OVERRIDE_${saneName}"; 100 | in 101 | if ersatz == "" then drv else 102 | # this turns the string into an actual Nix path (for both absolute and 103 | # relative paths) 104 | if builtins.substring 0 1 ersatz == "/" then /. + ersatz else /. 
+ builtins.getEnv "PWD" + "/${ersatz}"; 105 | 106 | # Ports of functions for older nix versions 107 | 108 | # a Nix version of mapAttrs if the built-in doesn't exist 109 | mapAttrs = builtins.mapAttrs or ( 110 | f: set: with builtins; 111 | listToAttrs (map (attr: { name = attr; value = f attr set.${attr}; }) (attrNames set)) 112 | ); 113 | 114 | # https://github.com/NixOS/nixpkgs/blob/0258808f5744ca980b9a1f24fe0b1e6f0fecee9c/lib/lists.nix#L295 115 | range = first: last: if first > last then [] else builtins.genList (n: first + n) (last - first + 1); 116 | 117 | # https://github.com/NixOS/nixpkgs/blob/0258808f5744ca980b9a1f24fe0b1e6f0fecee9c/lib/strings.nix#L257 118 | stringToCharacters = s: map (p: builtins.substring p 1 s) (range 0 (builtins.stringLength s - 1)); 119 | 120 | # https://github.com/NixOS/nixpkgs/blob/0258808f5744ca980b9a1f24fe0b1e6f0fecee9c/lib/strings.nix#L269 121 | stringAsChars = f: s: concatStrings (map f (stringToCharacters s)); 122 | concatMapStrings = f: list: concatStrings (map f list); 123 | concatStrings = builtins.concatStringsSep ""; 124 | 125 | # https://github.com/NixOS/nixpkgs/blob/8a9f58a375c401b96da862d969f66429def1d118/lib/attrsets.nix#L331 126 | optionalAttrs = cond: as: if cond then as else {}; 127 | 128 | # fetchTarball version that is compatible between all the versions of Nix 129 | builtins_fetchTarball = { url, name ? null, sha256 }@attrs: 130 | let 131 | inherit (builtins) lessThan nixVersion fetchTarball; 132 | in 133 | if lessThan nixVersion "1.12" then 134 | fetchTarball ({ inherit url; } // (optionalAttrs (!isNull name) { inherit name; })) 135 | else 136 | fetchTarball attrs; 137 | 138 | # fetchurl version that is compatible between all the versions of Nix 139 | builtins_fetchurl = { url, name ? 
null, sha256 }@attrs: 140 | let 141 | inherit (builtins) lessThan nixVersion fetchurl; 142 | in 143 | if lessThan nixVersion "1.12" then 144 | fetchurl ({ inherit url; } // (optionalAttrs (!isNull name) { inherit name; })) 145 | else 146 | fetchurl attrs; 147 | 148 | # Create the final "sources" from the config 149 | mkSources = config: 150 | mapAttrs ( 151 | name: spec: 152 | if builtins.hasAttr "outPath" spec 153 | then abort 154 | "The values in sources.json should not have an 'outPath' attribute" 155 | else 156 | spec // { outPath = replace name (fetch config.pkgs name spec); } 157 | ) config.sources; 158 | 159 | # The "config" used by the fetchers 160 | mkConfig = 161 | { sourcesFile ? if builtins.pathExists ./sources.json then ./sources.json else null 162 | , sources ? if isNull sourcesFile then {} else builtins.fromJSON (builtins.readFile sourcesFile) 163 | , system ? builtins.currentSystem 164 | , pkgs ? mkPkgs sources system 165 | }: rec { 166 | # The sources, i.e. the attribute set of spec name to spec 167 | inherit sources; 168 | 169 | # The "pkgs" (evaluated nixpkgs) to use for e.g. non-builtin fetchers 170 | inherit pkgs; 171 | }; 172 | 173 | in 174 | mkSources (mkConfig {}) // { __functor = _: settings: mkSources (mkConfig settings); } 175 | -------------------------------------------------------------------------------- /bench/lib-sanity.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # shellcheck disable=1091,2016 3 | 4 | sanity_check_list=() 5 | 6 | sanity_check_list+=(sanity_check_start_log_spread) 7 | sanity_check_start_log_spread() { 8 | local dir=$1 t=${2:-$(jq .meta.profile_content.tolerances $dir/meta.json)} 9 | sanity_check "$t" "$dir" ' 10 | $analysis.logs 11 | | map 12 | ( (.earliest - $meta.timestamp | fabs) 13 | as $delta 14 | | select ($delta > $allowed.start_log_spread_s) 15 | | . + 16 | { delta: $delta 17 | , start: $meta.timestamp }) 18 | ' '. 
19 | | map 20 | ({ kind: "start-log-spread" 21 | } + .) 22 | | .[]' 23 | } 24 | sanity_check_list+=(sanity_check_last_log_spread) 25 | sanity_check_last_log_spread() { 26 | local dir=$1 t=${2:-$(jq .meta.profile_content.tolerances $dir/meta.json)} 27 | sanity_check "$t" "$dir" ' 28 | $analysis.logs 29 | | map ## Generator always finishes a bit early, and 30 | ## we have it analysed to death by other means.. 31 | (select (.name != "generator")) 32 | | map 33 | ( (.latest - $analysis.final_log_timestamp | fabs) 34 | as $delta 35 | | select ($delta > $allowed.last_log_spread_s) 36 | | . + 37 | { delta: $delta 38 | , final_log_timestamp: $analysis.final_log_timestamp }) 39 | ' '. 40 | | map 41 | ({ kind: "latest-log-spread" 42 | } + .) 43 | | .[]' 44 | } 45 | sanity_check_list+=() 46 | sanity_check_not_even_started() { 47 | local dir=$1 t=${2:-$(jq .meta.profile_content.tolerances $dir/meta.json)} 48 | sanity_check "$t" "$dir" ' 49 | $blocks 50 | | length == 0 51 | ' '. 52 | | { kind: "blockchain-not-even-started" 53 | }' --slurpfile blocks "$dir"/analysis/explorer.MsgBlock.json 54 | } 55 | sanity_check_list+=() 56 | sanity_check_silence_since_last_block() { 57 | local dir=$1 t=${2:-$(jq .meta.profile_content.tolerances $dir/meta.json)} 58 | sanity_check "$t" "$dir" ' 59 | $blocks[-1] // { timestamp: $analysis.first_node_log_timestamp } 60 | | ($analysis.final_node_log_timestamp - .timestamp) 61 | as $delta 62 | | if $delta >= $allowed.silence_since_last_block_s 63 | then $delta else empty end 64 | ' '. 65 | | { kind: "blockchain-stopped" 66 | , silence_since_last_block_s: . 
67 | , allowance: $allowed.silence_since_last_block_s 68 | }' --slurpfile blocks "$dir"/analysis/explorer.MsgBlock.json 69 | } 70 | sanity_check_list+=(sanity_check_no_txs_in_blocks) 71 | sanity_check_no_txs_in_blocks() { 72 | local dir=$1 t=${2:-$(jq .meta.profile_content.tolerances $dir/meta.json)} 73 | sanity_check "$t" "$dir" ' 74 | $txstats.tx_seen_in_blocks == 0' ' 75 | { kind: "no-txs-in-blocks" 76 | }' 77 | } 78 | sanity_check_list+=(sanity_check_announced_less_txs_than_specified) 79 | sanity_check_announced_less_txs_than_specified() { 80 | local dir=$1 t=${2:-$(jq .meta.profile_content.tolerances $dir/meta.json)} 81 | sanity_check "$t" "$dir" ' 82 | ## Guard against old logs, where tx_annced is 0: 83 | $txstats.tx_annced >= $txstats.tx_sent and 84 | $prof.generator.tx_count > $txstats.tx_annced' ' 85 | { kind: "announced-less-txs-than-specified" 86 | , required: $prof.generator.tx_count 87 | , sent: $txstats.tx_sent 88 | }' 89 | } 90 | sanity_check_list+=(sanity_check_sent_less_txs_than_specified) 91 | sanity_check_sent_less_txs_than_specified() { 92 | local dir=$1 t=${2:-$(jq .meta.profile_content.tolerances $dir/meta.json)} 93 | sanity_check "$t" "$dir" ' 94 | $prof.generator.tx_count > $txstats.tx_sent' ' 95 | { kind: "sent-less-txs-than-specified" 96 | , required: $prof.generator.tx_count 97 | , sent: $txstats.tx_sent 98 | }' 99 | } 100 | sanity_check_list+=(sanity_check_tx_loss_over_threshold) 101 | sanity_check_tx_loss_over_threshold() { 102 | local dir=$1 t=${2:-$(jq .meta.profile_content.tolerances $dir/meta.json)} 103 | sanity_check "$t" "$dir" ' 104 | $txstats.tx_sent * (1.0 - $allowed.tx_loss_ratio) 105 | > $txstats.tx_seen_in_blocks' ' 106 | { kind: "txs-loss-over-threshold" 107 | , sent: $txstats.tx_sent 108 | , threshold: ($txstats.tx_sent * (1.0 - $allowed.tx_loss_ratio)) 109 | , received: $txstats.tx_seen_in_blocks 110 | }' 111 | } 112 | sanity_check_list+=() 113 | sanity_check_chain_density() { 114 | local dir=$1 t=${2:-$(jq 
.meta.profile_content.tolerances $dir/meta.json)} 115 | sanity_check "$t" "$dir" ' 116 | ($blocks | length) 117 | as $block_count 118 | | ($analysis.final_node_log_timestamp 119 | - $analysis.first_node_log_timestamp) 120 | as $cluster_lifetime_s 121 | | ($cluster_lifetime_s / $genesis.slot_duration | floor) 122 | as $cluster_lifetime_slots 123 | | ($block_count / ($cluster_lifetime_slots)) 124 | as $chain_density 125 | # | ($cluster_lifetime_slots - $block_count) 126 | # as $missed_slots 127 | | if $chain_density < $allowed.minimum_chain_density 128 | # or $missed_slots > $allowed.maximum_missed_slots 129 | then { lifetime_s: $cluster_lifetime_s 130 | , lifetime_slots: $cluster_lifetime_slots 131 | , block_count: $block_count 132 | # , missed_slots: $missed_slots 133 | , chain_density: $chain_density 134 | } else empty end' ' 135 | { kind: "insufficient_overall_chain_density" 136 | , lifetime_s: .lifetime_s 137 | , lifetime_slots: .lifetime_slots 138 | , block_count: .block_count 139 | # , missed_slots: .missed_slots 140 | , chain_density: .chain_density 141 | }' --slurpfile blocks "$dir"/analysis/explorer.MsgBlock.json 142 | } 143 | # sanity_check_list+=(sanity_check_) 144 | # sanity_check_() { 145 | # local t=$1 dir=$2 146 | # } 147 | 148 | sanity_check_run() { 149 | local dir=${1:-.} tolerances t 150 | 151 | for check in ${sanity_check_list[*]} 152 | do echo -n " $check" | sed 's/sanity_//' >&2 153 | $check "$dir" "$(jq .meta.profile_content.tolerances $dir/meta.json)" 154 | done | jq --slurp ' 155 | if length != 0 156 | then . + 157 | [{ kind: "tolerances" } 158 | + $tolerances] else . 
end 159 | ' --argjson tolerances "$(jq .meta.profile_content.tolerances $dir/meta.json)" 160 | } 161 | 162 | sanity_check() { 163 | local tolerances=$1 dir=$2 test=$3 err=$4; shift 4 164 | sanity_checker "$tolerances" "$dir" \ 165 | " ($test)"' as $test 166 | | if $test != {} and $test != [] and $test != "" and $test 167 | then ($test | '"$err"') else empty end 168 | ' "$@" 169 | } 170 | 171 | sanity_checker() { 172 | local tolerances=$1 dir=$2 expr=$3; shift 3 173 | 174 | jq ' $meta[0].meta as $meta 175 | | $analysis[0] as $analysis 176 | | $txstats[0] as $txstats 177 | | ($meta.profile_content 178 | ## TODO: backward compat 179 | // $meta.generator_params) 180 | as $prof 181 | | ($prof.genesis 182 | ## TODO: backward compat 183 | // $prof.genesis_params) 184 | as $genesis 185 | | $prof.generator as $generator 186 | | '"$expr"' 187 | ' --slurpfile meta "$dir/meta.json" \ 188 | --slurpfile analysis "$dir/analysis.json" \ 189 | --slurpfile txstats "$dir/analysis/tx-stats.json" \ 190 | --argjson allowed "$tolerances" \ 191 | "$@" <<<0 192 | } 193 | --------------------------------------------------------------------------------