├── aws-network-spe-py ├── ssh-to-host ├── spe │ ├── __init__.py │ ├── network.py │ └── node.py ├── .gitignore ├── requirements.txt ├── Pulumi.yaml ├── token-demo ├── README.md └── __main__.py ├── bare-metal-fd-ts ├── ssh-to-host ├── .gitignore ├── Pulumi.yaml ├── package.json ├── tsconfig.json ├── index.ts └── README.md ├── gcp-network-spe-ts ├── ssh-to-host ├── .gitignore ├── Pulumi.yaml ├── package.json ├── tsconfig.json ├── spe.ts ├── README.md └── index.ts ├── aws-validator-agave-ts ├── ssh-to-host ├── .gitignore ├── Pulumi.yaml ├── package.json ├── tsconfig.json ├── aws.ts ├── index.ts └── README.md ├── aws-validator-fd-ts ├── ssh-to-host ├── .gitignore ├── Pulumi.yaml ├── package.json ├── tsconfig.json ├── aws.ts ├── index.ts └── README.md ├── aws-validator-xen-ts ├── ssh-to-host ├── .gitignore ├── Pulumi.yaml ├── package.json ├── tsconfig.json ├── aws.ts ├── index.ts └── README.md ├── gcp-validator-agave-ts ├── ssh-to-host ├── .gitignore ├── Pulumi.yaml ├── package.json ├── tsconfig.json ├── gcp.ts ├── index.ts └── README.md ├── .gitignore ├── package.json ├── .githooks ├── pre-commit ├── commit-msg └── lib.opsh ├── Makefile ├── bin ├── check-env ├── ssh-to-host └── opsh ├── guides ├── RUNNER-CONFIG.md ├── FAULT-TOLERANCE.md ├── VALIDATOR-VERSION.md ├── OPERATING-VALIDATOR.md ├── RUNNING-BENCH-TPS.md ├── DELINQUENT-VALIDATOR.md ├── HARD-FORK-RESTART.md └── AWS-BENCHMARK-REPORT.md ├── README.md ├── PULUMI-AWS.md └── PULUMI.md /aws-network-spe-py/ssh-to-host: -------------------------------------------------------------------------------- 1 | ../bin/ssh-to-host -------------------------------------------------------------------------------- /bare-metal-fd-ts/ssh-to-host: -------------------------------------------------------------------------------- 1 | ../bin/ssh-to-host -------------------------------------------------------------------------------- /gcp-network-spe-ts/ssh-to-host: -------------------------------------------------------------------------------- 1 | ../bin/ssh-to-host -------------------------------------------------------------------------------- /aws-validator-agave-ts/ssh-to-host: -------------------------------------------------------------------------------- 1 | ../bin/ssh-to-host -------------------------------------------------------------------------------- /aws-validator-fd-ts/ssh-to-host: -------------------------------------------------------------------------------- 1 | ../bin/ssh-to-host -------------------------------------------------------------------------------- /aws-validator-xen-ts/ssh-to-host: -------------------------------------------------------------------------------- 1 | ../bin/ssh-to-host -------------------------------------------------------------------------------- /gcp-validator-agave-ts/ssh-to-host: -------------------------------------------------------------------------------- 1 | ../bin/ssh-to-host -------------------------------------------------------------------------------- /aws-network-spe-py/spe/__init__.py: -------------------------------------------------------------------------------- 1 | from .node import * 2 | -------------------------------------------------------------------------------- /bare-metal-fd-ts/.gitignore: -------------------------------------------------------------------------------- 1 | /bin/ 2 | /node_modules/ 3 | -------------------------------------------------------------------------------- /gcp-network-spe-ts/.gitignore: 
-------------------------------------------------------------------------------- 1 | /bin/ 2 | /node_modules/ 3 | -------------------------------------------------------------------------------- /aws-validator-agave-ts/.gitignore: -------------------------------------------------------------------------------- 1 | /bin/ 2 | /node_modules/ 3 | -------------------------------------------------------------------------------- /aws-validator-fd-ts/.gitignore: -------------------------------------------------------------------------------- 1 | /bin/ 2 | /node_modules/ 3 | -------------------------------------------------------------------------------- /aws-validator-xen-ts/.gitignore: -------------------------------------------------------------------------------- 1 | /bin/ 2 | /node_modules/ 3 | -------------------------------------------------------------------------------- /gcp-validator-agave-ts/.gitignore: -------------------------------------------------------------------------------- 1 | /bin/ 2 | /node_modules/ 3 | -------------------------------------------------------------------------------- /aws-network-spe-py/.gitignore: -------------------------------------------------------------------------------- 1 | stake-state 2 | token-demo-state 3 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *~ 2 | *\#* 3 | Pulumi.*.yaml 4 | __pycache__ 5 | venv 6 | -------------------------------------------------------------------------------- /aws-network-spe-py/requirements.txt: -------------------------------------------------------------------------------- 1 | pulumi>=3.0.0,<4.0.0 2 | pulumi-aws>=6.0.2,<7.0.0 3 | pulumi-tls>=5.0.8 4 | pulumi-svmkit>=0.44.0,<1.0.0 5 | pyright>=1.1.0,<2.0.0 6 | -------------------------------------------------------------------------------- /gcp-network-spe-ts/Pulumi.yaml: -------------------------------------------------------------------------------- 1 | name: gcp-network-spe-ts 2 | description: A TypeScript example of a multi-node Solana Permissioned Environment in GCP 3 | runtime: 4 | name: nodejs 5 | options: 6 | packagemanager: yarn 7 | -------------------------------------------------------------------------------- /gcp-validator-agave-ts/Pulumi.yaml: -------------------------------------------------------------------------------- 1 | name: gcp-validator-agave-ts 2 | description: An Agave validator configuration written in TypeScript running on GCP. 
3 | runtime: 4 | name: nodejs 5 | options: 6 | packagemanager: yarn 7 | -------------------------------------------------------------------------------- /aws-network-spe-py/Pulumi.yaml: -------------------------------------------------------------------------------- 1 | name: aws-network-spe-py 2 | runtime: 3 | name: python 4 | options: 5 | toolchain: pip 6 | virtualenv: venv 7 | typechecker: pyright 8 | description: A Python example of a multi-node Solana Permissioned Environment in AWS 9 | -------------------------------------------------------------------------------- /aws-validator-xen-ts/Pulumi.yaml: -------------------------------------------------------------------------------- 1 | name: aws-validator-xen-ts 2 | description: An X1 validator running on AWS using TypeScript 3 | runtime: 4 | name: nodejs 5 | options: 6 | packagemanager: yarn 7 | config: 8 | pulumi:tags: 9 | value: 10 | pulumi:template: aws-typescript 11 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "devDependencies": { 3 | "@eslint/js": "^9.31.0", 4 | "@types/node": "^24.1.0", 5 | "@typescript-eslint/eslint-plugin": "^8.38.0", 6 | "@typescript-eslint/parser": "^8.38.0", 7 | "eslint": "^9.32.0", 8 | "typescript": "5.8.3" 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /aws-validator-agave-ts/Pulumi.yaml: -------------------------------------------------------------------------------- 1 | name: aws-validator-agave-ts 2 | description: An Agave validator running in AWS using TypeScript 3 | runtime: 4 | name: nodejs 5 | options: 6 | packagemanager: yarn 7 | config: 8 | pulumi:tags: 9 | value: 10 | pulumi:template: aws-typescript 11 | -------------------------------------------------------------------------------- /aws-validator-fd-ts/Pulumi.yaml: -------------------------------------------------------------------------------- 1 | name: aws-validator-fd-ts 2 | description: A Frankendancer validator running in AWS using TypeScript 3 | runtime: 4 | name: nodejs 5 | options: 6 | packagemanager: yarn 7 | config: 8 | pulumi:tags: 9 | value: 10 | pulumi:template: aws-typescript 11 | -------------------------------------------------------------------------------- /bare-metal-fd-ts/Pulumi.yaml: -------------------------------------------------------------------------------- 1 | name: bare-metal-fd-ts 2 | description: An Agave validator running on any remote machine using TypeScript 3 | runtime: 4 | name: nodejs 5 | options: 6 | packagemanager: yarn 7 | config: 8 | pulumi:tags: 9 | value: 10 | pulumi:template: typescript 11 | -------------------------------------------------------------------------------- /.githooks/pre-commit: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env opsh 2 | # shellcheck shell=bash 3 | 4 | source "$SCRIPTDIR/lib.opsh" 5 | 6 | step::000::checkout-staging() { 7 | STAGING_DIR=$(checkout-staging pre-commit) 8 | } 9 | 10 | step::010::check-source() { 11 | (cd "$STAGING_DIR" && make check) 12 | } 13 | 14 | steps::run step 15 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: lint 2 | 3 | lint: 4 | shfmt -d .githooks/* 5 | shellcheck -P .githooks .githooks/* 6 | yarn eslint 7 | 8 | check: lint 9 | 10 | format: 11 | shfmt -w .githooks/* 
./bin/check-env 12 | 13 | clean: 14 | rm -f .env-checked 15 | 16 | 17 | .env-checked: bin/check-env 18 | ./bin/check-env 19 | touch .env-checked 20 | 21 | include .env-checked 22 | -------------------------------------------------------------------------------- /gcp-validator-agave-ts/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "gcp-validator-agave-ts", 3 | "main": "index.ts", 4 | "devDependencies": { 5 | "@types/node": "^18", 6 | "typescript": "^5.0.0" 7 | }, 8 | "dependencies": { 9 | "@pulumi/gcp": "^8.0.0", 10 | "@pulumi/pulumi": "^3.113.0", 11 | "@pulumi/tls": "^5.1.0", 12 | "@svmkit/pulumi-svmkit": "^0.44.1" 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /bare-metal-fd-ts/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "bare-metal-fd-ts", 3 | "main": "index.ts", 4 | "license": "GPL-3.0", 5 | "devDependencies": { 6 | "@types/node": "^18", 7 | "prettier": "^3.3.3", 8 | "typescript": "^5.6.3", 9 | "typescript-language-server": "^4.3.3" 10 | }, 11 | "dependencies": { 12 | "@pulumi/pulumi": "^3.113.0", 13 | "@pulumi/tls": "^5.0.9", 14 | "@svmkit/pulumi-svmkit": "^0.42.0" 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /gcp-network-spe-ts/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "gcp-validator-agave-ts", 3 | "main": "index.ts", 4 | "devDependencies": { 5 | "@types/node": "^18", 6 | "prettier": "^3.4.2", 7 | "typescript": "^5.0.0", 8 | "typescript-language-server": "^4.3.3" 9 | }, 10 | "dependencies": { 11 | "@pulumi/gcp": "^8.0.0", 12 | "@pulumi/pulumi": "^3.113.0", 13 | "@pulumi/tls": "^5.1.0", 14 | "@svmkit/pulumi-svmkit": "^0.44.0" 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /aws-validator-xen-ts/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "aws-validator-xen-ts", 3 | "main": "index.ts", 4 | "license": "GPLv3", 5 | "devDependencies": { 6 | "@types/node": "^18", 7 | "prettier": "^3.3.3", 8 | "typescript": "^5.6.3", 9 | "typescript-language-server": "^4.3.3" 10 | }, 11 | "dependencies": { 12 | "@pulumi/aws": "^6.0.0", 13 | "@pulumi/awsx": "^2.0.2", 14 | "@pulumi/pulumi": "^3.113.0", 15 | "@pulumi/tls": "^5.0.9", 16 | "@svmkit/pulumi-svmkit": "^0.44.0" 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /aws-validator-agave-ts/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "aws-validator-agave-ts", 3 | "main": "index.ts", 4 | "license": "GPL-3.0", 5 | "devDependencies": { 6 | "@types/node": "^18", 7 | "prettier": "^3.3.3", 8 | "typescript": "^5.6.3", 9 | "typescript-language-server": "^4.3.3" 10 | }, 11 | "dependencies": { 12 | "@pulumi/aws": "^6.0.0", 13 | "@pulumi/awsx": "^2.0.2", 14 | "@pulumi/pulumi": "^3.113.0", 15 | "@pulumi/tls": "^5.0.9", 16 | "@svmkit/pulumi-svmkit": "^0.44.0" 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /aws-validator-fd-ts/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "aws-validator-agave-ts", 3 | "main": "index.ts", 4 | "license": "GPL-3.0", 5 | "devDependencies": { 6 | "@types/node": "^18", 7 | 
"prettier": "^3.3.3", 8 | "typescript": "^5.6.3", 9 | "typescript-language-server": "^4.3.3" 10 | }, 11 | "dependencies": { 12 | "@pulumi/aws": "^6.0.0", 13 | "@pulumi/awsx": "^2.0.2", 14 | "@pulumi/pulumi": "^3.113.0", 15 | "@pulumi/tls": "^5.0.9", 16 | "@svmkit/pulumi-svmkit": "^0.44.0" 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /aws-validator-fd-ts/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "strict": true, 4 | "outDir": "bin", 5 | "target": "es2020", 6 | "module": "commonjs", 7 | "moduleResolution": "node", 8 | "sourceMap": true, 9 | "experimentalDecorators": true, 10 | "pretty": true, 11 | "noFallthroughCasesInSwitch": true, 12 | "noImplicitReturns": true, 13 | "forceConsistentCasingInFileNames": true 14 | }, 15 | "files": [ 16 | "index.ts" 17 | ] 18 | } 19 | -------------------------------------------------------------------------------- /aws-validator-xen-ts/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "strict": true, 4 | "outDir": "bin", 5 | "target": "es2020", 6 | "module": "commonjs", 7 | "moduleResolution": "node", 8 | "sourceMap": true, 9 | "experimentalDecorators": true, 10 | "pretty": true, 11 | "noFallthroughCasesInSwitch": true, 12 | "noImplicitReturns": true, 13 | "forceConsistentCasingInFileNames": true 14 | }, 15 | "files": [ 16 | "index.ts" 17 | ] 18 | } 19 | -------------------------------------------------------------------------------- /bare-metal-fd-ts/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "strict": true, 4 | "outDir": "bin", 5 | "target": "es2020", 6 | "module": "commonjs", 7 | "moduleResolution": "node", 8 | "sourceMap": true, 9 | "experimentalDecorators": true, 10 | "pretty": true, 11 | "noFallthroughCasesInSwitch": true, 12 | "noImplicitReturns": true, 13 | "forceConsistentCasingInFileNames": true 14 | }, 15 | "files": [ 16 | "index.ts" 17 | ] 18 | } 19 | -------------------------------------------------------------------------------- /gcp-network-spe-ts/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "strict": true, 4 | "outDir": "bin", 5 | "target": "es2020", 6 | "module": "commonjs", 7 | "moduleResolution": "node", 8 | "sourceMap": true, 9 | "experimentalDecorators": true, 10 | "pretty": true, 11 | "noFallthroughCasesInSwitch": true, 12 | "noImplicitReturns": true, 13 | "forceConsistentCasingInFileNames": true 14 | }, 15 | "files": [ 16 | "index.ts" 17 | ] 18 | } 19 | -------------------------------------------------------------------------------- /aws-validator-agave-ts/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "strict": true, 4 | "outDir": "bin", 5 | "target": "es2020", 6 | "module": "commonjs", 7 | "moduleResolution": "node", 8 | "sourceMap": true, 9 | "experimentalDecorators": true, 10 | "pretty": true, 11 | "noFallthroughCasesInSwitch": true, 12 | "noImplicitReturns": true, 13 | "forceConsistentCasingInFileNames": true 14 | }, 15 | "files": [ 16 | "index.ts" 17 | ] 18 | } 19 | -------------------------------------------------------------------------------- /gcp-validator-agave-ts/tsconfig.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "strict": true, 4 | "outDir": "bin", 5 | "target": "es2020", 6 | "module": "commonjs", 7 | "moduleResolution": "node", 8 | "sourceMap": true, 9 | "experimentalDecorators": true, 10 | "pretty": true, 11 | "noFallthroughCasesInSwitch": true, 12 | "noImplicitReturns": true, 13 | "forceConsistentCasingInFileNames": true 14 | }, 15 | "files": [ 16 | "index.ts" 17 | ] 18 | } 19 | -------------------------------------------------------------------------------- /bin/check-env: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env opsh 2 | # shellcheck shell=bash disable=SC2164 3 | opsh::version::require v0.7.0 4 | lib::import step-runner 5 | VERBOSE="${VERBOSE:-}" 6 | 7 | is_true() { 8 | local val="${1,,}" 9 | [[ "$val" =~ ^[1-9][0-9]*$ || "$val" =~ ^(true|yes|on)$ ]] 10 | } 11 | 12 | step::10::check_git_hooks() { 13 | is_true "${VERBOSE:-}" && log::info "Checking Git hooks setup..." 14 | git rev-parse --git-dir >/dev/null 2>&1 || log::fatal "Not inside a git repository" 15 | hookspath=$(git config core.hooksPath) || log::fatal "Couldn't retrieve your git config's core.hooksPath" 16 | [[ $hookspath = .githooks ]] || log::fatal "Your git hooks must be configured to point to .githooks" 17 | [[ -d "$hookspath" ]] || log::fatal "Hooks directory does not exist: $hookspath" 18 | } 19 | 20 | steps::run step 21 | -------------------------------------------------------------------------------- /.githooks/commit-msg: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env opsh 2 | # shellcheck shell=bash 3 | 4 | source "$SCRIPTDIR/lib.opsh" 5 | 6 | MSGFILE=$1 7 | shift 8 | LINE=0 9 | 10 | line-error() { 11 | log::fatal "$LINE: $*" 12 | } 13 | 14 | while read -r; do 15 | case "$LINE" in 16 | 0) 17 | [[ ${#REPLY} -gt 0 ]] || line-error "blank summary lines are not allowed" 18 | [[ ${#REPLY} -le 50 ]] || line-error "summary line is longer than 50 characters" 19 | ;; 20 | 1) 21 | [[ ${#REPLY} -eq 0 ]] || line-error "an empty line must exist between summary and body" 22 | ;; 23 | *) 24 | [[ ${#REPLY} -gt 0 ]] || line-error "blank body lines are not allowed" 25 | [[ ${#REPLY} -le 72 ]] || line-error "body line is longer than 72 characters" 26 | ;; 27 | esac 28 | LINE=$((LINE + 1)) 29 | done < <(grep -Ev '^\s*#' "$MSGFILE") 30 | 31 | [[ "$LINE" -ne 0 ]] || log::fatal "commit message is empty" 32 | -------------------------------------------------------------------------------- /.githooks/lib.opsh: -------------------------------------------------------------------------------- 1 | # shellcheck shell=bash 2 | lib::import git step-runner 3 | 4 | : "${GIT_HOOK_KEEP_STAGING:=false}" 5 | : "${GIT_SKIP_HOOKS:=false}" 6 | 7 | if $GIT_SKIP_HOOKS; then 8 | log::warn "skipping git hooks" 9 | exit 0 10 | fi 11 | 12 | if [[ ! -v _GIT_HOOK_STAGING_DIR ]]; then 13 | _GIT_HOOK_STAGING_DIR="$(git rev-parse --show-toplevel)/git-hook-staging" 14 | export _GIT_HOOK_STAGING_DIR 15 | exit::trigger staging-cleanup 16 | fi 17 | 18 | staging-cleanup() { 19 | if $GIT_HOOK_KEEP_STAGING; then 20 | return 0 21 | fi 22 | 23 | if [[ ! 
-d $_GIT_HOOK_STAGING_DIR ]]; then 24 | return 0 25 | fi 26 | 27 | log::info "cleaning up staging directories" 28 | rm -rf "$_GIT_HOOK_STAGING_DIR" 29 | } 30 | 31 | checkout-staging() { 32 | local dir hookname 33 | hookname=$1 34 | shift 35 | 36 | dir="$_GIT_HOOK_STAGING_DIR/$hookname-$$" 37 | git checkout-index -a --prefix="$dir/" 38 | echo "$dir" 39 | } 40 | -------------------------------------------------------------------------------- /aws-network-spe-py/spe/network.py: -------------------------------------------------------------------------------- 1 | import pulumi 2 | import pulumi_aws as aws 3 | 4 | network_config = pulumi.Config("network") 5 | vpc_id = network_config.get('vpcId') or None 6 | 7 | if vpc_id: 8 | subnets = aws.ec2.get_subnets( 9 | filters=[{ 10 | "name": "vpc-id", 11 | "values": [vpc_id], 12 | }], 13 | ) 14 | 15 | if len(subnets.ids) < 1: 16 | raise ValueError("VPC is required to have at least 1 subnet.") 17 | 18 | subnet_id = subnets.ids[0] 19 | else: 20 | subnet_id = None 21 | 22 | stack_name = pulumi.get_stack() 23 | 24 | external_sg = aws.ec2.SecurityGroup( 25 | "external-access", 26 | description="Allow external SSH access to all of the nodes", 27 | vpc_id=vpc_id, 28 | ingress=[ 29 | { 30 | "protocol": "tcp", 31 | "from_port": 0, 32 | "to_port": 22, 33 | "cidr_blocks": ["0.0.0.0/0"], 34 | }, 35 | ], 36 | egress=[ 37 | { 38 | "protocol": "-1", 39 | "from_port": 0, 40 | "to_port": 0, 41 | "cidr_blocks": ["0.0.0.0/0"], 42 | } 43 | ], 44 | tags={ 45 | "Stack": stack_name, 46 | } 47 | ) 48 | 49 | internal_sg = aws.ec2.SecurityGroup( 50 | "internal-access", 51 | description="Permissive internal traffic", 52 | vpc_id=vpc_id, 53 | ingress=[ 54 | {"protocol": "-1", "from_port": 0, "to_port": 0, "self": True}, 55 | ], 56 | egress=[ 57 | { 58 | "protocol": "-1", 59 | "from_port": 0, 60 | "to_port": 0, 61 | "cidr_blocks": ["0.0.0.0/0"], 62 | } 63 | ], 64 | tags={ 65 | "Stack": stack_name, 66 | } 67 | ) 68 | -------------------------------------------------------------------------------- /bin/ssh-to-host: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ../bin/opsh 2 | # shellcheck shell=bash 3 | 4 | check_pulumi_yaml() { 5 | local dir 6 | dir="$(pwd)" 7 | 8 | while [[ ! -f "$dir/Pulumi.yaml" && "$dir" != "/" ]]; do 9 | dir="$(dirname "$dir")" 10 | done 11 | 12 | [[ -f "$dir/Pulumi.yaml" ]] 13 | } 14 | 15 | if ! check_pulumi_yaml; then 16 | log::fatal "No Pulumi.yaml file found in the current directory or any of its parents." 17 | fi 18 | 19 | list_nodes() { 20 | pulumi stack output --show-secrets nodes | jq -r '.[].name' | nl -v 0 21 | exit "$1" 22 | } 23 | 24 | if [[ "$#" -eq 0 ]]; then 25 | log::info "Usage: $0 [command]" 26 | log::info "Available nodes:" 27 | list_nodes 0 28 | fi 29 | 30 | hostindex=$1 31 | shift 32 | 33 | NODE_COUNT=$(pulumi stack output --show-secrets nodes | jq -r 'length') 34 | 35 | if ! [[ "$hostindex" =~ ^[0-9]+$ ]] || [ "$hostindex" -ge "$NODE_COUNT" ]; then 36 | log::error "Invalid node index '$hostindex'." 
37 | log::info "Available nodes:" 38 | list_nodes 1 39 | fi 40 | 41 | PRIVKEY="$(temp::file)" 42 | 43 | cleanup() { 44 | rm -f "$PRIVKEY" 45 | } 46 | 47 | trap cleanup EXIT 48 | 49 | touch "$PRIVKEY" 50 | chmod 600 "$PRIVKEY" 51 | 52 | connection="$(pulumi stack output --show-secrets nodes | jq -r ".[$hostindex]"'.connection as 53 | {privateKey: $private_key, $user, $host} 54 | ?// {$private_key, $user, $host} 55 | | {privateKey: ($private_key // error), $user, $host} 56 | ')" 57 | 58 | jq -r '.privateKey' <<<"$connection" >"$PRIVKEY" 59 | USER=$(jq -r '.user' <<<"$connection") 60 | HOSTNAME=$(jq -r '.host' <<<"$connection") 61 | 62 | ssh -o StrictHostKeyChecking=off -o UserKnownHostsFile=/dev/null -i "$PRIVKEY" "$USER@$HOSTNAME" "$@" 63 | 64 | # vim:set ft=sh: 65 | -------------------------------------------------------------------------------- /gcp-validator-agave-ts/gcp.ts: -------------------------------------------------------------------------------- 1 | import * as pulumi from "@pulumi/pulumi"; 2 | import * as gcp from "@pulumi/gcp"; 3 | import * as tls from "@pulumi/tls"; 4 | 5 | const nodeConfig = new pulumi.Config("node"); 6 | const machineType = nodeConfig.get("machineType") ?? "c4-standard-8"; 7 | const osImage = nodeConfig.get("osImage") ?? "debian-12"; 8 | const diskSize = nodeConfig.getNumber("diskSize") ?? 256; 9 | 10 | export const user = nodeConfig.get("user") ?? "admin"; 11 | 12 | export const sshKey = new tls.PrivateKey("ssh-key", { 13 | algorithm: "ED25519", 14 | }); 15 | 16 | const network = new gcp.compute.Network("network", { 17 | autoCreateSubnetworks: false, 18 | }); 19 | 20 | const subnet = new gcp.compute.Subnetwork("subnet", { 21 | ipCidrRange: "10.0.1.0/24", 22 | network: network.id, 23 | }); 24 | 25 | const firewall = new gcp.compute.Firewall("firewall", { 26 | network: network.selfLink, 27 | allows: [ 28 | { 29 | protocol: "tcp", 30 | ports: ["22", "8000-8020", "8899"], 31 | }, 32 | { 33 | protocol: "udp", 34 | ports: ["8000-8020"], 35 | }, 36 | ], 37 | direction: "INGRESS", 38 | sourceRanges: ["0.0.0.0/0"], 39 | targetTags: [], 40 | }); 41 | 42 | export const instance = new gcp.compute.Instance( 43 | "instance", 44 | { 45 | machineType, 46 | bootDisk: { 47 | initializeParams: { 48 | image: osImage, 49 | size: diskSize, 50 | }, 51 | }, 52 | networkInterfaces: [ 53 | { 54 | network: network.id, 55 | subnetwork: subnet.id, 56 | accessConfigs: [{}], 57 | }, 58 | ], 59 | serviceAccount: { 60 | scopes: ["https://www.googleapis.com/auth/cloud-platform"], 61 | }, 62 | allowStoppingForUpdate: true, 63 | tags: [], 64 | metadata: { 65 | "enable-oslogin": "false", 66 | "ssh-keys": sshKey.publicKeyOpenssh.apply((k) => `${user}:${k}`), 67 | }, 68 | }, 69 | { dependsOn: firewall }, 70 | ); 71 | -------------------------------------------------------------------------------- /guides/RUNNER-CONFIG.md: -------------------------------------------------------------------------------- 1 | # Runner Configuration 2 | 3 | Under the hood, `svmkit` uses `apt` and `dpkg` to install and manage software on the remote hosts it interacts with. In order to support development and custom use cases, `svmkit` provides a means to override its software installation behavior using `runnerConfig`. This datastructure can be passed into any component in `svmkit` that installs software on the remote host. 4 | 5 | ## `aptLockTimeout` 6 | 7 | On the target hosts, only one `apt` or `dpkg` instance may run at a time. This causes a problem if multiple components are being installed in parallel. 
To work around this, `svmkit` maintains a lock which is shared amongst all of its `apt` invocations. This timeout (in seconds) can be configured as necessary, if the defaults aren't enough. 8 | 9 | ## `packageConfig` 10 | 11 | `packageConfig` allows you to do things like: 12 | 13 | - Override versions and/or releases for a given package installed by the component. 14 | - Install a package from a local file instead of using the `svmkit` `apt` repositories. 15 | - Install additional packages not normally included with a component. 16 | 17 | NOTE: Only packages which were already being installed by a component, or new packages added to the `additional` list, may be overridden. This is done to avoid typos in package overrides being silently ignored. 18 | 19 | # Example of `runnerConfig` 20 | 21 | ```typescript 22 | new svmkit.validator.Agave("avalidator", { 23 | runnerConfig: { 24 | aptLockTimeout: 200, 25 | packageConfig: { 26 | additional: ["anewpackage", "jq"], 27 | override: [ 28 | { 29 | name: "svmkit-agave-validator", 30 | path: "./build/svmkit-agave-validator.deb", 31 | }, 32 | { 33 | name: "anewpackage", 34 | path: "./build/my-new-package.deb", 35 | }, 36 | { 37 | name: "jq", 38 | version: "1.6-2.1", 39 | }, 40 | ], 41 | }, 42 | }, 43 | }); 44 | ``` 45 | -------------------------------------------------------------------------------- /bare-metal-fd-ts/index.ts: -------------------------------------------------------------------------------- 1 | import * as pulumi from "@pulumi/pulumi"; 2 | import * as svmkit from "@svmkit/pulumi-svmkit"; 3 | 4 | const solanaConfig = new pulumi.Config("solana"); 5 | const remote = new pulumi.Config("remote"); 6 | 7 | // Lookup information about the Solana network. 8 | const networkName = 9 | solanaConfig.get("network") ?? 10 | svmkit.solana.NetworkName.Testnet; 11 | const networkInfo = svmkit.networkinfo.getNetworkInfoOutput({ networkName }); 12 | 13 | // Create some keys for this validator to use. 14 | export const validatorKey = new svmkit.KeyPair("validator-key").json; 15 | export const voteAccountKey = new svmkit.KeyPair("vote-account-key").json; 16 | export const withdrawerKey = new svmkit.KeyPair("withdrawer-key").json; 17 | 18 | // Point pulumi-svmkit at the configured machine's SSH connection. 19 | const connection = { 20 | host: remote.require("host"), 21 | user: remote.require("user"), 22 | privateKey: remote.requireSecret("privateKey"), 23 | }; 24 | 25 | // Configure the instance for SVMKit 26 | const machine = new svmkit.machine.Machine("machine", { 27 | connection, 28 | }); 29 | 30 | // Instantiate a new Firedancer instance on the machine. 31 | new svmkit.validator.Firedancer( 32 | "fd", 33 | { 34 | connection, 35 | keyPairs: { 36 | identity: validatorKey, 37 | voteAccount: voteAccountKey, 38 | }, 39 | config: { 40 | user: "sol", 41 | gossip: { 42 | host: connection.host, 43 | entrypoints: networkInfo.entryPoint, 44 | }, 45 | consensus: { 46 | identityPath: "/home/sol/validator-keypair.json", 47 | voteAccountPath: "/home/sol/vote-account-keypair.json", 48 | knownValidators: networkInfo.knownValidator, 49 | expectedGenesisHash: networkInfo.genesisHash, 50 | }, 51 | ledger: { 52 | path: "/home/sol/ledger", 53 | accountsPath: "/home/sol/accounts", 54 | }, 55 | rpc: { 56 | port: 8899, 57 | private: true, 58 | }, 59 | }, 60 | }, 61 | { 62 | dependsOn: [machine], 63 | }, 64 | ); 65 | 66 | // Expose information required to SSH to the validator host. 
67 | export const nodes = [ 68 | { 69 | name: "instance", 70 | connection, 71 | }, 72 | ]; 73 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # SVMKit Examples 2 | 3 | Welcome to the **SVMKit Examples** repository! This collection of examples is designed to help developers gain a deeper understanding of **SVMKit** and explore ways to integrate it into various workflows. 4 | 5 | ## About This Repository 6 | 7 | This repository acts as a central hub for examples of how to use [SVMKit](https://github.com/abklabs/svmkit?tab=readme-ov-file#svmkit) to build both simple and complex Solana installations with ease. Here, you’ll find a range of example implementations, each demonstrating essential concepts and methods for utilizing **SVMKit**. While this repository does not cover every possible use case, the examples provided aim to serve as a solid foundation for getting started and exploring custom applications with SVMKit. 8 | 9 | Here are some examples of how to use SVMKit in various scenarios: 10 | 11 | ## Examples 12 | 13 | - Setting up a Solana Permissioned Environment [(SPE) on AWS](https://github.com/abklabs/svmkit-examples/tree/main/aws-network-spe-py) 14 | - Setting up a Validator to join [testnet](https://github.com/abklabs/pulumi-svmkit/tree/main/examples) - this is an example for developers hacking on the codebase - we'll add an example for node operators soon. 15 | 16 | ## Useful Links 17 | Until this repository is further populated, we recommend checking out: 18 | - [SVMKit](https://github.com/abklabs/svmkit?tab=readme-ov-file#svmkit) - Our deployment-agnostic tooling for building Solana validators and Solana Permissioned Environments (SPEs). 19 | - [Pulumi-SVMKit](https://github.com/abklabs/pulumi-svmkit?tab=readme-ov-file#pulumi-svmkit) - A Pulumi provider built around SVMKit. 20 | - [Pulumi Basics](https://github.com/abklabs/svmkit-examples/blob/main/PULUMI.md) - A quick guide on how to use Pulumi. 21 | 22 | ## Getting Started 23 | 24 | Each example includes: 25 | - **Setup instructions** to configure your environment. 26 | - **Code samples** showcasing specific SVMKit functionalities. 27 | - **Comments and tips** to highlight key integration points and best practices. 28 | 29 | Whether you’re new to SVMKit or looking to expand your knowledge, we hope this repository provides valuable insights and a starting point for your own implementations. 30 | 31 | ## Contributing 32 | 33 | We welcome contributions! If you have examples or improvements that could benefit other developers, please feel free to open a pull request. 34 | -------------------------------------------------------------------------------- /aws-validator-agave-ts/aws.ts: -------------------------------------------------------------------------------- 1 | import * as pulumi from "@pulumi/pulumi"; 2 | import * as aws from "@pulumi/aws"; 3 | import * as tls from "@pulumi/tls"; 4 | 5 | const nodeConfig = new pulumi.Config("node"); 6 | const instanceType = nodeConfig.get("instanceType") ?? "r6id.8xlarge"; 7 | const instanceArch = nodeConfig.get("instanceArch") ?? "x86_64"; 8 | const instanceAmi = nodeConfig.get("instanceAmi"); 9 | const iops = nodeConfig.getNumber("volumeIOPS") ?? 5000; 10 | export const instanceUser = nodeConfig.get("user") ?? "admin"; 11 | 12 | const rootVolumeSize = nodeConfig.getNumber("rootVolumeSize") ?? 
32; 13 | 14 | // Setup a local SSH private key, stored inside Pulumi. 15 | export const sshKey = new tls.PrivateKey("ssh-key", { 16 | algorithm: "ED25519", 17 | }); 18 | 19 | const keyPair = new aws.ec2.KeyPair("keypair", { 20 | publicKey: sshKey.publicKeyOpenssh, 21 | }); 22 | 23 | // Get AMI information on the latest Debian image inside AWS. 24 | const ami = 25 | instanceAmi ?? 26 | pulumi.output( 27 | aws.ec2.getAmi({ 28 | filters: [ 29 | { 30 | name: "name", 31 | values: ["debian-12-*"], 32 | }, 33 | { 34 | name: "architecture", 35 | values: [instanceArch], 36 | }, 37 | ], 38 | owners: ["136693071363"], // Debian 39 | mostRecent: true, 40 | }), 41 | ).id; 42 | 43 | const securityGroup = new aws.ec2.SecurityGroup("security-group", { 44 | description: "Allow SSH and specific inbound traffic", 45 | ingress: [ 46 | { 47 | protocol: "tcp", 48 | fromPort: 22, 49 | toPort: 22, 50 | cidrBlocks: ["0.0.0.0/0"], 51 | }, 52 | { 53 | protocol: "tcp", 54 | fromPort: 8000, 55 | toPort: 8020, 56 | cidrBlocks: ["0.0.0.0/0"], 57 | }, 58 | { 59 | protocol: "udp", 60 | fromPort: 8000, 61 | toPort: 8020, 62 | cidrBlocks: ["0.0.0.0/0"], 63 | }, 64 | ], 65 | egress: [ 66 | { 67 | protocol: "-1", 68 | fromPort: 0, 69 | toPort: 0, 70 | cidrBlocks: ["0.0.0.0/0"], 71 | }, 72 | ], 73 | }); 74 | 75 | export const instance = new aws.ec2.Instance("instance", { 76 | ami, 77 | instanceType, 78 | keyName: keyPair.keyName, 79 | vpcSecurityGroupIds: [securityGroup.id], 80 | rootBlockDevice: { 81 | volumeSize: rootVolumeSize, 82 | volumeType: "gp3", 83 | }, 84 | ebsBlockDevices: [ 85 | { 86 | deviceName: "/dev/sdf", 87 | volumeSize: 500, 88 | volumeType: "io2", 89 | iops: iops, 90 | }, 91 | { 92 | deviceName: "/dev/sdg", 93 | volumeSize: 1024, 94 | volumeType: "io2", 95 | iops: iops, 96 | }, 97 | ], 98 | userData: `#!/bin/bash 99 | mkfs -t ext4 /dev/sdf 100 | mkfs -t ext4 /dev/sdg 101 | mkdir -p /home/sol/accounts 102 | mkdir -p /home/sol/ledger 103 | cat <> /etc/fstab 104 | /dev/sdf /home/sol/accounts ext4 defaults 0 0 105 | /dev/sdg /home/sol/ledger ext4 defaults 0 0 106 | EOF 107 | systemctl daemon-reload 108 | mount -a 109 | `, 110 | tags: { 111 | Name: `${pulumi.getStack()}-validator`, 112 | }, 113 | }); 114 | -------------------------------------------------------------------------------- /aws-validator-xen-ts/aws.ts: -------------------------------------------------------------------------------- 1 | import * as pulumi from "@pulumi/pulumi"; 2 | import * as aws from "@pulumi/aws"; 3 | import * as tls from "@pulumi/tls"; 4 | 5 | const nodeConfig = new pulumi.Config("node"); 6 | const instanceType = nodeConfig.get("instanceType") ?? "t3.2xlarge"; 7 | const instanceArch = nodeConfig.get("instanceArch") ?? "x86_64"; 8 | const instanceAmi = nodeConfig.get("instanceAmi"); 9 | const iops = nodeConfig.getNumber("volumeIOPS") ?? 5000; 10 | export const instanceUser = nodeConfig.get("user") ?? "admin"; 11 | 12 | const rootVolumeSize = nodeConfig.getNumber("rootVolumeSize") ?? 32; 13 | 14 | // Setup a local SSH private key, stored inside Pulumi. 15 | export const sshKey = new tls.PrivateKey("ssh-key", { 16 | algorithm: "ED25519", 17 | }); 18 | 19 | const keyPair = new aws.ec2.KeyPair("keypair", { 20 | publicKey: sshKey.publicKeyOpenssh, 21 | }); 22 | 23 | // Get AMI information on the latest Debian image inside AWS. 24 | const ami = 25 | instanceAmi ?? 
26 | pulumi.output( 27 | aws.ec2.getAmi({ 28 | filters: [ 29 | { 30 | name: "name", 31 | values: ["debian-12-*"], 32 | }, 33 | { 34 | name: "architecture", 35 | values: [instanceArch], 36 | }, 37 | ], 38 | owners: ["136693071363"], // Debian 39 | mostRecent: true, 40 | }), 41 | ).id; 42 | 43 | const securityGroup = new aws.ec2.SecurityGroup("security-group", { 44 | description: "Allow SSH and specific inbound traffic", 45 | ingress: [ 46 | { 47 | protocol: "tcp", 48 | fromPort: 22, 49 | toPort: 22, 50 | cidrBlocks: ["0.0.0.0/0"], 51 | }, 52 | { 53 | protocol: "tcp", 54 | fromPort: 8000, 55 | toPort: 8020, 56 | cidrBlocks: ["0.0.0.0/0"], 57 | }, 58 | { 59 | protocol: "udp", 60 | fromPort: 8000, 61 | toPort: 8020, 62 | cidrBlocks: ["0.0.0.0/0"], 63 | }, 64 | ], 65 | egress: [ 66 | { 67 | protocol: "-1", 68 | fromPort: 0, 69 | toPort: 0, 70 | cidrBlocks: ["0.0.0.0/0"], 71 | }, 72 | ], 73 | }); 74 | 75 | export const instance = new aws.ec2.Instance("instance", { 76 | ami, 77 | instanceType, 78 | keyName: keyPair.keyName, 79 | vpcSecurityGroupIds: [securityGroup.id], 80 | rootBlockDevice: { 81 | volumeSize: rootVolumeSize, 82 | volumeType: "gp3", 83 | }, 84 | ebsBlockDevices: [ 85 | { 86 | deviceName: "/dev/sdf", 87 | volumeSize: 100, 88 | volumeType: "io2", 89 | iops: iops, 90 | }, 91 | { 92 | deviceName: "/dev/sdg", 93 | volumeSize: 500, 94 | volumeType: "io2", 95 | iops: iops, 96 | }, 97 | ], 98 | userData: `#!/bin/bash 99 | mkfs -t ext4 /dev/sdf 100 | mkfs -t ext4 /dev/sdg 101 | mkdir -p /home/sol/accounts 102 | mkdir -p /home/sol/ledger 103 | cat <> /etc/fstab 104 | /dev/sdf /home/sol/accounts ext4 defaults 0 0 105 | /dev/sdg /home/sol/ledger ext4 defaults 0 0 106 | EOF 107 | systemctl daemon-reload 108 | mount -a 109 | `, 110 | tags: { 111 | Name: `${pulumi.getStack()}-validator`, 112 | Stack: pulumi.getStack(), 113 | }, 114 | }); 115 | -------------------------------------------------------------------------------- /guides/FAULT-TOLERANCE.md: -------------------------------------------------------------------------------- 1 | # Cluster Fault Tolerance Guide for SVM Networks 2 | 3 | ## Introduction 4 | 5 | This guide explains how stake distribution impacts fault tolerance, and how to calculate the maximum allowable node failures while maintaining consensus in an SVM network. 6 | 7 | ## Consensus and Supermajority 8 | 9 | ### What is Consensus? 10 | 11 | Consensus is the minimum percentage of total stake required for a network to continue producing blocks. In Solana-based networks, consensus is 66.67% (2/3) of total stake. If stake controlled by active validators falls below 66.67%, consensus is lost, and the network halts. 12 | 13 | ### What is Supermajority? 14 | 15 | A supermajority is required to agree on the slot for a hard-fork networks. The supermajority is defined as 80% and is configured as a constant within the validator binary. 16 | 17 | ### Calculating Maximum Node Failures 18 | 19 | To maintain consensus, the remaining stake must be at least 66.67% of the total stake. 20 | 21 | #### Example: 5-Node Cluster 22 | 23 | - Stake is evenly distributed (each validator holds 20% of total stake). 24 | - Consensus requires 4 nodes. 25 | - 1 node can go down safely, but if 2 nodes fail, the network halts. 26 | 27 | #### Example: 7-Node Cluster 28 | 29 | - Each validator has 1/7th (≈14.29%) of the stake. 30 | - To maintain 66.67%, at least 5 nodes must remain online. 31 | - Up to 2 nodes can fail without halting the network. 
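To make the arithmetic behind these examples explicit, here is a minimal TypeScript sketch (not part of this repository) assuming evenly distributed stake and the ≥ 66.67% consensus threshold described above; it reproduces the rows of the reference table below for clusters of four or more nodes, while the guide treats 3-node clusters as having no failure tolerance at all.

```typescript
// Sketch: fault tolerance of an evenly staked cluster of `totalNodes` validators.
// Consensus needs at least 2/3 of total stake online, so with even stake the
// minimum number of live nodes is ceil(2 * totalNodes / 3).
function faultTolerance(totalNodes: number) {
  const minNodesRequired = Math.ceil((2 * totalNodes) / 3);
  const maxFailures = totalNodes - minNodesRequired;
  const activeStakePercent = (100 * minNodesRequired) / totalNodes;
  return { minNodesRequired, maxFailures, activeStakePercent };
}

// Example: a 7-node cluster needs 5 nodes online (~71.43% of stake),
// so at most 2 nodes can fail before the network halts.
console.log(faultTolerance(7));
```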
32 | 33 | ## Reference Table 34 | 35 | | Total Nodes | Minimum Nodes Required | Max Node Failures Allowed | Active Stake (%) | 36 | | ----------- | ---------------------- | ------------------------- | ---------------- | 37 | | 3 | 3 | 0 | 100 | 38 | | 4 | 3 | 1 | 75 | 39 | | 5 | 4 | 1 | 80 | 40 | | 6 | 4 | 2 | 66.67 | 41 | | 7 | 5 | 2 | 71.43 | 42 | | 8 | 6 | 2 | 75 | 43 | | 9 | 6 | 3 | 66.67 | 44 | | 10 | 7 | 3 | 70 | 45 | 46 | ## Best Practices for Fault Tolerance 47 | 48 | 1. **Minimum of 4 Validators:** 49 | - Avoid 3-node clusters as any single failure halts the network. 50 | - Running 5+ validators improves resilience. 51 | 2. **Distribute Stake Evenly:** 52 | - Uneven stake can create single points of failure. 53 | 3. **Monitor Validator Health:** 54 | - Use solana validators to check for delinquent nodes. 55 | - Example of a delinquent node: 56 | ``` 57 | ⚠️ ValidatorX VoteAccountXYZ 100% 3245 3214 100.00% 0 unknown 9.999999344 SOL (20.00%) 58 | ``` 59 | 60 | ## Conclusion 61 | 62 | - Consensus requires 66.67% of the total stake. 63 | - A supermajority, set at 80% of the stake, is necessary for the network to approve a hard-fork. 64 | - A 3-node cluster is vulnerable to a single node failure. 65 | - Recommended practice: Operate with a minimum of 4 validators and ensure even stake distribution. 66 | -------------------------------------------------------------------------------- /aws-validator-fd-ts/aws.ts: -------------------------------------------------------------------------------- 1 | import * as pulumi from "@pulumi/pulumi"; 2 | import * as aws from "@pulumi/aws"; 3 | import * as tls from "@pulumi/tls"; 4 | 5 | const nodeConfig = new pulumi.Config("node"); 6 | const instanceType = nodeConfig.get("instanceType") ?? "r7a.8xlarge"; 7 | const instanceArch = nodeConfig.get("instanceArch") ?? "x86_64"; 8 | const instanceAmi = nodeConfig.get("instanceAmi"); 9 | const iops = nodeConfig.getNumber("volumeIOPS") ?? 5000; 10 | export const instanceUser = nodeConfig.get("user") ?? "admin"; 11 | 12 | const rootVolumeSize = nodeConfig.getNumber("rootVolumeSize") ?? 32; 13 | 14 | // Setup a local SSH private key, stored inside Pulumi. 15 | export const sshKey = new tls.PrivateKey("ssh-key", { 16 | algorithm: "ED25519", 17 | }); 18 | 19 | const keyPair = new aws.ec2.KeyPair("keypair", { 20 | publicKey: sshKey.publicKeyOpenssh, 21 | }); 22 | 23 | // Get AMI information on the latest Debian image inside AWS. 24 | const ami = 25 | instanceAmi ?? 
26 | pulumi.output( 27 | aws.ec2.getAmi({ 28 | filters: [ 29 | { 30 | name: "name", 31 | values: ["debian-12-*"], 32 | }, 33 | { 34 | name: "architecture", 35 | values: [instanceArch], 36 | }, 37 | ], 38 | owners: ["136693071363"], // Debian 39 | mostRecent: true, 40 | }), 41 | ).id; 42 | 43 | const securityGroup = new aws.ec2.SecurityGroup("security-group", { 44 | description: "Allow SSH and specific inbound traffic", 45 | ingress: [ 46 | { 47 | protocol: "tcp", 48 | fromPort: 22, 49 | toPort: 22, 50 | cidrBlocks: ["0.0.0.0/0"], 51 | }, 52 | { 53 | protocol: "tcp", 54 | fromPort: 8000, 55 | toPort: 8020, 56 | cidrBlocks: ["0.0.0.0/0"], 57 | }, 58 | { 59 | protocol: "udp", 60 | fromPort: 8000, 61 | toPort: 8020, 62 | cidrBlocks: ["0.0.0.0/0"], 63 | }, 64 | { 65 | protocol: "udp", 66 | fromPort: 8900, 67 | toPort: 8920, 68 | cidrBlocks: ["0.0.0.0/0"], 69 | }, 70 | ], 71 | egress: [ 72 | { 73 | protocol: "-1", 74 | fromPort: 0, 75 | toPort: 0, 76 | cidrBlocks: ["0.0.0.0/0"], 77 | }, 78 | ], 79 | }); 80 | 81 | export const instance = new aws.ec2.Instance("instance", { 82 | ami, 83 | instanceType, 84 | keyName: keyPair.keyName, 85 | vpcSecurityGroupIds: [securityGroup.id], 86 | rootBlockDevice: { 87 | volumeSize: rootVolumeSize, 88 | volumeType: "gp3", 89 | }, 90 | ebsBlockDevices: [ 91 | { 92 | deviceName: "/dev/sdf", 93 | volumeSize: 500, 94 | volumeType: "io2", 95 | iops: iops, 96 | }, 97 | { 98 | deviceName: "/dev/sdg", 99 | volumeSize: 1024, 100 | volumeType: "io2", 101 | iops: iops, 102 | }, 103 | ], 104 | userData: `#!/bin/bash 105 | mkfs -t ext4 /dev/sdf 106 | mkfs -t ext4 /dev/sdg 107 | mkdir -p /home/sol/accounts 108 | mkdir -p /home/sol/ledger 109 | cat <> /etc/fstab 110 | /dev/sdf /home/sol/accounts ext4 defaults 0 0 111 | /dev/sdg /home/sol/ledger ext4 defaults 0 0 112 | EOF 113 | systemctl daemon-reload 114 | mount -a 115 | `, 116 | tags: { 117 | Name: `${pulumi.getStack()}-validator`, 118 | }, 119 | }); 120 | -------------------------------------------------------------------------------- /guides/VALIDATOR-VERSION.md: -------------------------------------------------------------------------------- 1 | # Identify and Update Validator Version 2 | 3 | This guide walks you through determining your validator’s current version and updating it to the latest release. 4 | 5 | > **Note:** This guide is tailored for the Agave validator. System service names may vary depending on the specific flavor and variant. 6 | 7 | 1. Check the Installed Validator Version 8 | 9 | To check the currently installed version of the validator, run: 10 | 11 | ``` 12 | sudo apt info svmkit-agave-validator 13 | ``` 14 | 15 | Example output: 16 | 17 | ``` 18 | Package: svmkit-agave-validator 19 | Version: 1.18.26-1 20 | Priority: optional 21 | Maintainer: Engineering 22 | Installed-Size: 120 MB 23 | Provides: svmkit-validator 24 | Depends: libc6 (>= 2.34), libstdc++6 (>= 12) 25 | Conflicts: svmkit-validator 26 | Replaces: svmkit-validator 27 | Homepage: https://anza.xyz/ 28 | Vcs-Browser: https://github.com/anza-xyz/agave 29 | Vcs-Git: https://github.com/anza-xyz/agave 30 | Download-Size: 32.6 MB 31 | APT-Sources: https://apt.abklabs.com/svmkit dev/main amd64 Packages 32 | Description: Blockchain, Rebuilt for Scale 33 | ``` 34 | 35 | The Version field shows the currently installed version (e.g., 1.18.26-1). 36 | 37 | 2. Check for Available Updates 38 | 39 | To determine the latest available version: 40 | 41 | sudo apt list svmkit-agave-validator 42 | 43 | Example output: 44 | 45 | Listing... 
Done 46 | svmkit-agave-validator/dev 2.2.0-1 amd64 [upgradable from: 1.18.26-1] 47 | N: There are 24 additional versions. Please use the '-a' switch to see them. 48 | 49 | If an upgrade is available, it will be indicated as [upgradable from: ]. 50 | 51 | To see all available versions: 52 | 53 | ``` 54 | sudo apt list -a svmkit-agave-validator 55 | ``` 56 | 57 | 3. Update the Validator 58 | 59 | If an update is available, upgrade to the latest version. 60 | 61 | Option 1: Directly Update via APT 62 | 63 | sudo apt update && sudo apt install svmkit-agave-validator 64 | 65 | This will download and install the latest version from the repository. 66 | 67 | Option 2 (recommended): Update via Pulumi 68 | 69 | If you manage your validator using Pulumi, update the version in the configuration and apply the changes. 70 | 71 | Modify the validator component to specify the new version: 72 | 73 | ```typescript 74 | new svmkit.validator.Agave("validator", { 75 | connection, 76 | version: "2.2.0-1", 77 | environment: {}, 78 | keyPairs: { 79 | identity: validatorKey.json, 80 | voteAccount: voteAccountKey.json, 81 | }, 82 | flags: {}, 83 | }, { 84 | dependsOn: [instance], 85 | }); 86 | ``` 87 | 88 | Apply the changes: 89 | 90 | ``` 91 | pulumi up 92 | ``` 93 | 94 | This will install the updated validator version and restart the validator automatically. 95 | 96 | 4. Verify the Update 97 | 98 | After updating, confirm that the validator is running the new version: 99 | 100 | ``` 101 | sudo apt info svmkit-agave-validator 102 | ``` 103 | 104 | Ensure the Version field matches the expected update. 105 | 106 | Additionally, check the validator process: 107 | 108 | ``` 109 | systemctl status svmkit-agave-validator 110 | ``` 111 | 112 | If the validator is not running, restart it: 113 | 114 | ``` 115 | sudo systemctl restart svmkit-agave-validator 116 | ``` 117 | 118 | 5. Confirm Validator Participation 119 | 120 | After updating, ensure your validator is actively participating in the network: 1. Check voting status: 121 | 122 | Check synchronization: 123 | 124 | ``` 125 | solana catchup --our-localhost 126 | ``` 127 | -------------------------------------------------------------------------------- /PULUMI-AWS.md: -------------------------------------------------------------------------------- 1 | # PULUMI setup with AWS 2 | 3 | Pulumi is an Infrastructure as Code (IaC) platform that allows you to define, deploy, and manage cloud infrastructure using familiar programming languages. This guide will walk you through using Pulumi in AWS CloudShell with an Amazon Simple Storage Service (S3) backend to store state files in the cloud. 4 | 5 | ## Setup 6 | 7 | 1. Open and configure [AWS CloudShell](https://aws.amazon.com/cloudshell/) to follow the deployment process: 8 | 9 | - On the navigation bar, [choose the CloudShell icon](https://docs.aws.amazon.com/cloudshell/latest/userguide/getting-started.html#launch-region-shell). 10 | 11 | - Add a new user with a larger storage quota to ensure a smooth installation: 12 | 13 | ```bash 14 | sudo useradd -m -G cloudshell-user bcuser 15 | ``` 16 | - Configure sudo for the new user: 17 | 18 | ```bash 19 | sudo visudo -f /etc/sudoers.d/bcuser 20 | ``` 21 | - Add the following text to allow sudo without password for the new user: 22 | 23 | ```bash 24 | bcuser ALL=(ALL) NOPASSWD: ALL 25 | ``` 26 | - Hit `Ctrl+x`, `y`, and `Enter` 27 | 28 | 2. 
Install `pulumi` in your AWS CloudShell environment ([Official docs to install Pulumi on AWS](https://www.pulumi.com/docs/iac/get-started/aws/)] 29 | 30 | ```bash 31 | sudo su bcuser 32 | cd 33 | curl -fsSL https://get.pulumi.com | sh 34 | source ~/.bashrc 35 | pulumi version 36 | ``` 37 | 38 | - You should see something like this: 39 | ``` 40 | v3.140.0 41 | ``` 42 | 43 | 3. Create a new S3 bucket to store your Pulumi configuration: 44 | 45 | ```bash 46 | export AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text) 47 | aws s3 mb s3://pulumi-config-$AWS_ACCOUNT_ID-$AWS_REGION 48 | ``` 49 | 50 | 4. Configure your pulumi to use the new S3 bucket: 51 | 52 | ```bash 53 | pulumi login s3://pulumi-config-$AWS_ACCOUNT_ID-$AWS_REGION 54 | ``` 55 | 56 | 5. Set the passphrase into an new environment variable for this session: 57 | 58 | ```bash 59 | export PULUMI_CONFIG_PASSPHRASE= 60 | ``` 61 | 62 | 6. (Optional) Install Solana CLI if you'd like to run `token-demo` script 63 | 64 | - Set the Solana version 65 | 66 | ```bash 67 | export SOLANA_VERSION=1.18.26 68 | ``` 69 | 70 | - Download binaries and set up the PATH variable 71 | ```bash 72 | sudo yum install -y bzip2 73 | cd 74 | curl -sSfL https://github.com/solana-labs/solana/releases/download/v$SOLANA_VERSION/solana-release-x86_64-unknown-linux-gnu.tar.bz2 -o solana.tar.bz2 75 | tar --bzip2 -xf solana.tar.bz2 76 | cd solana-release/ 77 | export PATH=$PWD/bin:$PATH 78 | solana --version 79 | ``` 80 | 81 | - You should see something like this: 82 | 83 | ```bash 84 | solana-cli 1.18.26 (src:d9f20e95; feat:3241752014, client:SolanaLabs) 85 | ``` 86 | 87 | 7. Clone this repository and change directory to the AWS blueprint: 88 | 89 | ```bash 90 | cd 91 | git clone https://github.com/abklabs/svmkit-examples.git 92 | cd svmkit-examples/aws-network-spe-py/ 93 | ``` 94 | 95 | 8. Continue from **Step 1** in [Solana Permissioned Environment Inside an AWS VPC](aws-network-spe-py/README.md) 96 | 97 | ## Tear down 98 | 99 | 1. Delete all created AWS resources 100 | 101 | ```bash 102 | pulumi down 103 | ``` 104 | 105 | - Select `yes` 106 | 107 | 2. Clear and delete the S3 bucket used for Pulumi configuration 108 | 109 | ```bash 110 | export AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text) 111 | aws s3 rm s3://pulumi-config-$AWS_ACCOUNT_ID-$AWS_REGION --recursive 112 | aws s3 rb s3://pulumi-config-$AWS_ACCOUNT_ID-$AWS_REGION 113 | ``` 114 | 115 | 3. To delete the currently open AWS CloudShell environment, in top-right corner choose **Actions** > **Delete** 116 | -------------------------------------------------------------------------------- /PULUMI.md: -------------------------------------------------------------------------------- 1 | # Pulumi 2 | 3 | Pulumi is an Infrastructure as Code (IaC) platform that allows you to define, deploy, and manage cloud infrastructure using familiar programming languages. This guide will walk you through using Pulumi with a local backend, which stores state files on your local filesystem. 4 | 5 | ## Setup 6 | 7 | Before using Pulumi, ensure you have: 8 | 9 | • The Pulumi CLI installed on your system. Follow the installation instructions at [Pulumi Installation](https://www.pulumi.com/docs/iac/download-install/). 10 | 11 | ## Workflow 12 | 13 | 1. Log In to Pulumi 14 | 15 | To store state locally, use the --local option when logging in to Pulumi. 
Run the following command in your terminal: 16 | 17 | ```bash 18 | pulumi login --local 19 | Enter your passphrase to protect config/secrets: 20 | Re-enter your passphrase to confirm: 21 | Created stack 'workstation' 22 | ``` 23 | 24 | This command sets up Pulumi to save your stack's state files on your local filesystem, usually in the ~/.pulumi directory. This is useful if you choose not to use the Pulumi Service. You will be asked to enter a passphrase to encrypt secrets. This passphrase will be required whenever Pulumi needs to access the stack's state. To avoid being prompted for the passphrase you can set it in advance with: 25 | 26 | ```bash 27 | export PULUMI_CONFIG_PASSPHRASE="" 28 | ``` 29 | 30 | 2. Create a Stack 31 | 32 | In Pulumi, a stack represents a unique instance of your infrastructure configuration, such as dev, staging, or production. To create a new stack with local state, run: 33 | 34 | ```bash 35 | pulumi stack init 36 | ``` 37 | 38 | Replace with a name that identifies the environment, such as dev or production. This command initializes a new stack and stores the state file locally. 39 | 40 | You can list all stacks for the current project with: 41 | 42 | ```bash 43 | pulumi stack ls 44 | ``` 45 | 46 | 3. Configure a stack 47 | 48 | Once your stack is created, configure environment-specific settings. Pulumi uses configuration settings to manage details for each stack, such as region and credentials. 49 | 50 | a. Set your AWS region (replace with the desired AWS region, such as us-west-2): 51 | 52 | ```bash 53 | pulumi config set aws:region 54 | ``` 55 | 56 | b. To view all configuration settings for the current stack: 57 | 58 | ```bash 59 | pulumi config 60 | ``` 61 | 62 | For more details on configuring Pulumi for AWS, refer to the [Pulumi AWS Provider Configuration](https://github.com/pulumi/pulumi-aws?tab=readme-ov-file#configuration). 63 | 64 | 4. Preview and Deploy 65 | 66 | Once your code and configuration are set up, you can preview and deploy your changes. 67 | 68 | a. Preview the changes Pulumi will make to your cloud infrastructure: 69 | 70 | ```bash 71 | pulumi preview 72 | ``` 73 | 74 | This command shows the proposed changes without actually applying them. 75 | 76 | b. Deploy the changes to your infrastructure: 77 | 78 | ```bash 79 | pulumi up 80 | ``` 81 | 82 | Pulumi will apply the changes and output the results, including resource creation, updates, or deletions. 83 | 84 | 5. Destroy a stack 85 | 86 | To destroy your stack and clean up resources: 87 | 88 | ```bash 89 | pulumi destroy 90 | ``` 91 | 92 | ## FAQ 93 | 94 | 1. Can I choose where to store my Pulumi state? 95 | Absolutely. Pulumi allows you to store state in various blob storage providers. For details on available backends and configuration options, see the [State Backends documentation](https://www.pulumi.com/docs/iac/concepts/state-and-backends/). 96 | 97 | 2. Can I use a custom provider for managing secrets? 98 | Yes, Pulumi supports several popular secret management providers. For more information on available options, refer to the [Secrets Provider documentation](https://www.pulumi.com/docs/iac/cli/commands/pulumi_stack_change-secrets-provider/). 
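For reference, the two FAQ answers map onto concrete commands roughly as follows; this is an illustrative sketch, and the bucket name and KMS alias are placeholders rather than values used anywhere in this repository.

```bash
# FAQ 1: keep state in an S3 bucket instead of the local filesystem
# (PULUMI-AWS.md walks through a full CloudShell setup of this backend).
pulumi login s3://my-pulumi-state-bucket

# FAQ 2: switch the current stack's secrets provider, e.g. to an AWS KMS key;
# the alias and region are placeholders.
pulumi stack change-secrets-provider "awskms://alias/my-pulumi-secrets?region=us-west-2"
```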
99 | -------------------------------------------------------------------------------- /guides/OPERATING-VALIDATOR.md: -------------------------------------------------------------------------------- 1 | # Setting Up a Solana Validator for Staking 2 | 3 | This guide shows how to convert an existing [non-voting Solana validator](https://github.com/abklabs/svmkit-examples/tree/main/aws-validator-agave-ts) into one that participates in network consensus with staking capabilities. We'll build on the AWS validator Agave TypeScript example from the svmkit-examples repository. 4 | 5 | ## Overview 6 | 7 | This guide provides step-by-step instructions to enable voting and staking on your validator by: 8 | 1. Adding necessary keypairs for funding and staking 9 | 2. Creating and configuring vote and stake accounts 10 | 3. Enabling voting functionality 11 | 4. Setting up stake delegation 12 | 13 | ## Step-by-Step Implementation 14 | 15 | ### Step 1: Add Treasury and Stake Account Keypairs 16 | 17 | Add these keypairs to your existing configuration: 18 | 19 | ```typescript 20 | // Add a treasury key for funding validator operations 21 | const treasuryKey = new svmkit.KeyPair("treasury-key"); 22 | // Create a keypair for stake account 23 | const stakeAccountKey = new svmkit.KeyPair("stake-account-key"); 24 | 25 | // Export public keys for funding 26 | export const treasuryPublicKey = treasuryKey.publicKey; 27 | export const stakeAccountPublicKey = stakeAccountKey.publicKey; 28 | export const validatorPublicKey = validatorKey.publicKey; 29 | ``` 30 | 31 | Deploy this initial change to generate the keypairs: 32 | ```bash 33 | pulumi up 34 | ``` 35 | 36 | ### Step 2: Retrieve and Fund Account Addresses 37 | 38 | Get the addresses that need funding: 39 | ```bash 40 | pulumi stack output treasuryPublicKey 41 | pulumi stack output validatorPublicKey 42 | ``` 43 | 44 | Fund both accounts before continuing: 45 | - Fund the treasury address with at least 0.3 SOL (for creating stake account) 46 | - Ensure the validator has at least 0.1 SOL (for vote account creation) 47 | 48 | You can use [Solana faucets](https://solana.com/developers/guides/getstarted/solana-token-airdrop-and-faucets) for devnet testing. 
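For devnet testing, this funding step can also be scripted with the Solana CLI; the sketch below is illustrative only (the airdrop amounts are simply chosen to cover the minimums above, and on mainnet you would fund these accounts with `solana transfer` from an existing wallet instead).

```bash
# Fund the treasury and validator identity on devnet, then confirm balances
# before moving on to Step 3.
TREASURY=$(pulumi stack output treasuryPublicKey)
VALIDATOR=$(pulumi stack output validatorPublicKey)

solana airdrop 0.5 "$TREASURY" --url devnet
solana airdrop 0.5 "$VALIDATOR" --url devnet

solana balance "$TREASURY" --url devnet
solana balance "$VALIDATOR" --url devnet
```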
49 | 50 | ### Step 3: Create Vote Account 51 | 52 | After funding, add the vote account configuration: 53 | 54 | ```typescript 55 | // Create vote account 56 | const voteAccount = new svmkit.account.VoteAccount("validator-vote-account", { 57 | connection: connection, 58 | keyPairs: { 59 | identity: validatorKey.json, 60 | voteAccount: voteAccountKey.json, 61 | authWithdrawer: treasuryKey.json, // Treasury key as withdraw authority 62 | }, 63 | }, { dependsOn: [instance] }); 64 | 65 | // Export vote account public key 66 | export const voteAccountPublicKey = voteAccountKey.publicKey; 67 | ``` 68 | 69 | ### Step 4: Configure Stake Account 70 | 71 | With the vote account defined, add the stake account setup: 72 | 73 | ```typescript 74 | // Create and delegate stake account 75 | const stakeAccount = new svmkit.account.StakeAccount("validator-stake-account", { 76 | connection: connection, 77 | transactionOptions: { 78 | keyPair: treasuryKey.json, // Treasury funds the stake account 79 | }, 80 | keyPairs: { 81 | stakeAccount: stakeAccountKey.json, 82 | voteAccount: voteAccountKey.json, // Delegate to your vote account 83 | }, 84 | amount: 0.2, // Stake amount in SOL - adjust based on available funds 85 | }, { dependsOn: [voteAccount] }); 86 | ``` 87 | 88 | ### Step 5: Enable Voting on Validator 89 | 90 | Find your Agave validator configuration and change `noVoting` from `true` to `false`: 91 | 92 | ```typescript 93 | flags: { 94 | // Other flags remain unchanged 95 | noVoting: false, // Change from true to false to enable voting 96 | } 97 | ``` 98 | 99 | ### Step 6: Deploy Complete Configuration 100 | 101 | Deploy all changes: 102 | ```bash 103 | pulumi up 104 | ``` 105 | 106 | ## Verifying Your Setup 107 | 108 | After successful deployment, verify your configuration: 109 | 110 | ```bash 111 | # Check validator balance 112 | solana balance $(pulumi stack output validatorPublicKey) 113 | 114 | # Verify vote account exists and is properly configured 115 | solana vote-account $(pulumi stack output voteAccountPublicKey) 116 | 117 | # Check stake account and delegation 118 | solana stake-account $(pulumi stack output stakeAccountPublicKey) 119 | ``` 120 | 121 | ## Conclusion 122 | 123 | Your Solana validator is now ready to participate in consensus and can receive stake delegations. You've successfully converted a non-voting validator to an active network participant. -------------------------------------------------------------------------------- /aws-validator-fd-ts/index.ts: -------------------------------------------------------------------------------- 1 | import * as pulumi from "@pulumi/pulumi"; 2 | import * as svmkit from "@svmkit/pulumi-svmkit"; 3 | 4 | const firewallConfig = new pulumi.Config("firewall"); 5 | const solanaConfig = new pulumi.Config("solana"); 6 | const tunerConfig = new pulumi.Config("tuner"); 7 | 8 | // AWS-specific resources are created inside. 9 | import { sshKey, instance, instanceUser } from "./aws"; 10 | 11 | // Lookup information about the Solana network 12 | const networkName = 13 | solanaConfig.get("network") ?? 14 | svmkit.solana.NetworkName.Testnet; 15 | const networkInfo = svmkit.networkinfo.getNetworkInfoOutput({ networkName }); 16 | 17 | // Create some keys for this validator to use. 
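// NOTE: Because these constants are exported, Pulumi also records them as
// stack outputs, and the keypair JSON includes private key material. Retrieve
// them with `pulumi stack output` only when needed (adding --show-secrets if
// the output is marked secret) and treat the values as sensitive.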
18 | export const validatorKey = new svmkit.KeyPair("validator-key").json; 19 | export const voteAccountKey = new svmkit.KeyPair("vote-account-key").json; 20 | export const withdrawerKey = new svmkit.KeyPair("withdrawer-key").json; 21 | 22 | // Point pulumi-svmkit at the AWS EC2 instance's SSH connection. 23 | const connection = { 24 | host: instance.publicDns, 25 | user: instanceUser, 26 | privateKey: sshKey.privateKeyOpenssh, 27 | }; 28 | 29 | // Configure the instance for SVMKit 30 | const machine = new svmkit.machine.Machine( 31 | "machine", 32 | { 33 | connection, 34 | }, 35 | { 36 | dependsOn: [instance], 37 | }, 38 | ); 39 | 40 | // Firewall setup 41 | const firewallVariant = 42 | firewallConfig.get("variant") ?? 43 | svmkit.firewall.FirewallVariant.Generic; 44 | 45 | // Retrieve the default firewall parameters for that variant 46 | const genericFirewallParamsOutput = 47 | svmkit.firewall.getDefaultFirewallParamsOutput({ 48 | variant: firewallVariant, 49 | }); 50 | 51 | // "Apply" those params so we can pass them to the Firewall constructor 52 | const firewallParams = genericFirewallParamsOutput.apply((f) => ({ 53 | allowPorts: [ 54 | ...(f.allowPorts ?? []), 55 | "8000:8020/tcp", 56 | "8000:8020/udp", 57 | "8899", 58 | "8900/tcp", 59 | "8900:8915/udp", 60 | ], 61 | })); 62 | 63 | // Create the Firewall resource on the EC2 instance 64 | const _firewall = new svmkit.firewall.Firewall( 65 | "firewall", 66 | { 67 | connection, 68 | params: firewallParams, 69 | }, 70 | { 71 | dependsOn: [machine], 72 | }, 73 | ); 74 | 75 | // Tuner setup 76 | const tunerVariant = 77 | tunerConfig.get("variant") ?? 78 | svmkit.tuner.TunerVariant.Generic; 79 | 80 | // Retrieve the default tuner parameters for that variant 81 | const genericTunerParamsOutput = svmkit.tuner.getDefaultTunerParamsOutput({ 82 | variant: tunerVariant, 83 | }); 84 | 85 | // "Apply" those params so we can pass them to the Tuner constructor 86 | const tunerParams = genericTunerParamsOutput.apply((p) => ({ 87 | cpuGovernor: p.cpuGovernor, 88 | kernel: p.kernel, 89 | net: p.net, 90 | vm: p.vm, 91 | fs: p.fs, 92 | })); 93 | 94 | // Create the Tuner resource on the EC2 instance 95 | const _tuner = new svmkit.tuner.Tuner( 96 | "tuner", 97 | { 98 | connection, 99 | params: tunerParams, 100 | }, 101 | { 102 | dependsOn: [machine], 103 | }, 104 | ); 105 | 106 | // Instantiate a new Firedancer instance on the machine. 107 | new svmkit.validator.Firedancer( 108 | "fd", 109 | { 110 | connection, 111 | keyPairs: { 112 | identity: validatorKey, 113 | voteAccount: voteAccountKey, 114 | }, 115 | config: { 116 | user: "sol", 117 | gossip: { 118 | host: instance.publicIp, 119 | entrypoints: networkInfo.entryPoint, 120 | }, 121 | consensus: { 122 | identityPath: "/home/sol/validator-keypair.json", 123 | voteAccountPath: "/home/sol/vote-account-keypair.json", 124 | knownValidators: networkInfo.knownValidator, 125 | expectedGenesisHash: networkInfo.genesisHash, 126 | }, 127 | ledger: { 128 | path: "/home/sol/ledger", 129 | accountsPath: "/home/sol/accounts", 130 | }, 131 | rpc: { 132 | port: 8899, 133 | private: true, 134 | }, 135 | log: { 136 | path: "-", 137 | }, 138 | }, 139 | }, 140 | { 141 | dependsOn: [machine], 142 | }, 143 | ); 144 | 145 | // Expose information required to SSH to the validator host. 
146 | export const nodes = [ 147 | { 148 | name: "instance", 149 | connection, 150 | }, 151 | ]; 152 | export const tuner_params = tunerParams; 153 | -------------------------------------------------------------------------------- /aws-validator-agave-ts/index.ts: -------------------------------------------------------------------------------- 1 | import * as pulumi from "@pulumi/pulumi"; 2 | import * as svmkit from "@svmkit/pulumi-svmkit"; 3 | 4 | const validatorConfig = new pulumi.Config("validator"); 5 | const solanaConfig = new pulumi.Config("solana"); 6 | const tunerConfig = new pulumi.Config("tuner"); 7 | const firewallConfig = new pulumi.Config("firewall"); 8 | 9 | // AWS-specific resources are created inside. 10 | import { sshKey, instance, instanceUser } from "./aws"; 11 | 12 | // Lookup information about the Solana network. 13 | const networkName = 14 | solanaConfig.get("network") ?? 15 | svmkit.solana.NetworkName.Testnet; 16 | const networkInfo = svmkit.networkinfo.getNetworkInfoOutput({ networkName }); 17 | const agaveVersion = validatorConfig.get("version") ?? "2.2.14-1"; 18 | 19 | // Create some keys for this validator to use. 20 | const validatorKey = new svmkit.KeyPair("validator-key"); 21 | const voteAccountKey = new svmkit.KeyPair("vote-account-key"); 22 | 23 | // Point pulumi-svmkit at the AWS EC2 instance's SSH connection. 24 | const connection = { 25 | host: instance.publicDns, 26 | user: instanceUser, 27 | privateKey: sshKey.privateKeyOpenssh, 28 | }; 29 | 30 | // Configure the instance for SVMKit 31 | const machine = new svmkit.machine.Machine( 32 | "machine", 33 | { 34 | connection, 35 | }, 36 | { 37 | dependsOn: [instance], 38 | }, 39 | ); 40 | 41 | // Tuner setup 42 | const tunerVariant = 43 | tunerConfig.get("variant") ?? 44 | svmkit.tuner.TunerVariant.Generic; 45 | 46 | // Retrieve the default tuner parameters for that variant 47 | const genericTunerParamsOutput = svmkit.tuner.getDefaultTunerParamsOutput({ 48 | variant: tunerVariant, 49 | }); 50 | 51 | // "Apply" those params so we can pass them to the Tuner constructor 52 | const tunerParams = genericTunerParamsOutput.apply((p) => ({ 53 | cpuGovernor: p.cpuGovernor, 54 | kernel: p.kernel, 55 | net: p.net, 56 | vm: p.vm, 57 | fs: p.fs, 58 | })); 59 | 60 | // Create the Tuner resource on the EC2 instance 61 | const _tuner = new svmkit.tuner.Tuner( 62 | "tuner", 63 | { 64 | connection, 65 | params: tunerParams, 66 | }, 67 | { 68 | dependsOn: [machine], 69 | }, 70 | ); 71 | 72 | // Firewall setup 73 | const firewallVariant = 74 | firewallConfig.get("variant") ?? 75 | svmkit.firewall.FirewallVariant.Generic; 76 | 77 | // Retrieve the default firewall parameters for that variant 78 | const genericFirewallParamsOutput = 79 | svmkit.firewall.getDefaultFirewallParamsOutput({ 80 | variant: firewallVariant, 81 | }); 82 | 83 | // "Apply" those params so we can pass them to the Firewall constructor 84 | const firewallParams = genericFirewallParamsOutput.apply((f) => ({ 85 | allowPorts: [ 86 | ...(f.allowPorts ?? []), 87 | "8000:8020/tcp", 88 | "8000:8020/udp", 89 | "8899", 90 | "8900/tcp", 91 | ], 92 | })); 93 | 94 | // Create the Firewall resource on the EC2 instance 95 | const _firewall = new svmkit.firewall.Firewall( 96 | "firewall", 97 | { 98 | connection, 99 | params: firewallParams, 100 | }, 101 | { 102 | dependsOn: [machine], 103 | }, 104 | ); 105 | 106 | // Instantiate a new Agave instance on the machine. 
107 | new svmkit.validator.Agave( 108 | "validator", 109 | { 110 | connection, 111 | version: agaveVersion, 112 | environment: { 113 | rpcURL: networkInfo.rpcURL[0], 114 | }, 115 | keyPairs: { 116 | identity: validatorKey.json, 117 | voteAccount: voteAccountKey.json, 118 | }, 119 | flags: { 120 | useSnapshotArchivesAtStartup: "when-newest", 121 | fullRpcAPI: false, 122 | rpcPort: 8899, 123 | privateRPC: true, 124 | onlyKnownRPC: true, 125 | dynamicPortRange: "8002-8020", 126 | gossipPort: 8001, 127 | rpcBindAddress: "0.0.0.0", 128 | walRecoveryMode: "skip_any_corrupted_record", 129 | limitLedgerSize: 50000000, 130 | blockProductionMethod: "central-scheduler", 131 | fullSnapshotIntervalSlots: 1000, 132 | noWaitForVoteToStartLeader: true, 133 | noVoting: true, 134 | entryPoint: networkInfo.entryPoint, 135 | knownValidator: networkInfo.knownValidator, 136 | expectedGenesisHash: networkInfo.genesisHash, 137 | }, 138 | }, 139 | { 140 | dependsOn: [machine], 141 | }, 142 | ); 143 | 144 | // Expose information required to SSH to the validator host. 145 | export const nodes = [ 146 | { 147 | name: "instance", 148 | connection, 149 | }, 150 | ]; 151 | export const tuner_params = tunerParams; 152 | -------------------------------------------------------------------------------- /aws-validator-xen-ts/index.ts: -------------------------------------------------------------------------------- 1 | import * as pulumi from "@pulumi/pulumi"; 2 | import * as svmkit from "@svmkit/pulumi-svmkit"; 3 | 4 | const firewallConfig = new pulumi.Config("firewall"); 5 | const tunerConfig = new pulumi.Config("tuner"); 6 | 7 | // AWS-specific resources are created inside. 8 | import { sshKey, instance, instanceUser } from "./aws"; 9 | 10 | // Create some keys for this validator to use. 11 | const validatorKey = new svmkit.KeyPair("validator-key"); 12 | const voteAccountKey = new svmkit.KeyPair("vote-account-key"); 13 | 14 | // Point pulumi-svmkit at the AWS EC2 instance's SSH connection. 15 | const connection = { 16 | host: instance.publicDns, 17 | user: instanceUser, 18 | privateKey: sshKey.privateKeyOpenssh, 19 | }; 20 | 21 | // Configure the instance for SVMKit 22 | const machine = new svmkit.machine.Machine( 23 | "machine", 24 | { 25 | connection, 26 | }, 27 | { 28 | dependsOn: [instance], 29 | }, 30 | ); 31 | 32 | // Firewall setup 33 | const firewallVariant = 34 | firewallConfig.get("variant") ?? 35 | svmkit.firewall.FirewallVariant.Generic; 36 | 37 | // Retrieve the default firewall parameters for that variant 38 | const genericFirewallParamsOutput = 39 | svmkit.firewall.getDefaultFirewallParamsOutput({ 40 | variant: firewallVariant, 41 | }); 42 | 43 | // "Apply" those params so we can pass them to the Firewall constructor 44 | const firewallParams = genericFirewallParamsOutput.apply((f) => ({ 45 | allowPorts: [ 46 | ...(f.allowPorts ?? []), 47 | "8000:8020/tcp", 48 | "8000:8020/udp", 49 | "8899", 50 | "8900/tcp", 51 | ], 52 | })); 53 | 54 | // Create the Firewall resource on the EC2 instance 55 | const _firewall = new svmkit.firewall.Firewall( 56 | "firewall", 57 | { 58 | connection, 59 | params: firewallParams, 60 | }, 61 | { 62 | dependsOn: [machine], 63 | }, 64 | ); 65 | 66 | // Tuner setup 67 | const tunerVariant = 68 | tunerConfig.get("variant") ?? 
69 | svmkit.tuner.TunerVariant.Generic; 70 | 71 | // Retrieve the default tuner parameters for that variant 72 | const genericTunerParamsOutput = svmkit.tuner.getDefaultTunerParamsOutput({ 73 | variant: tunerVariant, 74 | }); 75 | 76 | // "Apply" those params so we can pass them to the Tuner constructor 77 | const tunerParams = genericTunerParamsOutput.apply((p) => ({ 78 | cpuGovernor: p.cpuGovernor, 79 | kernel: p.kernel, 80 | net: p.net, 81 | vm: p.vm, 82 | fs: p.fs, 83 | })); 84 | 85 | // Create the Tuner resource on the EC2 instance 86 | const _tuner = new svmkit.tuner.Tuner( 87 | "tuner", 88 | { 89 | connection, 90 | params: tunerParams, 91 | }, 92 | { 93 | dependsOn: [machine], 94 | }, 95 | ); 96 | 97 | // Instantiate a new Xen instance on the machine. 98 | new svmkit.validator.Agave( 99 | "validator", 100 | { 101 | connection, 102 | variant: "tachyon", 103 | environment: { 104 | rpcURL: "https://rpc.testnet.x1.xyz", 105 | }, 106 | keyPairs: { 107 | identity: validatorKey.json, 108 | voteAccount: voteAccountKey.json, 109 | }, 110 | flags: { 111 | useSnapshotArchivesAtStartup: "when-newest", 112 | fullRpcAPI: false, 113 | rpcPort: 8899, 114 | privateRPC: true, 115 | onlyKnownRPC: true, 116 | dynamicPortRange: "8002-8020", 117 | gossipPort: 8001, 118 | rpcBindAddress: "0.0.0.0", 119 | walRecoveryMode: "skip_any_corrupted_record", 120 | limitLedgerSize: 50000000, 121 | blockProductionMethod: "central-scheduler", 122 | fullSnapshotIntervalSlots: 5000, 123 | noWaitForVoteToStartLeader: true, 124 | noVoting: true, 125 | maximumIncrementalSnapshotsToRetain: 10, 126 | maximumFullSnapshotsToRetain: 50, 127 | enableRpcTransactionHistory: true, 128 | enableExtendedTxMetadataStorage: true, 129 | rpcPubsubEnableBlockSubscription: true, 130 | entryPoint: [ 131 | "entrypoint1.testnet.x1.xyz:8001", 132 | "entrypoint2.testnet.x1.xyz:8000", 133 | "entrypoint3.testnet.x1.xyz:8000", 134 | ], 135 | knownValidator: [ 136 | "Abt4r6uhFs7yPwR3jT5qbnLjBtasgHkRVAd1W6H5yonT", 137 | "5NfpgFCwrYzcgJkda9bRJvccycLUo3dvVQsVAK2W43Um", 138 | "FcrZRBfVk2h634L9yvkysJdmvdAprq1NM4u263NuR6LC", 139 | ], 140 | expectedGenesisHash: "C7ucgdDEhxLTpXHhWSZxavSVmaNTUJWwT5iTdeaviDho", 141 | }, 142 | }, 143 | { 144 | dependsOn: [machine], 145 | }, 146 | ); 147 | 148 | // Expose information required to SSH to the validator host. 149 | export const nodes = [ 150 | { 151 | name: "instance", 152 | connection, 153 | }, 154 | ]; 155 | export const tuner_params = tunerParams; 156 | -------------------------------------------------------------------------------- /gcp-validator-agave-ts/index.ts: -------------------------------------------------------------------------------- 1 | import * as pulumi from "@pulumi/pulumi"; 2 | import * as svmkit from "@svmkit/pulumi-svmkit"; 3 | 4 | const validatorConfig = new pulumi.Config("validator"); 5 | const solanaConfig = new pulumi.Config("solana"); 6 | const firewallConfig = new pulumi.Config("firewall"); 7 | const tunerConfig = new pulumi.Config("tuner"); 8 | 9 | // AWS-specific resources are created inside. 10 | import { sshKey, instance, user } from "./gcp"; 11 | 12 | // Lookup information about the Solana network. 13 | const networkName = 14 | solanaConfig.get("network") ?? 15 | svmkit.solana.NetworkName.Testnet; 16 | const networkInfo = svmkit.networkinfo.getNetworkInfoOutput({ networkName }); 17 | const agaveVersion = validatorConfig.get("version") ?? "2.2.14-1"; 18 | 19 | // Create some keys for this validator to use. 
20 | const validatorKey = new svmkit.KeyPair("validator-key"); 21 | const voteAccountKey = new svmkit.KeyPair("vote-account-key"); 22 | 23 | const instanceIP = instance.networkInterfaces.apply((interfaces) => { 24 | return interfaces[0].accessConfigs![0].natIp; 25 | }); 26 | 27 | // Point pulumi-svmkit at the AWS EC2 instance's SSH connection. 28 | const connection = { 29 | host: instanceIP, 30 | user, 31 | privateKey: sshKey.privateKeyOpenssh, 32 | dialErrorLimit: 50, 33 | }; 34 | 35 | // Configure the instance for SVMKit 36 | const machine = new svmkit.machine.Machine( 37 | "machine", 38 | { 39 | connection, 40 | }, 41 | { 42 | dependsOn: [instance], 43 | }, 44 | ); 45 | 46 | // Firewall setup 47 | const firewallVariant = 48 | firewallConfig.get("variant") ?? 49 | svmkit.firewall.FirewallVariant.Generic; 50 | 51 | // Retrieve the default firewall parameters for that variant 52 | const genericFirewallParamsOutput = 53 | svmkit.firewall.getDefaultFirewallParamsOutput({ 54 | variant: firewallVariant, 55 | }); 56 | 57 | // "Apply" those params so we can pass them to the Firewall constructor 58 | const firewallParams = genericFirewallParamsOutput.apply((f) => ({ 59 | allowPorts: [ 60 | ...(f.allowPorts ?? []), 61 | "8000:8020/tcp", 62 | "8000:8020/udp", 63 | "8899", 64 | "8900/tcp", 65 | ], 66 | })); 67 | 68 | // Create the Firewall resource on the EC2 instance 69 | const _firewall = new svmkit.firewall.Firewall( 70 | "firewall", 71 | { 72 | connection, 73 | params: firewallParams, 74 | }, 75 | { 76 | dependsOn: [machine], 77 | }, 78 | ); 79 | 80 | // Tuner setup 81 | const tunerVariant = 82 | tunerConfig.get("variant") ?? 83 | svmkit.tuner.TunerVariant.Generic; 84 | 85 | // Retrieve the default tuner parameters for that variant 86 | const genericTunerParamsOutput = svmkit.tuner.getDefaultTunerParamsOutput({ 87 | variant: tunerVariant, 88 | }); 89 | 90 | // "Apply" those params so we can pass them to the Tuner constructor 91 | const tunerParams = genericTunerParamsOutput.apply((p) => ({ 92 | cpuGovernor: p.cpuGovernor, 93 | kernel: p.kernel, 94 | net: p.net, 95 | vm: p.vm, 96 | fs: p.fs, 97 | })); 98 | 99 | // Create the Tuner resource on the EC2 instance 100 | const _tuner = new svmkit.tuner.Tuner( 101 | "tuner", 102 | { 103 | connection, 104 | params: tunerParams, 105 | }, 106 | { 107 | dependsOn: [machine], 108 | }, 109 | ); 110 | 111 | // Instantiate a new Agave instance on the machine. 112 | new svmkit.validator.Agave( 113 | "validator", 114 | { 115 | connection, 116 | version: agaveVersion, 117 | environment: { 118 | rpcURL: networkInfo.rpcURL[0], 119 | }, 120 | keyPairs: { 121 | identity: validatorKey.json, 122 | voteAccount: voteAccountKey.json, 123 | }, 124 | flags: { 125 | useSnapshotArchivesAtStartup: "when-newest", 126 | fullRpcAPI: false, 127 | rpcPort: 8899, 128 | privateRPC: true, 129 | onlyKnownRPC: true, 130 | dynamicPortRange: "8002-8020", 131 | gossipHost: instanceIP, 132 | gossipPort: 8001, 133 | rpcBindAddress: "0.0.0.0", 134 | walRecoveryMode: "skip_any_corrupted_record", 135 | limitLedgerSize: 50000000, 136 | blockProductionMethod: "central-scheduler", 137 | fullSnapshotIntervalSlots: 1000, 138 | noWaitForVoteToStartLeader: true, 139 | noVoting: true, 140 | entryPoint: networkInfo.entryPoint, 141 | knownValidator: networkInfo.knownValidator, 142 | expectedGenesisHash: networkInfo.genesisHash, 143 | }, 144 | }, 145 | { 146 | dependsOn: [machine], 147 | }, 148 | ); 149 | 150 | // Expose information required to SSH to the validator host. 
151 | export const nodes = [ 152 | { 153 | name: "instance", 154 | connection, 155 | }, 156 | ]; 157 | -------------------------------------------------------------------------------- /aws-validator-xen-ts/README.md: -------------------------------------------------------------------------------- 1 | # X1 Validator running on AWS 2 | 3 | This example deploys a single X1 testnet validator on AWS. It is a non-voting, functional demonstration and is not intended for production use. For the additional instructions needed to setup a voting validator, please refer to the official [X1 documentation](https://docs.x1.xyz/validating/create-a-validator-node). 4 | 5 | ## Demo Video 6 | 7 | [![YouTube Video](https://img.youtube.com/vi/QMZE24w71uE/0.jpg)](https://www.youtube.com/watch?v=QMZE24w71uE) 8 | 9 | 10 | ## Pulumi Configuration Options 11 | 12 | | Name | Description | Default Value | 13 | |:--------------------|:------------------------------------------------------------------|:--------------| 14 | | node:instanceType | The AWS instance type to use for all of the nodes. | t3.2xlarge | 15 | | node:instanceArch | The AWS architecture type to use for AMI lookup. | x86\_64 | 16 | | node:volumeIOPS | The number of IOPS to provide to the ledger and accounts volumes. | 5000 | 17 | | node:rootVolumeSize | The size of the AWS instance's root volume, in gigabytes. | 32 | 18 | | node:instanceAmi | The AMI to use for all of the nodes. | _(debian-12)_ | 19 | | node:user | The user to log into all of the nodes as. | admin | 20 | 21 | ### Performance testing 22 | 23 | The default configuration for IOPS is targeted at functional testing. 24 | For perfomance testing, set node:volumeIOPS to 16000. 25 | 26 | ## Running the Example 27 | 28 | 0. Have `pulumi` installed, logged in to wherever you're storing state, and configured to work with AWS. 29 | 30 | - https://www.pulumi.com/docs/iac/cli/commands/pulumi_login/ 31 | - https://github.com/pulumi/pulumi-aws?tab=readme-ov-file#configuration 32 | 33 | 1. Run `pulumi install`; this will install all of the required pieces for this example. 34 | 35 | ``` 36 | % pulumi install 37 | Installing dependencies... 38 | 39 | yarn install v1.22.22 40 | [1/4] 🔍 Resolving packages... 41 | [2/4] 🚚 Fetching packages... 42 | [3/4] 🔗 Linking dependencies... 43 | [4/4] 🔨 Building fresh packages... 44 | ✨ Done in 3.69s. 45 | Finished installing dependencies 46 | ``` 47 | 48 | 2. Create and select a Pulumi stack 49 | 50 | ``` 51 | % pulumi stack init new-validator 52 | Created stack 'new-validator' 53 | ``` 54 | 55 | 3. Run `pulumi up` 56 | 57 | ``` 58 | % pulumi up 59 | Previewing update (new-validator) 60 | 61 | . 62 | . 63 | . 64 | 65 | Do you want to perform this update? yes 66 | Updating (new-validator) 67 | 68 | Type Name Status 69 | + pulumi:pulumi:Stack aws-validator-agave-ts-new-validator created (60s) 70 | + ├─ svmkit:index:KeyPair vote-account-key created (0.18s) 71 | + ├─ svmkit:index:KeyPair validator-key created (0.39s) 72 | + ├─ tls:index:PrivateKey ssh-key created (0.29s) 73 | + ├─ aws:ec2:SecurityGroup security-group created (4s) 74 | + ├─ aws:ec2:KeyPair keypair created (0.70s) 75 | + ├─ aws:ec2:Instance instance created (14s) 76 | + └─ svmkit:validator:Agave validator created (36s) 77 | 78 | Outputs: 79 | PUBLIC_DNS_NAME: "ec2-35-86-146-3.us-west-2.compute.amazonaws.com" 80 | SSH_PRIVATE_KEY: [secret] 81 | 82 | Resources: 83 | + 8 created 84 | 85 | Duration: 1m2s 86 | ``` 87 | 88 | 4. Verify that the validator has connected to the network. 
89 | 90 | ``` 91 | % ./ssh-to-host 0 journalctl -f -u svmkit-tachyon-validator 92 | [2024-11-20T17:50:24.275774661Z INFO solana_download_utils] downloaded 3048373992 bytes 9.3% 17468680.0 bytes/s 93 | [2024-11-20T17:50:30.278042126Z INFO solana_download_utils] downloaded 3154173560 bytes 9.6% 17626600.0 bytes/s 94 | [2024-11-20T17:50:36.286639128Z INFO solana_download_utils] downloaded 3259296912 bytes 10.0% 17495494.0 bytes/s 95 | [2024-11-20T17:50:42.291898545Z INFO solana_download_utils] downloaded 3364530312 bytes 10.3% 17523540.0 bytes/s 96 | [2024-11-20T17:50:48.297096944Z INFO solana_download_utils] downloaded 3470044624 bytes 10.6% 17570496.0 bytes/s 97 | ``` 98 | 99 | 5. You can then do some of the following by manually: 100 | 101 | - Airdrop Solana to your validator's accounts. 102 | - Create a vote account for your validator. 103 | - Create stake for your validator. 104 | 105 | To SSH into the validator node you just created, run `./ssh-to-host 0` with no additional arguments. 106 | 107 | 6. (Optional) Tear down the example 108 | 109 | ``` 110 | % pulumi down 111 | ``` 112 | -------------------------------------------------------------------------------- /gcp-validator-agave-ts/README.md: -------------------------------------------------------------------------------- 1 | # Solana Validator running on GCP 2 | 3 | This example brings up a single Solana validator on GCP. This is a 4 | non-voting functional example, and shouldn't used for any production 5 | use case. You may have performance problems with the default 6 | `node:instanceType`. 7 | 8 | [![Watch the video](https://img.youtube.com/vi/jHvUuGpmU9o/0.jpg)](https://youtu.be/jHvUuGpmU9o) 9 | 10 | ## Pulumi Configuration Options 11 | 12 | | Name | Description | Required | Default Value | 13 | | :---------------- | :------------------------------------------------------------------------ | :------- | :------------ | 14 | | solana:network | The known Solana cluster to connect to. | no | testnet | 15 | | validator:version | The version of the validator APT package to install. | no | 2.2.14-1 | 16 | | node:instanceType | The GCP instance type to use for all of the nodes. | no | c4-standard-8 | 17 | | node:diskSize | The size of the volume to use for OS, accounts, and ledger, in gigabytes. | no | 256 | 18 | | gcp:project | The GCP project to create all resources under. | no | _(system)_ | 19 | | gcp:region | The GCP region to create all resources in. | yes | | 20 | | gcp:zone | The **fully-qualified** GCP availability zone to create all resources in. | yes | | 21 | | node:user | The user to log into all of the nodes as. | no | admin | 22 | 23 | ## Running the Example 24 | 25 | 0. Have `pulumi` installed, logged in to wherever you're storing state, and configured to work with GCP. 26 | 27 | - https://www.pulumi.com/docs/iac/cli/commands/pulumi_login/ 28 | - https://github.com/pulumi/pulumi-gcp?tab=readme-ov-file#google-cloud-platform-resource-provider 29 | 30 | 1. Run `pulumi install`; this will install all of the required pieces for this example. 31 | 32 | ``` 33 | % pulumi install 34 | Installing dependencies... 35 | 36 | yarn install v1.22.22 37 | [1/4] 🔍 Resolving packages... 38 | [2/4] 🚚 Fetching packages... 39 | [3/4] 🔗 Linking dependencies... 40 | [4/4] 🔨 Building fresh packages... 41 | ✨ Done in 3.69s. 42 | Finished installing dependencies 43 | ``` 44 | 45 | 2. Create and select a Pulumi stack 46 | 47 | ``` 48 | % pulumi stack init new-validator 49 | Created stack 'new-validator' 50 | ``` 51 | 52 | 3. 
Run `pulumi up` 53 | 54 | ``` 55 | % pulumi up 56 | Previewing update (new-validator) 57 | 58 | . 59 | . 60 | . 61 | 62 | Do you want to perform this update? yes 63 | Updating (new-validator) 64 | 65 | View in Browser (Ctrl+O): https://app.pulumi.com/someuser/gcp-validator-agave-ts/new-validator/updates/1 66 | 67 | Type Name Status 68 | + pulumi:pulumi:Stack gcp-validator-agave-ts-asg-test created (124s) 69 | + ├─ svmkit:index:KeyPair validator-key created (0.12s) 70 | + ├─ gcp:compute:Network network created (21s) 71 | + ├─ svmkit:index:KeyPair vote-account-key created (0.20s) 72 | + ├─ tls:index:PrivateKey ssh-key created (0.10s) 73 | + ├─ gcp:compute:Subnetwork subnet created (22s) 74 | + ├─ gcp:compute:Firewall firewall created (11s) 75 | + ├─ gcp:compute:Instance instance created (40s) 76 | + └─ svmkit:validator:Agave validator created (36s) 77 | 78 | Outputs: 79 | nodes: [ 80 | ... 81 | ] 82 | 83 | Resources: 84 | + 9 created 85 | 86 | Duration: 2m6s 87 | ``` 88 | 89 | 4. Verify that the validator has connected to the network. 90 | 91 | ``` 92 | % ./ssh-to-host 0 journalctl -f -u svmkit-agave-validator 93 | [2024-11-20T17:50:24.275774661Z INFO solana_download_utils] downloaded 3048373992 bytes 9.3% 17468680.0 bytes/s 94 | [2024-11-20T17:50:30.278042126Z INFO solana_download_utils] downloaded 3154173560 bytes 9.6% 17626600.0 bytes/s 95 | [2024-11-20T17:50:36.286639128Z INFO solana_download_utils] downloaded 3259296912 bytes 10.0% 17495494.0 bytes/s 96 | [2024-11-20T17:50:42.291898545Z INFO solana_download_utils] downloaded 3364530312 bytes 10.3% 17523540.0 bytes/s 97 | [2024-11-20T17:50:48.297096944Z INFO solana_download_utils] downloaded 3470044624 bytes 10.6% 17570496.0 bytes/s 98 | ``` 99 | 100 | 5. You can then do some of the following by manually: 101 | 102 | - Airdrop Solana to your validator's accounts. 103 | - Create a vote account for your validator. 104 | - Create stake for your validator. 105 | 106 | Please see the [Solana Operations](https://docs.solanalabs.com/operations/) manual for more information. 107 | To SSH into the validator node you just created, run `./ssh-to-host 0` with no other arguments. 108 | 109 | 6. (Optional) Tear down the example 110 | 111 | ``` 112 | % pulumi down 113 | ``` 114 | -------------------------------------------------------------------------------- /gcp-network-spe-ts/spe.ts: -------------------------------------------------------------------------------- 1 | import * as pulumi from "@pulumi/pulumi"; 2 | import * as gcp from "@pulumi/gcp"; 3 | import * as tls from "@pulumi/tls"; 4 | import * as svmkit from "@svmkit/pulumi-svmkit"; 5 | 6 | const validatorConfig = new pulumi.Config("validator"); 7 | 8 | export const agaveVersion = validatorConfig.get("version") ?? "2.2.14-1"; 9 | 10 | const nodeConfig = new pulumi.Config("node"); 11 | 12 | export const user = nodeConfig.get("user") ?? 
"admin"; 13 | 14 | const network = new gcp.compute.Network("network", { 15 | autoCreateSubnetworks: false, 16 | }); 17 | 18 | const subnet = new gcp.compute.Subnetwork("subnet", { 19 | ipCidrRange: "10.0.1.0/24", 20 | network: network.id, 21 | }); 22 | 23 | const firewalls = [ 24 | new gcp.compute.Firewall("external", { 25 | network: network.selfLink, 26 | allows: [ 27 | { 28 | protocol: "tcp", 29 | ports: ["22"], 30 | }, 31 | ], 32 | direction: "INGRESS", 33 | sourceRanges: ["0.0.0.0/0"], 34 | targetTags: [], 35 | }), 36 | new gcp.compute.Firewall("internal", { 37 | network: network.selfLink, 38 | allows: [ 39 | { 40 | protocol: "icmp", 41 | }, 42 | { 43 | protocol: "tcp", 44 | ports: ["22", "8000-8020", "8899", "8900"], 45 | }, 46 | { 47 | protocol: "udp", 48 | ports: ["8000-8020"], 49 | }, 50 | ], 51 | sourceRanges: [subnet.ipCidrRange], 52 | targetTags: [], 53 | }), 54 | ]; 55 | 56 | export class Node { 57 | name: string; 58 | sshKey: tls.PrivateKey; 59 | validatorKey: svmkit.KeyPair; 60 | voteAccountKey: svmkit.KeyPair; 61 | instance: gcp.compute.Instance; 62 | publicIP: pulumi.Output; 63 | privateIP: pulumi.Output; 64 | connection: svmkit.types.input.ssh.ConnectionArgs; 65 | machine: svmkit.machine.Machine; 66 | constructor(name: string) { 67 | this.name = name; 68 | 69 | const _ = (s: string) => `${this.name}-${s}`; 70 | 71 | this.sshKey = new tls.PrivateKey(_("ssh-key"), { algorithm: "ED25519" }); 72 | this.validatorKey = new svmkit.KeyPair(_("validator-key")); 73 | this.voteAccountKey = new svmkit.KeyPair(_("vote-account-key")); 74 | 75 | const machineType = nodeConfig.get("machineType") ?? "n1-standard-4"; 76 | const osImage = nodeConfig.get("osImage") ?? "debian-12"; 77 | const diskSize = nodeConfig.getNumber("diskSize") ?? 64; 78 | 79 | this.instance = new gcp.compute.Instance( 80 | _("instance"), 81 | { 82 | machineType, 83 | bootDisk: { 84 | initializeParams: { 85 | image: osImage, 86 | size: diskSize, 87 | }, 88 | }, 89 | networkInterfaces: [ 90 | { 91 | network: network.id, 92 | subnetwork: subnet.id, 93 | accessConfigs: [{}], 94 | }, 95 | ], 96 | serviceAccount: { 97 | scopes: ["https://www.googleapis.com/auth/cloud-platform"], 98 | }, 99 | allowStoppingForUpdate: true, 100 | tags: [], 101 | metadata: { 102 | "enable-oslogin": "false", 103 | "ssh-keys": this.sshKey.publicKeyOpenssh.apply((k) => `${user}:${k}`), 104 | }, 105 | }, 106 | { dependsOn: firewalls }, 107 | ); 108 | 109 | this.publicIP = this.instance.networkInterfaces.apply((interfaces) => { 110 | return interfaces[0].accessConfigs![0].natIp; 111 | }); 112 | 113 | this.privateIP = this.instance.networkInterfaces.apply((interfaces) => { 114 | return interfaces[0].networkIp; 115 | }); 116 | 117 | this.connection = { 118 | host: this.publicIP, 119 | user, 120 | privateKey: this.sshKey.privateKeyOpenssh, 121 | }; 122 | 123 | this.machine = new svmkit.machine.Machine( 124 | _("machine"), 125 | { 126 | connection: this.connection, 127 | }, 128 | { 129 | dependsOn: [this.instance], 130 | }, 131 | ); 132 | } 133 | 134 | configureValidator( 135 | flags: svmkit.types.input.agave.FlagsArgs, 136 | environment: svmkit.types.input.solana.EnvironmentArgs, 137 | startupPolicy: svmkit.types.input.agave.StartupPolicyArgs, 138 | dependsOn: pulumi.Input[], 139 | runnerConfig?: pulumi.Input, 140 | ) { 141 | return new svmkit.validator.Agave( 142 | `${this.name}-validator`, 143 | { 144 | environment, 145 | runnerConfig, 146 | connection: this.connection, 147 | version: agaveVersion, 148 | startupPolicy, 149 | shutdownPolicy: { 150 | 
force: true, 151 | }, 152 | keyPairs: { 153 | identity: this.validatorKey.json, 154 | voteAccount: this.voteAccountKey.json, 155 | }, 156 | flags, 157 | timeoutConfig: { 158 | rpcServiceTimeout: 120, 159 | }, 160 | info: { 161 | name: this.name, 162 | details: "An AWS network-based SPE validator node.", 163 | }, 164 | }, 165 | { 166 | dependsOn: [this.machine, ...dependsOn], 167 | }, 168 | ); 169 | } 170 | } 171 | -------------------------------------------------------------------------------- /aws-validator-agave-ts/README.md: -------------------------------------------------------------------------------- 1 | # Solana Validator running on AWS 2 | 3 | This example brings up a single Solana validator on AWS. This is a 4 | non-voting functional example, and shouldn't used for any production 5 | use case. You may have performance problems with the default 6 | `node:instanceType`. 7 | 8 | [![Watch the video](https://img.youtube.com/vi/jHvUuGpmU9o/0.jpg)](https://youtu.be/jHvUuGpmU9o) 9 | 10 | ## Pulumi Configuration Options 11 | 12 | | Name | Description | Required | Default Value | 13 | | :------------------ | :--------------------------------------------------------------- | :------- | :------------ | 14 | | solana:network | The known Solana cluster to connect to. | no | testnet | 15 | | validator:version | The version of the validator APT package to install. | no | 2.2.14-1 | 16 | | node:instanceType | The AWS instance type to use for all of the nodes. | no | t3.2xlarge | 17 | | node:instanceArch | The AWS architecture type to use for AMI lookup. | no | x86\_64 | 18 | | node:volumeIOPS | The number of IOPS to provide to the ledger and accounts volumes.| no | 5000 | 19 | | node:rootVolumeSize | The size of the AWS instance's root volume, in gigabytes. | no | 32 | 20 | | node:instanceAmi | The AMI to use for all of the nodes. | no | _(debian-12)_ | 21 | | node:user | The user to log into all of the nodes as. | no | admin | 22 | 23 | ### Performance testing 24 | 25 | The default configuration for IOPS is targeted at functional testing. 26 | For perfomance testing, set node:volumeIOPS to 16000. 27 | 28 | ## Running the Example 29 | 30 | 0. Have `pulumi` installed, logged in to wherever you're storing state, and configured to work with AWS. 31 | 32 | - https://www.pulumi.com/docs/iac/cli/commands/pulumi_login/ 33 | - https://github.com/pulumi/pulumi-aws?tab=readme-ov-file#configuration 34 | 35 | 1. Run `pulumi install`; this will install all of the required pieces for this example. 36 | 37 | ``` 38 | % pulumi install 39 | Installing dependencies... 40 | 41 | yarn install v1.22.22 42 | [1/4] 🔍 Resolving packages... 43 | [2/4] 🚚 Fetching packages... 44 | [3/4] 🔗 Linking dependencies... 45 | [4/4] 🔨 Building fresh packages... 46 | ✨ Done in 3.69s. 47 | Finished installing dependencies 48 | ``` 49 | 50 | 2. Create and select a Pulumi stack 51 | 52 | ``` 53 | % pulumi stack init new-validator 54 | Created stack 'new-validator' 55 | ``` 56 | 57 | 3. Run `pulumi up` 58 | 59 | ``` 60 | % pulumi up 61 | Previewing update (new-validator) 62 | 63 | . 64 | . 65 | . 66 | 67 | Do you want to perform this update? 
yes 68 | Updating (new-validator) 69 | 70 | View in Browser (Ctrl+O): https://app.pulumi.com/alexander_guy/aws-validator-agave-ts/new-validator/updates/1 71 | 72 | Type Name Status 73 | + pulumi:pulumi:Stack aws-validator-agave-ts-new-validator created (60s) 74 | + ├─ svmkit:index:KeyPair vote-account-key created (0.18s) 75 | + ├─ svmkit:index:KeyPair validator-key created (0.39s) 76 | + ├─ tls:index:PrivateKey ssh-key created (0.29s) 77 | + ├─ aws:ec2:SecurityGroup security-group created (4s) 78 | + ├─ aws:ec2:KeyPair keypair created (0.70s) 79 | + ├─ aws:ec2:Instance instance created (14s) 80 | + └─ svmkit:validator:Agave validator created (36s) 81 | 82 | Outputs: 83 | PUBLIC_DNS_NAME: "ec2-35-86-146-3.us-west-2.compute.amazonaws.com" 84 | SSH_PRIVATE_KEY: [secret] 85 | 86 | Resources: 87 | + 8 created 88 | 89 | Duration: 1m2s 90 | ``` 91 | 92 | 4. Verify that the validator has connected to the network. 93 | 94 | ``` 95 | % ./ssh-to-host 0 journalctl -f -u svmkit-agave-validator 96 | [2024-11-20T17:50:24.275774661Z INFO solana_download_utils] downloaded 3048373992 bytes 9.3% 17468680.0 bytes/s 97 | [2024-11-20T17:50:30.278042126Z INFO solana_download_utils] downloaded 3154173560 bytes 9.6% 17626600.0 bytes/s 98 | [2024-11-20T17:50:36.286639128Z INFO solana_download_utils] downloaded 3259296912 bytes 10.0% 17495494.0 bytes/s 99 | [2024-11-20T17:50:42.291898545Z INFO solana_download_utils] downloaded 3364530312 bytes 10.3% 17523540.0 bytes/s 100 | [2024-11-20T17:50:48.297096944Z INFO solana_download_utils] downloaded 3470044624 bytes 10.6% 17570496.0 bytes/s 101 | ``` 102 | 103 | 5. You can then do some of the following by manually: 104 | 105 | - Airdrop Solana to your validator's accounts. 106 | - Create a vote account for your validator. 107 | - Create stake for your validator. 108 | 109 | Please see the [Solana Operations](https://docs.solanalabs.com/operations/) manual for more information. 110 | To SSH into the validator node you just created, run `./ssh-to-host 0` with no additional arguments. 111 | 112 | 6. 
(Optional) Tear down the example 113 | 114 | ``` 115 | % pulumi down 116 | ``` 117 | -------------------------------------------------------------------------------- /aws-network-spe-py/spe/node.py: -------------------------------------------------------------------------------- 1 | from typing import Union 2 | 3 | import pulumi 4 | import pulumi_aws as aws 5 | import pulumi_tls as tls 6 | import pulumi_svmkit as svmkit 7 | 8 | from .network import external_sg, internal_sg, subnet_id 9 | 10 | node_config = pulumi.Config("node") 11 | validator_config = pulumi.Config("validator") 12 | 13 | agave_version = validator_config.get('version') or '2.2.14-1' 14 | instance_type = node_config.get('instanceType') or "c6i.xlarge" 15 | instance_ami = node_config.get('instanceAmi') 16 | user = node_config.get('user') or 'admin' 17 | 18 | iops = node_config.get_int('volumeIOPS') or 5000 19 | swap_size = node_config.get_int('swapSize') or 8 20 | root_volume_size = (node_config.get_int('rootVolumeSize') or 32) + swap_size 21 | 22 | ami = instance_ami or aws.ec2.get_ami( 23 | filters=[ 24 | { 25 | "name": "name", 26 | "values": ["debian-12-*"], 27 | }, 28 | { 29 | "name": "architecture", 30 | "values": [node_config.get('instanceArch') or 'x86_64'], 31 | }, 32 | ], 33 | owners=["136693071363"], # Debian 34 | most_recent=True, 35 | ).id 36 | 37 | 38 | class Node: 39 | def __init__(self, name): 40 | self.name = name 41 | 42 | def _(s): 43 | return f"{self.name}-{s}" 44 | 45 | self.ssh_key = tls.PrivateKey(_("ssh-key"), algorithm="ED25519") 46 | self.key_pair = aws.ec2.KeyPair( 47 | _("keypair"), public_key=self.ssh_key.public_key_openssh) 48 | 49 | self.validator_key = svmkit.KeyPair(_("validator-key")) 50 | self.vote_account_key = svmkit.KeyPair(_("vote-account-key")) 51 | 52 | stack_name = pulumi.get_stack() 53 | 54 | self.instance = aws.ec2.Instance( 55 | _("instance"), 56 | ami=ami, 57 | instance_type=instance_type, 58 | key_name=self.key_pair.key_name, 59 | root_block_device={ 60 | "volume_size": root_volume_size, 61 | "volume_type": "gp3", 62 | "iops": iops, 63 | }, 64 | vpc_security_group_ids=[external_sg.id, internal_sg.id], 65 | subnet_id=subnet_id, 66 | associate_public_ip_address=True, 67 | ebs_block_devices=[ 68 | { 69 | "device_name": "/dev/sdf", 70 | "volume_size": 100, 71 | "volume_type": "gp3", 72 | "iops": iops, 73 | }, 74 | { 75 | "device_name": "/dev/sdg", 76 | "volume_size": 204, 77 | "volume_type": "gp3", 78 | "iops": iops, 79 | }, 80 | ], 81 | user_data=f"""#!/bin/bash 82 | # Format the /dev/sdf and /dev/sdg devices with the ext4 filesystem. 83 | mkfs -t ext4 /dev/sdf 84 | mkfs -t ext4 /dev/sdg 85 | 86 | # Create directories for Solana accounts and ledger data. 87 | mkdir -p /home/sol/accounts 88 | mkdir -p /home/sol/ledger 89 | 90 | # Append entries to /etc/fstab to mount the devices and swap at boot. 91 | cat <> /etc/fstab 92 | /dev/sdf /home/sol/accounts ext4 defaults 0 0 93 | /dev/sdg /home/sol/ledger ext4 defaults 0 0 94 | /swapfile none swap sw 0 0 95 | EOF 96 | 97 | # Setup swap space 98 | fallocate -l {swap_size}GiB /swapfile 99 | chmod 600 /swapfile 100 | mkswap /swapfile 101 | 102 | # Reload systemd manager configuration and mount all filesystems. 
103 | systemctl daemon-reload 104 | mount -a 105 | swapon -a 106 | """, 107 | tags={ 108 | "Name": stack_name + "-" + self.name, 109 | "Stack": stack_name, 110 | } 111 | ) 112 | 113 | self.connection = svmkit.ssh.ConnectionArgsDict({ 114 | "host": self.instance.public_dns, 115 | "user": user, 116 | "private_key": self.ssh_key.private_key_openssh, 117 | }) 118 | 119 | self.machine = svmkit.machine.Machine( 120 | f"{self.name}-machine", 121 | connection=self.connection, 122 | opts=pulumi.ResourceOptions(depends_on=[self.instance]) 123 | ) 124 | 125 | def configure_validator(self, flags: Union['svmkit.agave.FlagsArgs', 'svmkit.agave.FlagsArgsDict'], environment: Union['svmkit.solana.EnvironmentArgs', 'svmkit.solana.EnvironmentArgsDict'], startup_policy: Union['svmkit.agave.StartupPolicyArgs', 'svmkit.agave.StartupPolicyArgsDict'], depends_on=[]): 126 | return svmkit.validator.Agave( 127 | f"{self.name}-validator", 128 | environment=environment, 129 | runner_config=svmkit.runner.ConfigArgs( 130 | package_config=svmkit.deb.PackageConfigArgs( 131 | additional=['svmkit-spl-token-cli'], 132 | ), 133 | ), 134 | connection=self.connection, 135 | version=agave_version, 136 | startup_policy=startup_policy, 137 | shutdown_policy={ 138 | "force": True, 139 | }, 140 | key_pairs={ 141 | "identity": self.validator_key.json, 142 | "vote_account": self.vote_account_key.json, 143 | }, 144 | flags=flags, 145 | timeout_config={ 146 | "rpc_service_timeout": 120, 147 | }, 148 | info={ 149 | "name": self.name, 150 | "details": "An AWS network-based SPE validator node.", 151 | }, 152 | opts=pulumi.ResourceOptions( 153 | depends_on=([self.machine] + depends_on)) 154 | ) 155 | -------------------------------------------------------------------------------- /bare-metal-fd-ts/README.md: -------------------------------------------------------------------------------- 1 | # Frankendancer running on a remote host 2 | 3 | This example brings up a single Frankendancer validator on any remote machine with SSH access. 4 | 5 | ## Demo Video 6 | [![Watch the video](https://img.youtube.com/vi/dMkJeig4Hh8/0.jpg)](https://www.youtube.com/watch?v=dMkJeig4Hh8) 7 | 8 | 9 | ## Pulumi Configuration Options 10 | 11 | | Name | Description | Required | Default Value | 12 | | :---------------- | :---------------------------------------------------------------- | :------- | :------------ | 13 | | solana:network | The known Solana cluster to connect to. | no | testnet | 14 | | remote:host | The hostname of the remote machine. | yes | | 15 | | remote:user | The login user of the remote machine. | yes | | 16 | | remote:privateKey | **(SECRET)** The OpenSSH (PEM) private key for the remote machine. | yes | | 17 | 18 | ## Running the Example 19 | 20 | 0. Have `pulumi` installed, logged in to wherever you're storing state, and configured to work with AWS. 21 | 22 | - https://www.pulumi.com/docs/iac/cli/commands/pulumi_login/ 23 | - https://github.com/pulumi/pulumi-aws?tab=readme-ov-file#configuration 24 | 25 | 1. If the remote machine has inbound network traffic blocked by default, ensure that inbound traffic is allowed on TCP ports 8000-8020 and UDP ports 8000-8020 and 8900-8920. 26 | 27 | 2. Run `pulumi install`; this will install all of the required pieces for this example. 28 | 29 | ``` 30 | % pulumi install 31 | Installing dependencies... 32 | 33 | yarn install v1.22.22 34 | [1/4] 🔍 Resolving packages... 35 | [2/4] 🚚 Fetching packages... 36 | [3/4] 🔗 Linking dependencies... 37 | [4/4] 🔨 Building fresh packages... 38 | ✨ Done in 3.69s. 
39 | Finished installing dependencies 40 | ``` 41 | 42 | 3. Create and select a Pulumi stack 43 | 44 | ``` 45 | % pulumi stack init new-validator 46 | Created stack 'new-validator' 47 | ``` 48 | 49 | 4. Provide the credentials for connecting to the machine via SSH with `pulumi config` 50 | 51 | ``` 52 | % pulumi config set remote:host my-host.example 53 | % pulumi config set remote:user my-user 54 | % pulumi config set --secret remote:privateKey < my-access-key.pem 55 | ``` 56 | 57 | 5. Run `pulumi up` 58 | 59 | ``` 60 | % pulumi up 61 | Previewing update (testing) 62 | 63 | View in Browser (Ctrl+O): https://app.pulumi.com/someaccount/bare-metal-fd-ts/new-validator/previews/3f1f47c9-006a-4d2e-afcc-9f4b8be067d7 64 | 65 | Type Name Plan 66 | + pulumi:pulumi:Stack bare-metal-agave-ts-new-validator create 67 | + ├─ svmkit:index:KeyPair validator-key create 68 | + ├─ svmkit:index:KeyPair vote-account-key create 69 | + ├─ svmkit:index:KeyPair withdrawer-key create 70 | + └─ svmkit:validator:Firedancer fd create 71 | 72 | Outputs: 73 | nodes: [ 74 | ... 75 | ] 76 | validatorKey : output 77 | voteAccountKey : output 78 | withdrawerKey : output 79 | 80 | Resources: 81 | + 5 to create 82 | ``` 83 | 84 | 6. Verify that the validator has connected to the network. 85 | 86 | ``` 87 | % ./ssh-to-host 0 sudo journalctl -f -u svmkit-fd-validator 88 | Warning: Permanently added 'my-host.example' (ED25519) to the list of known hosts. 89 | Feb 14 23:17:46 host652096 systemd[1]: Starting svmkit-fd-validator.service - SVMkit FD Validator... 90 | Feb 14 23:17:46 host652096 systemd[1]: Started svmkit-fd-validator.service - SVMkit FD Validator. 91 | Feb 14 23:17:47 host652096 fdctl[839211]: Log at "/tmp/fd-0.0.0_839211_sol_host652096_2025_02_14_23_17_47_291751494_GMT+00" 92 | Feb 14 23:17:47 host652096 fdctl[839211]: NOTICE 02-14 23:17:47.295840 839211 f0 main src/disco/topo/fd_topo.c(446): 93 | Feb 14 23:17:47 host652096 fdctl[839211]: SUMMARY 94 | Feb 14 23:17:47 host652096 fdctl[839211]: Total Tiles: 23 95 | Feb 14 23:17:47 host652096 fdctl[839211]: Total Memory Locked: 57220812800 bytes (53 GiB + 298 MiB + 20 KiB) 96 | Feb 14 23:17:47 host652096 fdctl[839211]: Required Gigantic Pages: 53 97 | Feb 14 23:17:47 host652096 fdctl[839211]: Required Huge Pages: 149 98 | Feb 14 23:17:47 host652096 fdctl[839211]: Required Normal Pages: 43 99 | Feb 14 23:17:47 host652096 fdctl[839211]: Required Gigantic Pages (NUMA node 0): 53 100 | Feb 14 23:17:47 host652096 fdctl[839211]: Required Huge Pages (NUMA node 0): 149 101 | Feb 14 23:17:47 host652096 fdctl[839211]: Agave Affinity: 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47 102 | ``` 103 | 104 | 7. Connect to the Frankendancer UI: 105 | 106 | ``` 107 | % ./ssh-to-host 0 -L 8080:localhost:80 108 | ``` 109 | 110 | Now use your local browser to access the proxied HTTP port via [http://localhost:8080](http://localhost:8080). 111 | 112 | 8. You can then do some of the following manually: 113 | 114 | - Airdrop Solana to your validator's accounts. 115 | - Create a vote account for your validator. 116 | - Create stake for your validator. 117 | 118 | Please see the [Solana Operations](https://docs.solanalabs.com/operations/) manual for more information. 119 | To SSH into the validator node you just spun up, run `./ssh-to-host 0` with no additional arguments. 120 | 121 | 9. 
(Optional) Tear down the example 122 | 123 | ``` 124 | % pulumi down 125 | ``` 126 | -------------------------------------------------------------------------------- /guides/RUNNING-BENCH-TPS.md: -------------------------------------------------------------------------------- 1 | # Running Solana Bench TPS on SVMKit Network 2 | 3 | This guide demonstrates how to install and run `solana-bench-tps` on a SVMKit network for benchmarking transaction processing capabilities. Benchmarking is essential for validators and cluster operators to evaluate their cluster's performance. 4 | 5 | ## Overview 6 | 7 | This guide covers: 8 | 1. Setting up the required environment 9 | 2. Installing the Solana benchmarking tool from apt 10 | 3. Running benchmark tests with various configurations 11 | 4. Interpreting benchmark results 12 | 13 | ## Step-by-Step Implementation 14 | 15 | ### Step 1: Install SVMKit Bench TPS Package 16 | 17 | First, add the SVMKit repository and install the bench-tps package: 18 | 19 | ```bash 20 | # 1. Add the SVMKit repository 21 | echo "deb [trusted=yes] https://apt.abklabs.com/svmkit dev main" | sudo tee /etc/apt/sources.list.d/svmkit.list 22 | 23 | # 2. Update repositories 24 | sudo apt update 25 | 26 | # 3. Install the solana-bench-tps package 27 | sudo apt install -y svmkit-solana-bench-tps 28 | ``` 29 | 30 | ### Step 2: Run Basic Benchmark 31 | 32 | Run a basic benchmark against your local SVMKit node: 33 | 34 | ```bash 35 | solana-bench-tps \ 36 | --client-node-id /home/sol/validator-keypair.json \ 37 | -u http://localhost:8899 \ 38 | --duration 30 \ 39 | --tx-count 10 \ 40 | --threads 2 41 | ``` 42 | 43 | Parameters explained: 44 | - `--client-node-id`: Path to validator keypair that will sign transactions 45 | - `-u` or `--url`: URL of the SVMKit network RPC endpoint 46 | - `--duration`: Length of the test in seconds 47 | - `--tx-count`: Total number of transactions to generate 48 | - `--threads`: Number of threads to use for sending transactions 49 | 50 | ### Step 3: Advanced Benchmark with TPU Client 51 | 52 | For more accurate results that better simulate real-world conditions, use the TPU client mode: 53 | 54 | ```bash 55 | solana-bench-tps \ 56 | --client-node-id /home/sol/validator-keypair.json \ 57 | --url http://localhost:8899 \ 58 | --duration 60 \ 59 | --tx-count 3500 \ 60 | --use-tpu-client \ 61 | --num-lamports-per-account 10000 62 | ``` 63 | 64 | Parameters explained: 65 | - `--use-tpu-client`: Uses the Transaction Processing Unit client for sending transactions directly to the TPU port, which can achieve higher throughput than RPC 66 | - `--num-lamports-per-account`: Specifies the amount of lamports to fund each test account with (10000 in this case) 67 | 68 | ## Benchmark Configuration Options 69 | 70 | Customize your benchmarks with these additional options: 71 | 72 | ### Performance Tuning 73 | 74 | ```bash 75 | solana-bench-tps \ 76 | --client-node-id /home/sol/validator-keypair.json \ 77 | --url http://localhost:8899 \ 78 | --duration 60 \ 79 | --tx-count 5000 \ 80 | --threads 4 \ 81 | --sustained 82 | ``` 83 | 84 | The `--sustained` flag maintains a constant rate of transactions rather than sending them all at once. 
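
As a rough way to compare configurations, the same benchmark can be repeated across several thread counts. The loop below is only a sketch built from the flags already shown above, not an official harness; adjust the keypair path and RPC URL to match your node.

```bash
# Sweep thread counts with sustained load and compare the reported average TPS.
for threads in 2 4 8; do
  echo "=== threads=${threads} ==="
  solana-bench-tps \
    --client-node-id /home/sol/validator-keypair.json \
    --url http://localhost:8899 \
    --duration 60 \
    --tx-count 5000 \
    --threads "${threads}" \
    --sustained
done
```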
85 | 86 | ### Network Configuration Testing 87 | 88 | For testing different network configurations: 89 | 90 | ```bash 91 | solana-bench-tps \ 92 | --client-node-id /home/sol/validator-keypair.json \ 93 | --url http://localhost:8899 \ 94 | --duration 120 \ 95 | --tx-count 10000 \ 96 | --threads 8 \ 97 | --compute-unit-price 100 \ 98 | --lamports-per-signature 5000 99 | ``` 100 | 101 | ## Interpreting Results 102 | 103 | After running a benchmark, you'll see output similar to: 104 | 105 | ``` 106 | Confirmed 975 transactions | Finalized 952 transactions 107 | Average TPS: 32.5 over 30 seconds 108 | Maximum TPS: 45.2 109 | Minimum TPS: 21.8 110 | Average confirmation time: 1.35s 111 | Maximum confirmation time: 2.89s 112 | Minimum confirmation time: 0.65s 113 | Average finalization time: 4.21s 114 | ``` 115 | 116 | Key metrics to evaluate: 117 | - **Average TPS**: Transactions processed per second (higher is better) 118 | - **Confirmation time**: Time until transactions are confirmed (lower is better) 119 | - **Finalization time**: Time until transactions are finalized (lower is better) 120 | 121 | ## Troubleshooting 122 | 123 | Common issues and solutions: 124 | 125 | 1. **RPC connection errors**: 126 | - Verify the correct RPC endpoint URL 127 | - Check network connectivity 128 | - Ensure the node is running and synchronized 129 | 130 | 2. **Transaction failures**: 131 | - Ensure the client keypair has sufficient balance 132 | - Reduce transaction count or rate if node is overwhelmed 133 | - Check node logs for error messages 134 | 135 | 3. **Low TPS results**: 136 | - Try increasing threads 137 | - Verify hardware specifications meet requirements 138 | - Check for network congestion or resource contention 139 | 140 | ## Conclusion 141 | 142 | By following this guide, you've learned how to install and run `solana-bench-tps` to benchmark your SVMKit network. These benchmarks provide valuable insights into your network's performance and can help identify optimization opportunities. 143 | 144 | For more comprehensive testing, consider running multiple benchmarks with different parameters and comparing the results. Regularly benchmarking your network ensures optimal performance for validators and users. 145 | 146 | ## Recommended Benchmark Command 147 | 148 | The following command represents a recommended configuration for benchmarking SVMKit networks: 149 | 150 | ```bash 151 | solana-bench-tps \ 152 | --client-node-id /home/sol/validator-keypair.json \ 153 | --use-tpu-client \ 154 | --tx-count 3500 \ 155 | --duration 60 \ 156 | --url "http://localhost:8899" \ 157 | --num-lamports-per-account 10000 158 | ``` 159 | 160 | This configuration: 161 | - Uses the TPU client for higher throughput 162 | - Sends 3500 transactions over 60 seconds 163 | - Funds test accounts with 10000 lamports each 164 | - Connects to a local SVMKit node -------------------------------------------------------------------------------- /guides/DELINQUENT-VALIDATOR.md: -------------------------------------------------------------------------------- 1 | # Delinquent Validator 2 | 3 | ## What Does It Mean for a Validator to Be Delinquent? 4 | 5 | A delinquent validator is one that has stopped voting or producing blocks and is no longer actively participating in consensus. The validator will be marked as “Delinquent” in the output of solana validators if it fails to vote for a prolonged period (typically after 128 missed slots, or about 12.8 seconds on mainnet-beta). 
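
To check a single validator rather than scanning the whole table, the CLI's JSON output can be filtered. This assumes `jq` is installed; the field names shown reflect recent Agave/Solana CLI releases and may differ on other versions.

```
IDENTITY=31cNaRo15yoLaJ6sifaikdTbKpFJ4HQ6GQmD8Nu3yUyZ   # replace with your identity pubkey
solana validators --output json \
  | jq --arg id "$IDENTITY" \
      '.validators[] | select(.identityPubkey == $id) | {identityPubkey, lastVote, delinquent}'
```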
6 | 7 | Common Reasons for Delinquency: 8 | 9 | • Network connectivity issues 10 | • Hardware or resource exhaustion (CPU, memory, disk I/O) 11 | • Software crashes or misconfigurations 12 | • Outdated Solana software version 13 | • Running with an insufficient stake and getting skipped 14 | 15 | Example Output of a Delinquent Validator: 16 | 17 | ``` 18 | solana validators 19 | 20 | Identity Vote Account Commission Last Vote Root Slot Skip Rate Credits Version Active Stake 21 |   2B79QyF9idx6fnDbsQto5kxmiphFaZihGQRhLHs1ULp1 7V4beuNHASzj7LXSz7bGaAoQXSjeb38QS9itFPy1LyJe 100% 4108 ( 0) 4059 ( 0) 0.00% 0 1.18.26 9.999999344 SOL (25.00%) 22 |   HA6bFQhwq9D4Ky4ktQYXbJBWVTPd3waCF9aq5a4gscPG 6BwD54hsoopcELhKn6wmPv4MUzh8MiLiuT5bTCqVvvVQ 100% 4108 ( 0) 4059 ( 0) 0.00% 0 1.18.26 9.999999344 SOL (25.00%) 23 |   BKi2EPLT2nZcPimmZ8uUsyx6tquioeuYnRR8Bhoi4dKq 8zVEyvu2Bj4iAUqy6XMW8RhdU3K5gsP5hKVxBJQxFEgW 100% 4108 ( 0) 4059 ( 0) 0.00% 0 1.18.26 9.999999344 SOL (25.00%) 24 | ⚠️ 31cNaRo15yoLaJ6sifaikdTbKpFJ4HQ6GQmD8Nu3yUyZ 6A4LNBdhtZdMdTaWhTVu7jKcuy5QPfoeDYfg2vnNvMCg 100% 3827 3796 100.00% 0 unknown 9.999999344 SOL (25.00%) 25 | 26 | Average Stake-Weighted Skip Rate: 25.00% 27 | Average Unweighted Skip Rate: 25.00% 28 | 29 | Active Stake: 39.999997376 SOL 30 | Current Stake: 29.999998032 SOL (75.00%) 31 | Delinquent Stake: 9.999999344 SOL (25.00%) 32 | 33 | Stake By Version: 34 | 1.18.26 - 3 current validators (75.00%) 35 | unknown - 0 current validators ( 0.00%) 1 delinquent validators (25.00%) 36 | ``` 37 | 38 | ## Recovery Steps 39 | 40 | > **Note:** This guide is based on the Agave validator. System unit names may differ depending on the SVMKIT validator variant. 41 | 42 | 1. Check Validator Logs 43 | 44 | Run the following command to check recent logs: 45 | 46 | ``` 47 | journalctl -n 100 -u svmkit-agave-validator 48 | ``` 49 | 50 | Look for errors related to connection issues, missing votes, or performance degradation. 51 | 52 | 2. Check System Resource Usage 53 | 54 | Verify system performance using htop or top: 55 | 56 | ``` 57 | sudo apt install htop -y 58 | htop 59 | ``` 60 | 61 | Check disk usage and IOPS: 62 | 63 | ``` 64 | sudo apt install sysstat -y 65 | iostat -xm 5 66 | ``` 67 | 68 | If CPU is pegged at 100% or RAM usage is excessive, consider optimizing hardware or reducing load. 69 | 70 | 3. Ensure the Validator is Running 71 | 72 | Check if the validator process is active: 73 | 74 | ``` 75 | systemctl status svmkit-agave-validator 76 | ``` 77 | 78 | If it’s not running, restart it: 79 | 80 | ``` 81 | systemctl restart svmkit-agave-validator 82 | ``` 83 | 84 | 4. 
Use Catchup to Track the Validator 85 | 86 | Monitor how far behind the validator is: 87 | 88 | ``` 89 | solana catchup --our-localhost 90 | ⠐ 871 slot(s) behind (us:3798 them:4669), our node is falling behind at -2.2 slots/second (AVG: -2.8 slots/second) 91 | ``` 92 | 93 | If the validator is struggling to catch up, check logs for issues related to voting on the heaviest fork or interpreting the leader schedule: 94 | 95 | ``` 96 | journalctl -u svmkit-agave-validator 97 | ``` 98 | 99 | Example of failure logs: 100 | 101 | ``` 102 | [2025-02-24T08:52:27.534731204Z INFO solana_core::replay_stage] Couldn't vote on heaviest fork: 3863, heaviest_fork_failures: [FailedThreshold(3863, 4, 9999999344, 39999997376)] 103 | [2025-02-24T08:52:27.534765676Z ERROR solana_core::replay_stage] 31cNaRo15yoLaJ6sifaikdTbKpFJ4HQ6GQmD8Nu3yUyZ No next leader found 104 | ``` 105 | 106 | If the validator is unable to vote or follow the heaviest fork, a reboot from a fresh snapshot is required. 107 | 108 | 5. Reset the Validator Ledger 109 | 110 | The simplest way to restart a validator is to delete the ledger directory and let the validator download a fresh snapshot from an entrypoint. 111 | 112 | Steps to Reset the Validator: 113 | 114 | ``` 115 | # Stop the validator 116 | 117 | sudo systemctl stop svmkit-agave-validator 118 | 119 | # Clear the ledger directory 120 | 121 | sudo rm -rf /home/sol/ledger/* 122 | 123 | # Restart the validator 124 | 125 | sudo systemctl restart svmkit-agave-validator 126 | ``` 127 | 128 | 6. Monitor Validator Recovery 129 | 130 | After restarting, ensure your validator is catching up: 131 | 132 | ``` 133 | solana catchup --our-localhost 134 | 31cNaRo15yoLaJ6sifaikdTbKpFJ4HQ6GQmD8Nu3yUyZ has caught up (us:6676 them:6676) 135 | ``` 136 | 137 | Once caught up, confirm the validator is voting again: 138 | 139 | ``` 140 | solana validators 141 | Identity Vote Account Commission Last Vote Root Slot Skip Rate Credits Version Active Stake 142 |   BKi2EPLT2nZcPimmZ8uUsyx6tquioeuYnRR8Bhoi4dKq 8zVEyvu2Bj4iAUqy6XMW8RhdU3K5gsP5hKVxBJQxFEgW 100% 9608 ( 0) 9577 ( 0) 0.00% 1376 1.18.26 9.999999344 SOL (25.00%) 143 |   2B79QyF9idx6fnDbsQto5kxmiphFaZihGQRhLHs1ULp1 7V4beuNHASzj7LXSz7bGaAoQXSjeb38QS9itFPy1LyJe 100% 9608 ( 0) 9577 ( 0) 0.00% 1376 1.18.26 9.999999344 SOL (25.00%) 144 |   HA6bFQhwq9D4Ky4ktQYXbJBWVTPd3waCF9aq5a4gscPG 6BwD54hsoopcELhKn6wmPv4MUzh8MiLiuT5bTCqVvvVQ 100% 9608 ( 0) 9577 ( 0) 0.00% 1376 1.18.26 9.999999344 SOL (25.00%) 145 |   31cNaRo15yoLaJ6sifaikdTbKpFJ4HQ6GQmD8Nu3yUyZ 6A4LNBdhtZdMdTaWhTVu7jKcuy5QPfoeDYfg2vnNvMCg 100% 9608 ( 0) 9577 ( 0) 0.00% 1376 1.18.26 9.999999344 SOL (25.00%) 146 | 147 | Average Stake-Weighted Skip Rate: 0.00% 148 | Average Unweighted Skip Rate: 0.00% 149 | 150 | Active Stake: 39.999997376 SOL 151 | 152 | Stake By Version: 153 | 1.18.26 - 4 current validators (100.00%) 154 | ``` 155 | -------------------------------------------------------------------------------- /aws-validator-fd-ts/README.md: -------------------------------------------------------------------------------- 1 | # Frankendancer running on AWS 2 | 3 | This example brings up a single Frakendancer validator on AWS. 4 | 5 | ## Pulumi Configuration Options 6 | 7 | | Name | Description | Default Value | 8 | |:--------------------|:------------------------------------------------------------------|:--------------| 9 | | solana:network | The known Solana cluster to connect to. | testnet | 10 | | node:instanceType | The AWS instance type to use for all of the nodes. 
| r7a.8xlarge | 11 | | node:instanceArch | The AWS architecture type to use for AMI lookup. | x86\_64 | 12 | | node:volumeIOPS | The number of IOPS to provide to the ledger and accounts volumes. | 5000 | 13 | | node:rootVolumeSize | The size of the AWS instance's root volume, in gigabytes. | 32 | 14 | | node:instanceAmi | The AMI to use for all of the nodes. | _(debian-12)_ | 15 | | node:user | The user to log into all of the nodes as. | admin | 16 | 17 | ### Performance testing 18 | 19 | The default configuration for IOPS is targeted at functional testing. 20 | For perfomance testing, set node:volumeIOPS to 16000. 21 | 22 | ## Running the Example 23 | 24 | 0. Have `pulumi` installed, logged in to wherever you're storing state, and configured to work with AWS. 25 | 26 | - https://www.pulumi.com/docs/iac/cli/commands/pulumi_login/ 27 | - https://github.com/pulumi/pulumi-aws?tab=readme-ov-file#configuration 28 | 29 | 1. Run `pulumi install`; this will install all of the required pieces for this example. 30 | 31 | ``` 32 | % pulumi install 33 | Installing dependencies... 34 | 35 | yarn install v1.22.22 36 | [1/4] 🔍 Resolving packages... 37 | [2/4] 🚚 Fetching packages... 38 | [3/4] 🔗 Linking dependencies... 39 | [4/4] 🔨 Building fresh packages... 40 | ✨ Done in 3.69s. 41 | Finished installing dependencies 42 | ``` 43 | 44 | 2. Create and select a Pulumi stack 45 | 46 | ``` 47 | % pulumi stack init new-validator 48 | Created stack 'new-validator' 49 | ``` 50 | 51 | 3. Run `pulumi up` 52 | 53 | ``` 54 | % pulumi up 55 | Previewing update (new-validator) 56 | 57 | View in Browser (Ctrl+O): https://app.pulumi.com/someaccount/aws-validator-fd-ts/new-validator/previews/3f1f47c9-006a-4d2e-afcc-9f4b8be067d7 58 | 59 | Type Name Plan 60 | + pulumi:pulumi:Stack aws-validator-fd-ts-new-validator create 61 | + ├─ svmkit:index:KeyPair validator-key create 62 | + ├─ tls:index:PrivateKey ssh-key create 63 | + ├─ svmkit:index:KeyPair vote-account-key create 64 | + ├─ aws:ec2:SecurityGroup security-group create 65 | + ├─ svmkit:index:KeyPair withdrawer-key create 66 | + ├─ aws:ec2:KeyPair keypair create 67 | + ├─ aws:ec2:Instance instance create 68 | + └─ svmkit:validator:Firedancer fd create 69 | 70 | Outputs: 71 | PUBLIC_DNS_NAME: output 72 | SSH_PRIVATE_KEY: output 73 | validatorKey : output 74 | voteAccountKey : output 75 | withdrawerKey : output 76 | 77 | Resources: 78 | + 9 to create 79 | 80 | . 81 | . 82 | . 83 | 84 | ``` 85 | 86 | 4. Verify that the validator has connected to the network. 87 | 88 | ``` 89 | % ./ssh-to-host 0 sudo journalctl -f -u svmkit-fd-validator 90 | INFO: saving validator host key in /var/folders/56/ljnh2nx524s73bm0dyw3hy780000gn/T/tmp.gsnuaIbzwS/tmp.uxlxVQYehz... 91 | Warning: Permanently added 'ec2-35-86-195-35.us-west-2.compute.amazonaws.com' (ED25519) to the list of known hosts. 
92 | Dec 22 13:04:23 ip-172-31-53-101 fdctl[2228]: NOTICE 12-22 13:04:23.220832 2228 14 bank:3 src/disco/topo/fd_topo_run.c(32): booting tile bank:3 pid:2227 tid:2284 93 | Dec 22 13:04:23 ip-172-31-53-101 fdctl[2228]: NOTICE 12-22 13:04:23.256807 2228 15 poh:0 src/disco/topo/fd_topo_run.c(32): booting tile poh:0 pid:2227 tid:2285 94 | Dec 22 13:04:23 ip-172-31-53-101 fdctl[2228]: NOTICE 12-22 13:04:23.284829 2228 17 store:0 src/disco/topo/fd_topo_run.c(32): booting tile store:0 pid:2227 tid:2286 95 | Dec 22 13:04:23 ip-172-31-53-101 fdctl[2228]: NOTICE 12-22 13:04:23.296823 2228 f0 agave src/app/fdctl/run/run_agave.c(208): booting agave pid:2227 96 | Dec 22 13:04:23 ip-172-31-53-101 fdctl[2228]: WARNING 12-22 13:04:23.328786 2228 f0 agave perf/src/lib.rs(51): CUDA is disabled 97 | Dec 22 13:04:23 ip-172-31-53-101 fdctl[2228]: WARNING 12-22 13:04:23.376601 9 f21 0 metrics/src/metrics.rs(324): datapoint: os-config vm.max_map_count=1000000i 98 | Dec 22 13:04:23 ip-172-31-53-101 fdctl[2228]: WARNING 12-22 13:04:23.380596 9 f21 0 metrics/src/metrics.rs(324): datapoint: os-config net.core.optmem_max=20480i 99 | Dec 22 13:04:23 ip-172-31-53-101 fdctl[2228]: WARNING 12-22 13:04:23.382228 9 f21 0 metrics/src/metrics.rs(324): datapoint: os-config net.core.netdev_max_backlog=1000i 100 | Dec 22 13:04:23 ip-172-31-53-101 fdctl[2228]: WARNING 12-22 13:04:23.421123 2228 f0 agave perf/src/perf_libs.rs(107): "/opt/frankendancer/bin/perf-libs" does not exist 101 | Dec 22 13:04:23 ip-172-31-53-101 fdctl[2276]: WARNING 12-22 13:04:23.883328 2276 20 gui:0 src/app/fdctl/run/tiles/fd_gui.c(417): GUI server listening at http://127.0.0.1:80 102 | ``` 103 | 104 | 5. Connect to the Frankendancer UI: 105 | 106 | ``` 107 | % ./ssh-to-host 0 -L 8080:localhost:80 108 | ``` 109 | 110 | Now use your local browser to access the proxied HTTP port via [http://localhost:8080](http://localhost:8080). 111 | 112 | 6. You can then do some of the following by manually: 113 | 114 | - Airdrop Solana to your validator's accounts. 115 | - Create a vote account for your validator. 116 | - Create stake for your validator. 117 | 118 | Please see the [Solana Operations](https://docs.solanalabs.com/operations/) manual for more information. 119 | To SSH into the validator node you just created, run `./ssh-to-host 0` with no additional arguments. 120 | 121 | 7. 
(Optional) Tear down the example 122 | 123 | ``` 124 | % pulumi down 125 | ``` 126 | -------------------------------------------------------------------------------- /aws-network-spe-py/token-demo: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ../bin/opsh 2 | # shellcheck shell=bash 3 | 4 | # Define default wallet files and transaction amounts 5 | PAYER_WALLET="payer.json" 6 | RECEIVER_WALLET="receiver.json" 7 | TRANSFER_AMOUNT=2 8 | MINT_AMOUNT=100 9 | TOKEN_TRANSFER_AMOUNT=50 10 | SPEINFO=speInfo 11 | 12 | # Import necessary libraries 13 | lib::import ssh 14 | 15 | # Parse command-line options for custom wallet files and transaction amounts 16 | while getopts "p:r:t:m:x:" opt; do 17 | case $opt in 18 | p) PAYER_WALLET="$OPTARG" ;; # Set payer wallet file 19 | r) RECEIVER_WALLET="$OPTARG" ;; # Set receiver wallet file 20 | t) TRANSFER_AMOUNT="$OPTARG" ;; # Set transfer amount from treasury to payer 21 | m) MINT_AMOUNT="$OPTARG" ;; # Set mint amount for tokens 22 | x) TOKEN_TRANSFER_AMOUNT="$OPTARG" ;; # Set token transfer amount from payer to receiver 23 | *) log::fatal "Invalid option: -$OPTARG" ;; # Handle invalid options 24 | esac 25 | done 26 | 27 | # Shift positional parameters to remove processed options 28 | shift $((OPTIND - 1)) 29 | 30 | # Ensure exactly one positional argument (statedir) is provided 31 | [[ $# -eq 1 ]] || log::fatal "usage: $0 [-p payer_wallet] [-r receiver_wallet] [-t transfer_amount] [-m mint_amount] [-x token_transfer_amount] " 32 | 33 | # Set the state directory and secure it 34 | STATEDIR=$1 35 | shift 36 | umask 077 37 | 38 | # Create and navigate to the state directory 39 | mkdir -p "$STATEDIR" 40 | cd "$STATEDIR" || log::fatal "couldn't enter state directory '$STATEDIR'!" 41 | log::info "storing demo state inside '$STATEDIR'..." 42 | 43 | # Dump SPE information into a file 44 | log::info "dumping SPE info..." 45 | pulumi stack output --show-secrets $SPEINFO >"$SPEINFO" 46 | 47 | # Function to retrieve information from the SPE info file 48 | get() { 49 | jq --exit-status -r "$@" <"$SPEINFO" || log::fatal "failed to get info for key $*" 50 | } 51 | 52 | # Begin SSH configuration 53 | ssh::begin 54 | 55 | # Configure SSH to ignore host key checking 56 | ssh::config </dev/null 2>&1 77 | solana-keygen new --outfile "$RECEIVER_WALLET" --no-passphrase --force >/dev/null 2>&1 78 | 79 | # Add any files that should be accessible to spl-token here 80 | remote_files=( 81 | "$PAYER_WALLET" 82 | ) 83 | 84 | tmpdir="$(mktemp -u '/tmp/svmkit-token-demo.XXXXXXXX')" 85 | tar -cz "${remote_files[@]}" | ssh -q "$ssh_address" mkdir "$tmpdir" '&&' tar -xzC "$tmpdir" 86 | 87 | tmpdir::delete() { 88 | ssh -q "$ssh_address" rm -rf "$tmpdir" 89 | } 90 | 91 | exit::trigger tmpdir::delete 92 | 93 | exec::spl-token() { 94 | ssh -q "$ssh_address" -- cd "$tmpdir" '&&' spl-token -ulocalhost "$@" 95 | } 96 | 97 | # Retrieve and log public keys for payer and receiver 98 | payer_pubkey=$(solana-keygen pubkey "$PAYER_WALLET") 99 | receiver_pubkey=$(solana-keygen pubkey "$RECEIVER_WALLET") 100 | log::info "Payer wallet created with public key: $payer_pubkey" 101 | log::info "Receiver wallet created with public key: $receiver_pubkey" 102 | 103 | # Transfer funds from treasury to payer 104 | exec::solana airdrop "$TRANSFER_AMOUNT" "$payer_pubkey" >/dev/null 2>&1 105 | payer_balance=$(exec::solana balance "$payer_pubkey") 106 | log::info "Airdropped $payer_balance from the faucet to the payer wallet." 
107 | 108 | # Create a new token mint and set payer as the mint authority 109 | mint=$(exec::spl-token create-token --mint-authority "$payer_pubkey" --fee-payer "$PAYER_WALLET" --output json-compact | jq -r '.commandOutput.address') 110 | 111 | # Create associated token accounts for payer and receiver 112 | payer_ata_signature=$(exec::spl-token create-account "$mint" --owner "$payer_pubkey" --fee-payer "$PAYER_WALLET" --output json-compact | jq -r '.signature') 113 | receiver_ata_signature=$(exec::spl-token create-account "$mint" --owner "$receiver_pubkey" --fee-payer "$PAYER_WALLET" --output json-compact | jq -r '.signature') 114 | log::info "Created payer associated token account with transaction: $payer_ata_signature" 115 | log::info "Created receiver associated token account with transaction: $receiver_ata_signature" 116 | 117 | # Retrieve and log associated token account addresses 118 | payer_ata_address=$(exec::spl-token address --token "$mint" --owner "$payer_pubkey" --verbose --output json-compact | jq -r '.associatedTokenAddress') 119 | receiver_ata_address=$(exec::spl-token address --token "$mint" --owner "$receiver_pubkey" --verbose --output json-compact | jq -r '.associatedTokenAddress') 120 | log::info "Payer associated token account address: $payer_ata_address" 121 | log::info "Receiver associated token account address: $receiver_ata_address" 122 | 123 | # Mint tokens to payer's associated token account 124 | mint_to_payer_signature=$(exec::spl-token mint --fee-payer "$PAYER_WALLET" --mint-authority "$PAYER_WALLET" --output json-compact "$mint" "$MINT_AMOUNT" -- "$payer_ata_address" | jq -r '.signature') 125 | log::info "Minted $MINT_AMOUNT tokens to payer with transaction: $mint_to_payer_signature" 126 | 127 | # Check and log the balance of the payer's wallet 128 | log::info "Checking balance of payer wallet" 129 | log::info "Payer Public Key: $payer_pubkey" 130 | log::info "Mint Address: $mint" 131 | balance=$(exec::spl-token balance --owner "$payer_pubkey" "$mint") 132 | log::info "Balance: $balance" 133 | 134 | # Transfer tokens from payer to receiver 135 | payer_to_receiver_signature=$(exec::spl-token transfer --fee-payer "$PAYER_WALLET" --owner "$PAYER_WALLET" --output json-compact "$mint" "$TOKEN_TRANSFER_AMOUNT" "$receiver_ata_address" | jq -r '.signature') 136 | log::info "Transfered $TOKEN_TRANSFER_AMOUNT tokens from payer to receiver with transaction: $payer_to_receiver_signature" 137 | 138 | # Check and log balances of payer and receiver after transfer 139 | after_payer_balance=$(exec::spl-token balance --owner "$payer_pubkey" "$mint") 140 | log::info "Balance of payer: $after_payer_balance" 141 | after_receiver_balance=$(exec::spl-token balance --owner "$receiver_pubkey" "$mint") 142 | log::info "Balance of receiver: $after_receiver_balance" 143 | 144 | # vim:set ft=sh: 145 | -------------------------------------------------------------------------------- /guides/HARD-FORK-RESTART.md: -------------------------------------------------------------------------------- 1 | # Hard Fork Restart 2 | 3 | This guide provides step-by-step instructions on how to perform a hard fork in an SVM network when the cluster cannot achieve consensus due to an insufficient number of validator participation. 4 | 5 | ## Understanding consensus 6 | 7 | Consensus is defined as at least 2/3 (66.67%) of the total stake agreeing on a fork. 
8 | 9 | For 3 nodes with equal stake, each validator has: 10 | 11 | 100% / 3 = 33.33% 12 | 13 | When one validator fails, the remaining two hold: 14 | 15 | 33.33% + 33.33% = 66.66% 16 | 17 | which is 0.01% short of consensus, and thus halts the cluster. 18 | 19 | ## Scenario: 3-Node Cluster Failure 20 | 21 | ``` 22 | $ solana validators 23 | 24 | Identity Vote Account Commission Last Vote Root Slot Active Stake 25 | 2wZXe6ZTaeq9sZ5TdjYNKCxLeEZjioFQ8fwkyuzt4RBB 9cSatxfzU33mmjA6AwuXkqmiH3gxxa65cRqqh9X7uVuB 100% 4343 4312 9.999999344 SOL (33.33%) 26 | HfdV6f5SBZFSVn3y6yLubFc358BGH3jHYWpiQLpoB88E 2v7p2rkk1zdQLtGtfJn4zkXxLkiKGyW9NbCHak2nbErt 100% 4343 4312 9.999999344 SOL (33.33%) 27 | 3C4VnPXBbN1aStk9Lej2zpLVBwqKJAHHi1Y3dn5r7zCY J15gWTa5aJWm3hFcRNSN1GfTckf2HbJrhZ81sWPT3j4X 100% 4343 4312 9.999999344 SOL (33.33%) 28 | ``` 29 | 30 | To simulate an outage stop any of the validators. 31 | 32 | ``` 33 | $ sudo systemctl stop svmkit-agave-validator.service 34 | ``` 35 | 36 | The stopped validator is marked as unknown. The remaining 2 validators will not reach consensus and voting halts. Note that this guide assumes equally distributed stake. If you have just deployed the cluster, please wait until all stake is activated. This will take several hours and is dependent on the `slots_per_epoch` configured at genesis. 37 | 38 | ``` 39 | $ solana validators 40 | 41 | Stake By Version: 42 | 2.1.9 - 2 current validators (66.67%) 43 | unknown - 1 current validators (33.33%) 44 | ``` 45 | 46 | ## Performing a Hard Fork 47 | 48 | Since the cluster cannot continue, a hard-fork restart is required. 49 | 50 | 1. Stop all validators 51 | 52 | ``` 53 | $ sudo systemctl stop svmkit-agave-validator.service 54 | ``` 55 | 56 | 2. Install the ledger tool (if not already installed) 57 | 58 | The agave-ledger-tool is required to check slots and create snapshots. 59 | 60 | ``` 61 | $ sudo apt-get install svmkit-agave-ledger-tool 62 | ``` 63 | 64 | 3. Check the latest optimistic slot on all validators 65 | 66 | ``` 67 | $ sudo -i -u sol agave-ledger-tool -l ledger latest-optimistic-slots 68 | ``` 69 | 70 | Output will look like: 71 | 72 | ``` 73 | Slot Hash Timestamp Vote Only? 74 | 157666 65DDoDsUFu5tf4N3HuxEhpqMiNeq4qqombE4VjLjdgE8 2025-03-06T12:55:04.579+00:00 true 75 | ``` 76 | 77 | If there are slight differences in the slot numbers between validators (off by 1 or 2), use a slot value that all validators have in common. If the difference is significant, you may need to use a slot from a previous epoch. 78 | 79 | 4. Generate hard fork snapshot on each validator 80 | 81 | Try using the common slot first: 82 | 83 | ``` 84 | $ sudo -i -u sol agave-ledger-tool create-snapshot [COMMON_SLOT] --hard-fork [COMMON_SLOT] 85 | ``` 86 | 87 | If you encounter an error like "The epoch accounts hash cannot be awaited when Invalid!", try using a slot from a previous epoch. 
For example: 88 | 89 | ``` 90 | $ sudo -i -u sol agave-ledger-tool create-snapshot 157600 --hard-fork 157600 91 | ``` 92 | 93 | Successful output should look like: 94 | 95 | ``` 96 | [2025-03-06T16:13:30.066226548Z INFO solana_metrics::metrics] datapoint: archive-snapshot-package slot=157600i archive_format="TarZstd" duration_ms=53i full-snapshot-archive-size=393107i 97 | Successfully created snapshot for slot 157600, hash 2kh5hvfnFSHkprX5dgHX8qBggBJjEHb5Y5LKdPnRxvvo: /home/sol/ledger/snapshot-157600-Fh11y82UnfnV3nkGYcJqcuauY8NsCwDmQYr27dNPATkn.tar.zst 98 | Shred version: 34346 99 | ``` 100 | 101 | Note the bank hash value (in the example above, it's `2kh5hvfnFSHkprX5dgHX8qBggBJjEHb5Y5LKdPnRxvvo`) that appears after "Successfully created snapshot for slot". 102 | 103 | 5. Add hard fork parameters to the validator startup command 104 | 105 | Edit the run-validator script on each validator: 106 | 107 | ``` 108 | $ sudo -i -u sol vim run-validator 109 | ``` 110 | 111 | Add the following parameters to the `agave-validator` command: 112 | 113 | ``` 114 | agave-validator \ 115 | --wait-for-supermajority [SLOT] \ 116 | --expected-bank-hash [BANK_HASH] \ 117 | --hard-fork [SLOT] \ 118 | --expected-shred-version [SHRED_VERSION] \ 119 | --no-snapshot-fetch \ 120 | [other existing parameters] 121 | ``` 122 | 123 | Example: 124 | ``` 125 | agave-validator \ 126 | --wait-for-supermajority 157600 \ 127 | --expected-bank-hash 2kh5hvfnFSHkprX5dgHX8qBggBJjEHb5Y5LKdPnRxvvo \ 128 | --hard-fork 157600 \ 129 | --expected-shred-version 34346 \ 130 | --no-snapshot-fetch \ 131 | [other existing parameters] 132 | ``` 133 | 134 | 6. Restart validators in sequence 135 | 136 | Start with the bootstrap validator first: 137 | 138 | ``` 139 | $ sudo systemctl restart svmkit-agave-validator.service 140 | ``` 141 | 142 | Wait for the bootstrap validator to stabilize before starting the other validators. This is important because the bootstrap validator only depends on its local ledger while other validators may require connections to working entrypoints. 143 | 144 | 7. Confirm block production is restored 145 | 146 | Check that all validators are now recognized with the correct version: 147 | 148 | ``` 149 | $ solana validators 150 | ``` 151 | 152 | Expected output: 153 | ``` 154 | Identity Vote Account Commission Last Vote Root Slot Skip Rate Credits Version Active Stake 155 | 8MgGF4yRhB5SRpyvgsJorgzVYoB5mJqZkVT7Pp8kkQq8 8mQZiAM5MUntnZGib83QUxCh4dybMLkCaErGbH7NXHnZ 100% 157637 ( 0) 157606 ( 0) 0.00% 32368 1.18.26 9.999999344 SOL (33.33%) 156 | Cmfqr43vcjW7qgRdJhgF6utTZFTjzKPJaucc8SdPoQ8t HRJUsDx8Mda6tdkfWBqrVPg71jUr3RE4kX9cFbvTRYT7 100% 157637 ( 0) 157606 ( 0) 0.00% 32368 1.18.26 9.999999344 SOL (33.33%) 157 | FeBH2q4qNmRCzNWVv82xeABERD9cd8u3qmEhmWLtSYQW 4Rz6Kty9qQJ1xYgCuXuQd6xa89rdMb7LzCgwUbcwzSaa 100% 157637 ( 0) 157606 ( 0) 0.00% 32368 1.18.26 9.999999344 SOL (33.33%) 158 | 159 | Stake By Version: 160 | 1.18.26 - 3 current validators (100.00%) 161 | ``` 162 | 163 | All validators should show the same version (in this example, 1.18.26). The version displayed will depend on your specific deployment, but the important part is that all validators show a consistent version with no "unknown" entries, and all have active stake. 164 | 165 | Once supermajority of active stake agrees on the fork, voting will resume without additional intervention. 166 | 167 | ## Troubleshooting 168 | 169 | - If validators stop with errors after restart, try restarting them in sequence: bootstrap first, then each additional validator. 
170 | - It may take some time for all validators to recognize each other and show the correct version information. 171 | - If snapshot creation fails with epoch-related errors, try selecting a slot further back in the previous epoch. -------------------------------------------------------------------------------- /guides/AWS-BENCHMARK-REPORT.md: -------------------------------------------------------------------------------- 1 | # Solana TPS Threshold Report: Instance Requirements Analysis 2 | 3 | ## Executive Summary 4 | 5 | This report provides a detailed analysis of Transactions Per Second (TPS) thresholds and the required AWS instance sizes to facilitate target TPS goals of 100, 1,000, 10,000, and 100,000. Based on comprehensive benchmarking tests across various AWS instance types and node configurations, we present possible hardware requirements for each performance tier. 6 | 7 | ## Key Findings and Recommendations 8 | 9 | | TPS Target | Recommended Instance | vCPUs | Memory | Network | Node Count | Actual TPS Achieved | Max TPS | Storage | 10 | |------------|---------------------|-------|--------|---------|------------|---------------------|---------|---------| 11 | | 100 TPS | c6i.xlarge | 4 | 8 GB | 12.5 Gbps | 3 | 106.70 (avg) | 108.97 | 500GB accounts, 1TB ledger | 12 | | 1,000 TPS | c6i.2xlarge | 8 | 16 GB | 12.5 Gbps | 3 | 988.82 (avg) | 1,008.78 | 750GB accounts, 1.5TB ledger | 13 | | 10,000 TPS | c6i.4xlarge | 16 | 32 GB | 12.5 Gbps | 3 | 9,483.11 (avg) | 12,816.68 | 1TB accounts, 2TB ledger | 14 | | 100,000 TPS | c6i.16xlarge | 64 | 128 GB | 25 Gbps | 10 | 72,060.38 (avg) | 117,542.17 | 3TB accounts, 6TB ledger | 15 | 16 | ## Detailed Test Results 17 | 18 | ### 100 TPS Performance Tier 19 | 20 | **Optimal Configuration**: c6i.xlarge (4 vCPUs, 8 GB RAM) with 3 nodes 21 | **Command Used**: 22 | ```bash 23 | solana-bench-tps \ 24 | --client-node-id /home/sol/validator-keypair.json \ 25 | --use-tpu-client \ 26 | --tx-count 100 \ 27 | --duration 60 \ 28 | --url "http://localhost:8899" \ 29 | --num-lamports-per-account 10000 30 | ``` 31 | 32 | **Performance Results**: 33 | - Average TPS: 106.70 34 | - Maximum TPS: 108.97 35 | - Drop Rate: 0.00% 36 | 37 | **Resource Utilization**: 38 | - CPU: 37.65% average 39 | - Memory: 24.19% average 40 | - Network Traffic: 595.97 KB/s in, 335.28 KB/s out 41 | - Disk I/O: 16,204.80 KB/s [^1] 42 | 43 | **Analysis**: The c6i.xlarge instance easily handled 100 TPS with significant headroom remaining. Resource utilization was moderate, with zero transaction drops, indicating a stable and reliable configuration for this performance tier. 44 | 45 | [^1]: The higher disk I/O observed in the c6i.xlarge instance (100 TPS test) compared to the c6i.2xlarge instance (1,000 TPS test) is due to memory management behavior. System metrics show aggressive memory reclamation (295,128 pages stolen per sample) with substantial dirty memory (~109,169 KB average) awaiting disk writes. Limited available memory (only ~107,941 KB) forced the Linux kernel to perform frequent cache flushes to disk, with write activity spikes reaching 58,000 KB/s. The c6i.xlarge operated under memory constraints, requiring more frequent page offloading to disk than the c6i.2xlarge, which had sufficient memory to maintain data in cache. 
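To reproduce the memory-pressure behavior described in the footnote above while a benchmark is running, standard Linux tooling on the validator host is enough. A minimal sketch (run each command in its own terminal; `iostat` is provided by the `sysstat` package):

```bash
# Dirty and writeback memory waiting to be flushed, plus memory still available (kB)
grep -E '^(Dirty|Writeback|MemAvailable):' /proc/meminfo

# Paging and block I/O activity, sampled every 5 seconds
vmstat 5

# Per-device write throughput and utilization, sampled every 5 seconds
iostat -xm 5
```

Sustained growth in `Dirty`/`Writeback` alongside a shrinking `MemAvailable` matches the cache-flush behavior observed on the c6i.xlarge during the 100 TPS run.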
46 | 47 | ### 1,000 TPS Performance Tier 48 | 49 | **Optimal Configuration**: c6i.2xlarge (8 vCPUs, 16 GB RAM) with 3 nodes 50 | **Command Used**: 51 | ```bash 52 | solana-bench-tps \ 53 | --client-node-id /home/sol/validator-keypair.json \ 54 | --use-tpu-client \ 55 | --tx-count 1000 \ 56 | --duration 60 \ 57 | --url "http://localhost:8899" \ 58 | --num-lamports-per-account 10000 59 | ``` 60 | 61 | **Performance Results**: 62 | - Average TPS: 988.82 63 | - Maximum TPS: 1,008.78 64 | - Drop Rate: 0.00% 65 | 66 | **Resource Utilization**: 67 | - CPU: 19.01% average 68 | - Memory: 17.80% average 69 | - Network Traffic: 1,088.32 KB/s in, 412.29 KB/s out 70 | - Disk I/O: 6,142.93 KB/s 71 | 72 | **Analysis**: The c6i.2xlarge instance demonstrated excellent performance at the 1,000 TPS level with low resource utilization and zero transaction drops. This configuration provides a good balance of cost and performance for applications requiring 1,000 TPS. 73 | 74 | ### 10,000 TPS Performance Tier 75 | 76 | **Optimal Configuration**: c6i.4xlarge (16 vCPUs, 32 GB RAM) with 3 nodes 77 | **Command Used**: 78 | ```bash 79 | solana-bench-tps \ 80 | --client-node-id /home/sol/validator-keypair.json \ 81 | --use-tpu-client \ 82 | --tx-count 10000 \ 83 | --duration 60 \ 84 | --url "http://localhost:8899" \ 85 | --num-lamports-per-account 10000 86 | ``` 87 | 88 | **Performance Results**: 89 | - Average TPS: 9,483.11 90 | - Maximum TPS: 12,816.68 91 | - Drop Rate: 0.01% 92 | 93 | **Resource Utilization**: 94 | - CPU: 18.22% average 95 | - Memory: 11.77% average 96 | - Network Traffic: 4,902.45 KB/s in, 1,122.54 KB/s out 97 | - Disk I/O: 45,373.29 KB/s 98 | 99 | **Analysis**: The c6i.4xlarge instance approached the 10,000 TPS target with very low resource utilization, achieving 9,483 TPS average and peaks over 12,800 TPS. With a minimal drop rate of 0.01%, this configuration is highly suitable for applications demanding near 10,000 TPS performance. 100 | 101 | ### 100,000 TPS Performance Tier 102 | 103 | **Optimal Configuration**: c6i.16xlarge (64 vCPUs, 128 GB RAM) with 10 nodes 104 | **Command Used**: 105 | ```bash 106 | solana-bench-tps \ 107 | --client-node-id /home/sol/validator-keypair.json \ 108 | --use-tpu-client \ 109 | --tx-count 100000 \ 110 | --duration 60 \ 111 | --url "http://localhost:8899" \ 112 | --num-lamports-per-account 10000 113 | ``` 114 | 115 | **Performance Results**: 116 | - Average TPS: 72,060.38 117 | - Maximum TPS: 117,542.17 118 | - Drop Rate: 0.08% 119 | 120 | **Resource Utilization**: 121 | - CPU: 30.73% average 122 | - Memory: 14.16% average 123 | - Network Traffic: 64,460.45 KB/s in, 9,924.43 KB/s out 124 | - Disk I/O: 234,303.52 KB/s 125 | 126 | **Analysis**: The 10-node setup with c6i.16xlarge instances delivered exceptional performance, achieving over 72,000 TPS on average with peaks above 117,000 TPS. This configuration demonstrated that Solana can approach and even exceed 100,000 TPS with proper hardware sizing. The low drop rate of 0.08% indicates the system maintained good reliability even at this high throughput level. 
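When collecting results like those above across multiple runs, it helps to keep only the summary lines that `solana-bench-tps` prints at the end of a run. A minimal sketch using the same command as this tier (the exact wording of the summary lines may vary between client releases):

```bash
# Capture the full output, then extract the end-of-run summary
solana-bench-tps \
  --client-node-id /home/sol/validator-keypair.json \
  --use-tpu-client \
  --tx-count 100000 \
  --duration 60 \
  --url "http://localhost:8899" \
  --num-lamports-per-account 10000 2>&1 | tee bench.log

grep -E 'Average TPS|Highest TPS|drop rate' bench.log
```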
127 | 128 | ## Cost Considerations for Production Deployments 129 | 130 | | TPS Target | Instance Type | Node Count | Monthly Cost (Estimated) | 131 | |------------|-----------------|------------|--------------------------| 132 | | 100 TPS | c6i.xlarge | 3 | ~$375 (On-Demand) | 133 | | 1,000 TPS | c6i.2xlarge | 3 | ~$750 (On-Demand) | 134 | | 10,000 TPS | c6i.4xlarge | 3 | ~$1,500 (On-Demand) | 135 | | 100,000 TPS | c6i.16xlarge | 10 | ~$22,000 (On-Demand) | 136 | 137 | *Note: Costs include instance pricing only; additional costs for storage, data transfer, and other AWS services would apply.* 138 | 139 | ## Conclusion 140 | 141 | This benchmarking study demonstrates that Solana can achieve impressive performance levels with proper hardware sizing: 142 | 143 | 1. **100 TPS Requirements**: Even modest hardware (c6i.xlarge) with 3 nodes can easily sustain this performance level with minimal resource utilization. 144 | 145 | 2. **1,000 TPS Requirements**: Slightly larger instances (c6i.2xlarge) with 3 nodes provide excellent performance at this tier with low resource consumption. 146 | 147 | 3. **10,000 TPS Requirements**: Mid-range instances (c6i.4xlarge) with 3 nodes can deliver performance approaching 10,000 TPS with relatively low resource utilization. 148 | 149 | 4. **100,000 TPS Requirements**: Achieving performance at this tier requires more powerful instances (c6i.16xlarge) and a larger validator set (10 nodes). While the tested configuration averaged over 72,000 TPS with peaks exceeding 117,000 TPS, sustained 100,000 TPS would likely require additional optimizations or nodes. 150 | 151 | For production deployments, we recommend selecting the appropriate instance type based on your TPS requirements and adding a 30-50% resource buffer to account for varying network conditions, transaction complexity, and future growth. -------------------------------------------------------------------------------- /gcp-network-spe-ts/README.md: -------------------------------------------------------------------------------- 1 | # Solana Permissioned Environment Inside a GCP VPC using TypeScript 2 | 3 | This example brings up a cluster of Solana validators, all using 4 | private addresses, inside a Google Compute VPC. Genesis is performed, 5 | a snapshot is distributed, and gossip is set up on private addresses 6 | inside the VPC. 7 | 8 | ## Pulumi Configuration Options 9 | 10 | | Name | Description | Required | Default Value | 11 | | :---------------- | :------------------------------------------------------------------------ | :------- | :------------ | 12 | | validator:version | The version of the validator APT package to install. | no | 2.2.14-1 | 13 | | node:count | The number of nodes to launch, including the bootstrap node. | no | 3 | 14 | | node:machineType | The instance type to use for all of the nodes. | no | n1-standard-4 | 15 | | node:osImage | The operating system image to use for the nodes. | no | debian-12 | 16 | | node:diskSize | The size of the volume to use for OS, accounts, and ledger, in gigabytes. | no | 64 | 17 | | gcp:project | The GCP project to create all resources under. | no | _(system)_ | 18 | | gcp:region | The GCP region to create all resources in. | yes | | 19 | | gcp:zone | The **fully-qualified** GCP availability zone to create all resources in. | yes | | 20 | | node:user | The user to log into all of the nodes as. | no | admin | 21 | 22 | NOTE: These configuration settings are not for a production usecase. 
They're sized to be allowed in 23 | free-tier GCP accounts, and demonstrate functional behavior. 24 | 25 | ## Running the Example 26 | 27 | 0. Have `pulumi` installed, logged in to wherever you're storing state, and configured to work with AWS. 28 | 29 | - https://www.pulumi.com/docs/iac/cli/commands/pulumi_login/ 30 | - https://www.pulumi.com/registry/packages/gcp/installation-configuration/ 31 | 32 | 1. Run `pulumi install`; this will install all of the required pieces for this example. 33 | 34 | ``` 35 | % pulumi install 36 | Installing dependencies... 37 | 38 | yarn install v1.22.22 39 | [1/4] 🔍 Resolving packages... 40 | [2/4] 🚚 Fetching packages... 41 | [3/4] 🔗 Linking dependencies... 42 | [4/4] 🔨 Building fresh packages... 43 | ✨ Done in 2.25s. 44 | Finished installing dependencies 45 | ``` 46 | 47 | 2. Create and select a Pulumi stack 48 | 49 | ``` 50 | % pulumi stack init new-spe 51 | Created stack 'new-spe' 52 | ``` 53 | 54 | 3. Run `pulumi up` 55 | 56 | ``` 57 | Previewing update (new-spe) 58 | 59 | View in Browser (Ctrl+O): https://app.pulumi.com/someuser/gcp-network-spe-ts/new-spe/previews/3288dae5-a59d-4458-85a3-1b3716cad12a 60 | 61 | Type Name Plan 62 | + pulumi:pulumi:Stack gcp-network-spe-ts-new-spe create 63 | + ├─ svmkit:index:KeyPair node1-vote-account-key create 64 | + ├─ svmkit:index:KeyPair node0-vote-account-key create 65 | + ├─ svmkit:index:KeyPair node1-validator-key create 66 | + ├─ svmkit:index:KeyPair stake-account-key create 67 | + ├─ svmkit:index:KeyPair treasury-key create 68 | + ├─ svmkit:index:KeyPair bootstrap-node-validator-key create 69 | + ├─ tls:index:PrivateKey node1-ssh-key create 70 | + ├─ svmkit:index:KeyPair faucet-key create 71 | + ├─ svmkit:index:KeyPair node0-stakeAccount-key create 72 | + ├─ svmkit:index:KeyPair node0-validator-key create 73 | + ├─ svmkit:index:KeyPair node1-stakeAccount-key create 74 | + ├─ svmkit:index:KeyPair bootstrap-node-vote-account-key create 75 | + ├─ tls:index:PrivateKey bootstrap-node-ssh-key create 76 | + ├─ gcp:compute:Network network create 77 | + ├─ tls:index:PrivateKey node0-ssh-key create 78 | + ├─ gcp:compute:Subnetwork subnet create 79 | + ├─ gcp:compute:Firewall external create 80 | + ├─ gcp:compute:Firewall internal create 81 | + ├─ gcp:compute:Instance node0-instance create 82 | + ├─ gcp:compute:Instance bootstrap-node-instance create 83 | + ├─ gcp:compute:Instance node1-instance create 84 | + ├─ svmkit:genesis:Solana genesis create 85 | + ├─ svmkit:validator:Agave node1-validator create 86 | + ├─ svmkit:account:Transfer node0-transfer create 87 | + ├─ svmkit:validator:Agave node0-validator create 88 | + ├─ svmkit:account:Transfer node1-transfer create 89 | + ├─ svmkit:account:VoteAccount node1-voteAccount create 90 | + ├─ svmkit:account:VoteAccount node0-voteAccount create 91 | + ├─ svmkit:faucet:Faucet bootstrap-faucet create 92 | + ├─ svmkit:validator:Agave bootstrap-node-validator create 93 | + ├─ svmkit:account:StakeAccount node1-stakeAccount create 94 | + └─ svmkit:account:StakeAccount node0-stakeAccount create 95 | 96 | . 97 | . 98 | . 99 | ``` 100 | 101 | 4. Access the bootstrap node to ensure the network is operational and communicating. Initially, only the bootstrap validator will confirm blocks. The other validators are set up to vote and participate in gossip but will not validate blocks until staked. 102 | 103 | ``` 104 | % ./ssh-to-host 0 105 | Warning: Permanently added '34.121.185.137' (ED25519) to the list of known hosts. 
106 | Linux bootstrap-node-instance-5d63d8f 6.1.0-29-cloud-amd64 #1 SMP PREEMPT_DYNAMIC Debian 6.1.123-1 (2025-01-02) x86_64 107 | 108 | The programs included with the Debian GNU/Linux system are free software; 109 | the exact distribution terms for each program are described in the 110 | individual files in /usr/share/doc/*/copyright. 111 | 112 | Debian GNU/Linux comes with ABSOLUTELY NO WARRANTY, to the extent 113 | permitted by applicable law. 114 | Last login: Sat Feb 8 21:13:57 2025 from 122.132.224.111 115 | admin@bootstrap-node-instance-5d63d8f:~$ solana gossip 116 | IP Address | Identity | Gossip | TPU | RPC Address | Version | Feature Set 117 | ----------------+----------------------------------------------+--------+-------+-----------------------+---------+---------------- 118 | 10.0.1.2 | 9ZJ37p2q1JVSwZtmLA9FsWyNdduBDLjTgmcbh6EF8Bf4 | 8001 | 8004 | 10.0.1.2:8899 | 1.18.26 | 3241752014 119 | 10.0.1.4 | 7GiDaQ78852T937aFJhz2wTPd6tJL3YvU1MqnAVE6H1y | 8001 | 8004 | 10.0.1.4:8899 | 1.18.26 | 3241752014 120 | 10.0.1.3 | DF8myMDobK25b8m2rPeacZYSm9KSdLHoMd89Q6r6Saaz | 8001 | 8004 | 10.0.1.3:8899 | 1.18.26 | 3241752014 121 | Nodes: 3 122 | 123 | admin@bootstrap-node-instance-5d63d8f:~$ solana validators 124 | Identity Vote Account Commission Last Vote Root Slot Skip Rate Credits Version Active Stake 125 |   7GiDaQ78852T937aFJhz2wTPd6tJL3YvU1MqnAVE6H1y FtzUx7aRfNrt1MmsnAnQfg3ztjrbHSRJqGDcf3AaiX1P 100% 222 (-89) 200 (-74) - 0 1.18.26 0.047024937 SOL (7.92%) 126 |   DF8myMDobK25b8m2rPeacZYSm9KSdLHoMd89Q6r6Saaz BivdSw5m9rWAUwZtAWPNi76BBRAr31JS7bGqv3qq7yeN 100% 219 (-92) 200 (-74) 100.00% 0 1.18.26 0.047024937 SOL (7.92%) 127 |   9ZJ37p2q1JVSwZtmLA9FsWyNdduBDLjTgmcbh6EF8Bf4 9e4YZEFiUU2b4UVeko2H3L7BQHX8vrSirefFnQ3At2pg 100% 311 ( 0) 274 ( 0) 0.00% 112 1.18.26 0.499999344 SOL (84.17%) 128 | 129 | Average Stake-Weighted Skip Rate: 7.92% 130 | Average Unweighted Skip Rate: 50.00% 131 | 132 | Active Stake: 0.594049218 SOL 133 | 134 | Stake By Version: 135 | 1.18.26 - 3 current validators (100.00%) 136 | ``` 137 | 138 | 5. (Optional) Tear down the example 139 | 140 | ``` 141 | % pulumi down 142 | ``` 143 | -------------------------------------------------------------------------------- /gcp-network-spe-ts/index.ts: -------------------------------------------------------------------------------- 1 | import * as pulumi from "@pulumi/pulumi"; 2 | import * as svmkit from "@svmkit/pulumi-svmkit"; 3 | import { Node, agaveVersion } from "./spe"; 4 | 5 | const nodeConfig = new pulumi.Config("node"); 6 | const totalNodes = nodeConfig.getNumber("count") ?? 3; 7 | const firewallConfig = new pulumi.Config("firewall"); 8 | const tunerConfig = new pulumi.Config("tuner"); 9 | 10 | const gossipPort = 8001; 11 | const rpcPort = 8899; 12 | const faucetPort = 9900; 13 | 14 | const faucetKey = new svmkit.KeyPair("faucet-key"); 15 | const treasuryKey = new svmkit.KeyPair("treasury-key"); 16 | const stakeAccountKey = new svmkit.KeyPair("stake-account-key"); 17 | 18 | const bootstrapNode = new Node("bootstrap-node"); 19 | 20 | const runnerConfig = {}; 21 | 22 | // Firewall setup 23 | const firewallVariant = 24 | firewallConfig.get("variant") ?? 
25 | svmkit.firewall.FirewallVariant.Generic; 26 | 27 | // Retrieve the default firewall parameters for that variant 28 | const genericFirewallParamsOutput = 29 | svmkit.firewall.getDefaultFirewallParamsOutput({ 30 | variant: firewallVariant, 31 | }); 32 | 33 | // "Apply" those params so we can pass them to the Firewall constructor 34 | const firewallParams = genericFirewallParamsOutput.apply((f) => ({ 35 | allowPorts: [ 36 | ...(f.allowPorts ?? []), 37 | "8000:8020/tcp", 38 | "8000:8020/udp", 39 | "8900/tcp", 40 | gossipPort.toString(), 41 | rpcPort.toString(), 42 | faucetPort.toString(), 43 | ], 44 | })); 45 | 46 | // Create the Firewall resource on the EC2 instance 47 | const _firewall = new svmkit.firewall.Firewall( 48 | "firewall", 49 | { 50 | connection: bootstrapNode.connection, 51 | params: firewallParams, 52 | }, 53 | { 54 | dependsOn: [bootstrapNode.machine], 55 | }, 56 | ); 57 | 58 | // Tuner setup 59 | const tunerVariant = 60 | tunerConfig.get("variant") ?? 61 | svmkit.tuner.TunerVariant.Generic; 62 | 63 | // Retrieve the default tuner parameters for that variant 64 | const genericTunerParamsOutput = svmkit.tuner.getDefaultTunerParamsOutput({ 65 | variant: tunerVariant, 66 | }); 67 | 68 | // "Apply" those params so we can pass them to the Tuner constructor 69 | const tunerParams = genericTunerParamsOutput.apply((p) => ({ 70 | cpuGovernor: p.cpuGovernor, 71 | kernel: p.kernel, 72 | net: p.net, 73 | vm: p.vm, 74 | fs: p.fs, 75 | })); 76 | 77 | // Create the Tuner resource on the EC2 instance 78 | const _tuner = new svmkit.tuner.Tuner( 79 | "tuner", 80 | { 81 | connection: bootstrapNode.connection, 82 | params: tunerParams, 83 | }, 84 | { 85 | dependsOn: [bootstrapNode.machine], 86 | }, 87 | ); 88 | 89 | const genesis = new svmkit.genesis.Solana( 90 | "genesis", 91 | { 92 | connection: bootstrapNode.connection, 93 | version: agaveVersion, 94 | flags: { 95 | ledgerPath: "/home/sol/ledger", 96 | bootstrapValidators: [ 97 | { 98 | identityPubkey: bootstrapNode.validatorKey.publicKey, 99 | votePubkey: bootstrapNode.voteAccountKey.publicKey, 100 | stakePubkey: stakeAccountKey.publicKey, 101 | }, 102 | ], 103 | faucetPubkey: faucetKey.publicKey, 104 | bootstrapValidatorStakeLamports: 10000000000, // 10 SOL 105 | enableWarmupEpochs: true, 106 | slotsPerEpoch: 8192, 107 | clusterType: "development", 108 | faucetLamports: 1000, 109 | targetLamportsPerSignature: 0, 110 | inflation: "none", 111 | lamportsPerByteYear: 1, 112 | }, 113 | primordial: [ 114 | { 115 | pubkey: bootstrapNode.validatorKey.publicKey, 116 | lamports: 1000000000000, // 1000 SOL 117 | }, 118 | { 119 | pubkey: treasuryKey.publicKey, 120 | lamports: 100000000000000, // 100000 SOL 121 | }, 122 | { 123 | pubkey: faucetKey.publicKey, 124 | lamports: 1000000000000, // 1000 SOL 125 | }, 126 | ], 127 | }, 128 | { 129 | dependsOn: [bootstrapNode.machine], 130 | }, 131 | ); 132 | 133 | const solEnv = { 134 | rpcURL: bootstrapNode.privateIP.apply((ip) => `http://${ip}:${rpcPort}`), 135 | }; 136 | 137 | const rpcFaucetAddress = bootstrapNode.privateIP.apply( 138 | (ip) => `${ip}:${faucetPort}`, 139 | ); 140 | 141 | const baseFlags: svmkit.types.input.agave.FlagsArgs = { 142 | onlyKnownRPC: false, 143 | rpcPort, 144 | dynamicPortRange: "8002-8020", 145 | privateRPC: false, 146 | gossipPort, 147 | rpcBindAddress: "0.0.0.0", 148 | walRecoveryMode: "skip_any_corrupted_record", 149 | limitLedgerSize: 50000000, 150 | blockProductionMethod: "central-scheduler", 151 | fullSnapshotIntervalSlots: 1000, 152 | noWaitForVoteToStartLeader: true, 
153 | useSnapshotArchivesAtStartup: "when-newest", 154 | allowPrivateAddr: true, 155 | rpcFaucetAddress, 156 | }; 157 | 158 | const bootstrapFlags: svmkit.types.input.agave.FlagsArgs = { 159 | ...baseFlags, 160 | fullRpcAPI: true, 161 | noVoting: false, 162 | gossipHost: bootstrapNode.privateIP, 163 | enableExtendedTxMetadataStorage: true, 164 | enableRpcTransactionHistory: true, 165 | }; 166 | 167 | const faucet = new svmkit.faucet.Faucet( 168 | "bootstrap-faucet", 169 | { 170 | connection: bootstrapNode.connection, 171 | keypair: faucetKey.json, 172 | flags: { 173 | perRequestCap: 1000, 174 | }, 175 | }, 176 | { 177 | dependsOn: [genesis], 178 | }, 179 | ); 180 | 181 | const bootstrapValidator = bootstrapNode.configureValidator( 182 | bootstrapFlags, 183 | solEnv, 184 | { 185 | waitForRPCHealth: true, 186 | }, 187 | [faucet], 188 | runnerConfig, 189 | ); 190 | 191 | const nonBootstrapNodes = [...Array(totalNodes - 1)].map( 192 | (_, i) => new Node(`node${i}`), 193 | ); 194 | const allNodes = [bootstrapNode, ...nonBootstrapNodes]; 195 | 196 | nonBootstrapNodes.forEach((node) => { 197 | const otherNodes = allNodes.filter((x) => x != node); 198 | const entryPoint = otherNodes.map((node) => 199 | node.privateIP.apply((v) => `${v}:${gossipPort}`), 200 | ); 201 | 202 | const _tuner = new svmkit.tuner.Tuner( 203 | node.name + "-tuner", 204 | { 205 | connection: node.connection, 206 | params: tunerParams, 207 | }, 208 | { 209 | dependsOn: [node.machine], 210 | }, 211 | ); 212 | 213 | const flags: svmkit.types.input.agave.FlagsArgs = { 214 | ...baseFlags, 215 | entryPoint, 216 | knownValidator: otherNodes.map((x) => x.validatorKey.publicKey), 217 | expectedGenesisHash: genesis.genesisHash, 218 | fullRpcAPI: node == bootstrapNode, 219 | gossipHost: node.privateIP, 220 | }; 221 | 222 | node.configureValidator( 223 | flags, 224 | solEnv, 225 | {}, 226 | [bootstrapValidator], 227 | runnerConfig, 228 | ); 229 | 230 | const transfer = new svmkit.account.Transfer( 231 | node.name + "-transfer", 232 | { 233 | connection: bootstrapNode.connection, 234 | transactionOptions: { 235 | keyPair: treasuryKey.json, 236 | }, 237 | amount: 100, 238 | recipientPubkey: node.validatorKey.publicKey, 239 | allowUnfundedRecipient: true, 240 | }, 241 | { 242 | dependsOn: [bootstrapValidator], 243 | }, 244 | ); 245 | const voteAccount = new svmkit.account.VoteAccount( 246 | node.name + "-voteAccount", 247 | { 248 | connection: bootstrapNode.connection, 249 | keyPairs: { 250 | identity: node.validatorKey.json, 251 | voteAccount: node.voteAccountKey.json, 252 | authWithdrawer: treasuryKey.json, 253 | }, 254 | }, 255 | { 256 | dependsOn: [transfer], 257 | }, 258 | ); 259 | 260 | const stakeAccountKey = new svmkit.KeyPair(node.name + "-stakeAccount-key"); 261 | new svmkit.account.StakeAccount( 262 | node.name + "-stakeAccount", 263 | { 264 | connection: bootstrapNode.connection, 265 | 266 | transactionOptions: { 267 | keyPair: treasuryKey.json, 268 | }, 269 | keyPairs: { 270 | stakeAccount: stakeAccountKey.json, 271 | voteAccount: node.voteAccountKey.json, 272 | }, 273 | amount: 10, 274 | }, 275 | { 276 | dependsOn: [voteAccount], 277 | }, 278 | ); 279 | }); 280 | 281 | export const nodes = allNodes.map((x) => { 282 | return { 283 | name: x.name, 284 | connection: x.connection, 285 | }; 286 | }); 287 | export const speInfo = { 288 | treasuryKey: treasuryKey, 289 | bootstrap: { 290 | connection: bootstrapNode.connection, 291 | }, 292 | otherValidators: nonBootstrapNodes.map((node) => ({ 293 | voteAccountKey: 
node.voteAccountKey, 294 | })), 295 | }; 296 | -------------------------------------------------------------------------------- /aws-network-spe-py/README.md: -------------------------------------------------------------------------------- 1 | # Solana Permissioned Environment Inside an AWS VPC 2 | 3 | This example brings up a cluster of Solana validators, all using private addresses, inside an AWS VPC. 4 | Genesis is performed, a snapshot is distributed, and gossip is set up on private addresses inside the VPC. 5 | 6 | ![An example of launching an SPE on AWS](https://github.com/abklabs/svmkit-media/blob/main/SVMKitSPELaunch/10x_speed_shorter.gif?raw=true) 7 | 8 | ## Here's a short explainer video: 9 | 10 | [![YouTube Video](https://img.youtube.com/vi/8rgUikRios4/0.jpg)](https://www.youtube.com/embed/8rgUikRios4?si=lqUuZfgD_9fImpG0) 11 | 12 | ## Performance Benchmarks 13 | 14 | The default configuration of this Solana validator cluster can achieve the following transaction processing performance: 15 | 16 | ``` 17 | [2025-04-04T18:42:43.913253413Z INFO solana_bench_tps::bench] Node address | Max TPS | Total Transactions 18 | [2025-04-04T18:42:43.913256071Z INFO solana_bench_tps::bench] ---------------------+---------------+-------------------- 19 | [2025-04-04T18:42:43.913258431Z INFO solana_bench_tps::bench] http://localhost:8899 | 7006.67 | 210480 20 | Average max TPS: 7006.67, 0 nodes had 0 TPS 21 | Highest TPS: 7006.67 sampling period 1s max transactions: 210480 clients: 1 drop rate: 0.00 22 | [2025-04-04T18:42:43.913305787Z INFO solana_bench_tps::bench] Average TPS: 3406.448 23 | ``` 24 | 25 | This benchmark was conducted using the following command: 26 | 27 | ```bash 28 | solana-bench-tps \ 29 | --client-node-id /home/sol/validator-keypair.json \ 30 | --use-tpu-client \ 31 | --tx-count 3500 \ 32 | --duration 60 \ 33 | --url "http://localhost:8899" \ 34 | --num-lamports-per-account 10000 35 | ``` 36 | 37 | System utilization during benchmark: 38 | - CPU Utilization: Average 56.19% 39 | - Memory Utilization: ~4.3GB 40 | - Network Traffic: ~1100Kbps inbound, ~2500Kbps outbound 41 | 42 | ## Pulumi Configuration Options 43 | 44 | | Name | Description | Default Value | 45 | | :------------------ | :---------------------------------------------------------------- | :------------ | 46 | | node:count | The number of nodes to launch, including the bootstrap node. | 3 | 47 | | node:instanceType | The AWS instance type to use for all of the nodes. | c6i.xlarge | 48 | | node:instanceArch | The AWS instance architecture type to use for the AMI lookup. | x86_64 | 49 | | node:volumeIOPS | The number of IOPS to provide to the ledger and accounts volumes. | 5000 | 50 | | node:rootVolumeSize | The size of the AWS instance's root volume, in gigabytes. | 32 | 51 | | node:swapSize | The number of gigabytes of swap space to allocate. | 8 | 52 | | node:instanceAmi | The AMI to use for all of the nodes. | _(debian-12)_ | 53 | | node:user | The user to log into all of the nodes as. | admin | 54 | | network:vpcId | The AWS VPC id to deploy the example into. | Default VPC | 55 | | validator:version | The version of the validator APT package to install. | 2.2.14-1 | 56 | 57 | Note: All EC2 instances must have a public IPv4 addresses. This is a limitation of the example, 58 | not of AWS or svmkit. 59 | 60 | ## Running the Example 61 | 62 | 0. Have `pulumi` installed, logged in to wherever you're storing state, and configured to work with AWS. 
63 | 64 | - https://www.pulumi.com/docs/iac/cli/commands/pulumi_login/ 65 | - https://github.com/pulumi/pulumi-aws?tab=readme-ov-file#configuration 66 | 67 | 1. Run `pulumi install`; this will install all of the required pieces for this example. 68 | 69 | ``` 70 | % pulumi install 71 | Installing dependencies... 72 | 73 | Creating virtual environment... 74 | Finished creating virtual environment 75 | Updating pip, setuptools, and wheel in virtual environment... 76 | Requirement already satisfied: pip in ./venv/lib/python3.13/site-packages (24.2) 77 | 78 | . 79 | . 80 | . 81 | 82 | Finished installing dependencies 83 | ``` 84 | 85 | 2. Create and select a Pulumi stack 86 | 87 | ``` 88 | % pulumi stack init new-spe 89 | Created stack 'new-spe' 90 | ``` 91 | 92 | 3. Run `pulumi up` 93 | 94 | ``` 95 | % pulumi up 96 | Please choose a stack, or create a new one: 97 | Please enter your desired stack name. 98 | To create a stack in an organization, use the format / (e.g. `acmecorp/dev`): dev 99 | Created stack 'dev' 100 | Previewing update (dev) 101 | 102 | View in Browser (Ctrl+O): https://app.pulumi.com/mylog/dev/previews/390sd21119-5cd0-497d-a945-d86738a9 103 | 104 | Type Name Plan Info 105 | + pulumi:pulumi:Stack aws-network-spe-py-dev create 1 message 106 | + ├─ aws:ec2:SecurityGroup external-access create 107 | + ├─ aws:ec2:SecurityGroup internal-access create 108 | + ├─ tls:index:PrivateKey bootstrap-node-ssh-key create 109 | + ├─ svmkit:index:KeyPair bootstrap-node-vote-account-key create 110 | + ├─ svmkit:index:KeyPair bootstrap-node-validator-key create 111 | + ├─ svmkit:index:KeyPair stake-account-key create 112 | + ├─ aws:ec2:KeyPair bootstrap-node-keypair create 113 | + ├─ svmkit:index:KeyPair faucet-key create 114 | + ├─ svmkit:index:KeyPair treasury-key create 115 | + ├─ svmkit:index:KeyPair node1-validator-key create 116 | + ├─ aws:ec2:Instance node1-instance create 117 | + ├─ svmkit:index:KeyPair node0-validator-key create 118 | + ├─ svmkit:index:KeyPair node1-vote-account-key create 119 | + ├─ svmkit:index:KeyPair node0-vote-account-key create 120 | + ├─ tls:index:PrivateKey node1-ssh-key create 121 | + ├─ aws:ec2:KeyPair node1-keypair create 122 | + ├─ aws:ec2:Instance node0-instance create 123 | + ├─ tls:index:PrivateKey node0-ssh-key create 124 | + ├─ aws:ec2:KeyPair node0-keypair create 125 | + ├─ aws:ec2:Instance bootstrap-node-instance create 126 | + ├─ svmkit:genesis:Solana genesis create 127 | + ├─ svmkit:validator:Agave node1-validator create 128 | + ├─ svmkit:validator:Agave bootstrap-node-validator create 129 | + └─ svmkit:validator:Agave node0-validator create 130 | 131 | Diagnostics: 132 | pulumi:pulumi:Stack (aws-network-spe-py-dev): 133 | 0 errors, 0 warnings, 0 informations 134 | ``` 135 | 136 | 4. Access the bootstrap node to ensure the network is operational and communicating. Initially, only the bootstrap validator will confirm blocks. The other validators are set up to vote and participate in gossip but will not validate blocks until staked. 137 | 138 | ``` 139 | % ./ssh-to-host 0 solana gossip 140 | Warning: Permanently added '34.221.138.152' (ED25519) to the list of known hosts. 
141 | IP Address | Identity | Gossip | TPU | RPC Address | Version | Feature Set 142 | ----------------+----------------------------------------------+--------+-------+-----------------------+---------+---------------- 143 | 172.31.15.168 | FaWcX8EgsvNVzneG9AWxPbc4tW7TwdGHoiLttS4vCJZX | 8001 | 8004 | 172.31.15.168:8899 | 1.18.24 | 3241752014 144 | 172.31.15.107 | DhCUqnynb172CV4SZBaSUBpC156SAMoeo6kBvcwNFbz7 | 8001 | 8004 | 172.31.15.107:8899 | 1.18.24 | 3241752014 145 | 172.31.7.92 | ENzVE5FCbgjQhrmRCtRowaWk16qjvJFScinpHf12rg9d | 8001 | 8004 | 172.31.7.92:8899 | 1.18.24 | 3241752014 146 | Nodes: 3 147 | 148 | % ./ssh-to-host 0 solana validators 149 | Warning: Permanently added '34.221.138.152' (ED25519) to the list of known hosts. 150 | Identity Vote Account Commission Last Vote Root Slot Skip Rate Credits Version Active Stake 151 |   FaWcX8EgsvNVzneG9AWxPbc4tW7TwdGHoiLttS4vCJZX FwHm1TwydnqnGskixP8Dhi3TWDhoJQYp6tPm2YFykjUi 100% 292 ( 0) 261 ( 0) 0.00% 0 1.18.24 0.499999344 SOL (100.00%) 152 | 153 | Average Stake-Weighted Skip Rate: 0.00% 154 | Average Unweighted Skip Rate: 0.00% 155 | 156 | Active Stake: 0.499999344 SOL 157 | 158 | Stake By Version: 159 | 1.18.24 - 1 current validators (100.00%) 160 | ``` 161 | 162 | 5. Run token demo script. 163 | 164 | This script mints a token and allocates a portion of the supply to a recipient. Initially, the cluster's treasury provides the necessary funds to the minter. 165 | 166 | ``` 167 | % ./token-demo token-demo-state 168 | ``` 169 | 170 | 6. Access the RPC and Explorer via Port Forwarding (Optional) 171 | 172 | In the example, the deployed validators and explorer are running remotely, so you’ll need to forward the relevant ports to your local machine if you wish to interact with the RPC API or view the Explorer in your browser. The Solana JSON RPC API typically listens on port 8899 and the Explorer on port 3000. 173 | 174 | 175 | Forward the ports to your local machine: 176 | 177 | ``` 178 | ./ssh-to-host 0 -L 8899:localhost:8899 -L 3000:localhost:3000 179 | ``` 180 | Test the RPC API by running the following command: 181 | ``` 182 | curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id": 1,"method": "getHealth"}' 183 | ``` 184 | Additionally you can set your local Solana config to the same port to interact with the cluster: 185 | ``` 186 | solana set --url http://localhost:8899 187 | 188 | solana block 189 | ``` 190 | View the explorer for the SPE in your browser at `http://localhost:3000` . Note that the explorer 191 | frontend requires access to the node as well, so you need to make sure you've forwarded the 192 | validator's RPC port to your local machine as well. 193 | 194 | 195 | 7. (Optional) Tear down the example 196 | 197 | ``` 198 | % pulumi down 199 | ``` 200 | -------------------------------------------------------------------------------- /bin/opsh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | #- SECTION 00-header 4 | set -euo pipefail 5 | IFS='' 6 | 7 | if [ -z "$BASH_VERSION" ]; then 8 | echo "FATAL: $0 requires bash to function properly!" 9 | exit 1 10 | fi 11 | 12 | if [[ ${BASH_VERSINFO[0]} -lt 4 ]]; then 13 | echo "FATAL: $0 requires bash v4 or greater!" 14 | exit 1 15 | fi 16 | 17 | shopt -s inherit_errexit 18 | set -o errtrace 19 | 20 | OPSHROOTDIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")"/.. 
&>/dev/null && pwd) 21 | 22 | _OPSH_VERSION="0.6.0" 23 | _OPSH_LIB_IMPORTED=() 24 | EXIT_FUNCS=() 25 | 26 | exit::trap() { 27 | local i 28 | for ((i = ${#EXIT_FUNCS[@]} - 1; i >= 0; i--)); do 29 | eval "${EXIT_FUNCS[$i]}" 30 | done 31 | } 32 | 33 | trap exit::trap EXIT 34 | 35 | exit::trigger() { 36 | EXIT_FUNCS+=("$@") 37 | } 38 | 39 | TMPDIR=$(mktemp -d) 40 | export TMPDIR 41 | 42 | temp::cleanup() { 43 | log::debug cleaning up "$TMPDIR"... 44 | rm -rf "$TMPDIR" 45 | } 46 | 47 | exit::trigger temp::cleanup 48 | 49 | # shellcheck disable=SC2120 # these options are optional. 50 | temp::file() { 51 | mktemp -p "$TMPDIR" "$@" 52 | } 53 | 54 | # shellcheck disable=SC2120 # these options are optional. 55 | temp::dir() { 56 | mktemp -d -p "$TMPDIR" "$@" 57 | } 58 | 59 | CRED='' 60 | CGRN='' 61 | CYEL='' 62 | CBLU='' 63 | CNONE='' 64 | 65 | if [[ -t 1 ]]; then 66 | CRED='\033[0;31m' 67 | CGRN='\033[0;32m' 68 | CYEL='\033[0;33m' 69 | CBLU='\033[0;34m' 70 | CNONE='\033[0m' 71 | fi 72 | 73 | log::output() { 74 | local level 75 | level="$1" 76 | shift 77 | 78 | printf "$level:\t%s\n" "$*" >&2 79 | } 80 | 81 | log::debug() { 82 | [[ -v DEBUG ]] || return 0 83 | 84 | log::output "${CBLU}DEBUG${CNONE}" "$@" 85 | } 86 | 87 | log::info() { 88 | log::output "${CGRN}INFO${CNONE}" "$@" 89 | } 90 | 91 | log::warn() { 92 | log::output "${CYEL}WARN${CNONE}" "$@" 93 | } 94 | 95 | log::error() { 96 | log::output "${CRED}ERROR${CNONE}" "$@" 97 | } 98 | 99 | log::fatal() { 100 | log::output "${CRED}FATAL${CNONE}" "$@" 101 | exit 1 102 | } 103 | 104 | lib::import::is-imported() { 105 | local libfile i 106 | libfile=$1 107 | shift 108 | 109 | for i in "${_OPSH_LIB_IMPORTED[@]}"; do 110 | if [[ $libfile == "$i" ]]; then 111 | return 0 112 | fi 113 | done 114 | 115 | return 1 116 | } 117 | 118 | lib::import() { 119 | local libfile 120 | for libname in "$@"; do 121 | if lib::import::is-imported "$libname"; then 122 | continue 123 | fi 124 | 125 | libfile="$OPSHROOTDIR/share/opsh/$libname.opsh" 126 | [[ -f $libfile ]] || log::fatal "library '$libname' not found!" 127 | 128 | # shellcheck disable=SC1090 129 | source "$libfile" 130 | _OPSH_LIB_IMPORTED+=("$libname") 131 | done 132 | } 133 | 134 | array::join() { 135 | local IFS=$1 136 | shift 137 | echo "$*" 138 | } 139 | 140 | array::split() { 141 | local -n name=$1 142 | shift 143 | local IFS=$1 144 | shift 145 | 146 | # shellcheck disable=SC2034,SC2206 147 | name=($@) 148 | } 149 | 150 | opsh::version() { 151 | if [[ -v _OPSH_VERSION ]]; then 152 | echo "$_OPSH_VERSION" 153 | return 0 154 | fi 155 | 156 | lib::import git 157 | 158 | git::repo::version 159 | } 160 | 161 | opsh::version::require() { 162 | local minver=$1 163 | shift 164 | 165 | local thisver 166 | thisver=$(opsh::version) || log::fatal "couldn't retrieve installed opsh version!" 167 | 168 | lib::import semver 169 | 170 | semver::test "$thisver" -ge "$minver" || log::fatal "opsh ($thisver) is older than required ($minver)!" 171 | } 172 | _OPSH_LIB_IMPORTED+=(apt) 173 | _OPSH_LIB_IMPORTED+=(cloud-init) 174 | _OPSH_LIB_IMPORTED+=(command) 175 | _OPSH_LIB_IMPORTED+=(git) 176 | _OPSH_LIB_IMPORTED+=(semver) 177 | _OPSH_LIB_IMPORTED+=(ssh) 178 | _OPSH_LIB_IMPORTED+=(step-runner) 179 | _OPSH_LIB_IMPORTED+=(test-harness) 180 | apt::env() { 181 | # Disable interactive frontends by default. 
182 | DEBIAN_FRONTEND=noninteractive 183 | export DEBIAN_FRONTEND 184 | 185 | : "${SUDO:=sudo --preserve-env=DEBIAN_FRONTEND}" 186 | : "${APT:=$SUDO apt-get -qy}" 187 | } 188 | 189 | apt::update() { 190 | ( 191 | apt::env 192 | $APT update "$@" 193 | ) 194 | } 195 | 196 | apt::install() { 197 | ( 198 | apt::env 199 | $APT install "$@" 200 | ) 201 | } 202 | lib::import command 203 | 204 | cloud-init::is-enabled() { 205 | command::exists cloud-init || return 1 206 | } 207 | 208 | cloud-init::wait-for-finish() { 209 | local ret=0 210 | 211 | cloud-init status --wait &>/dev/null || ret=$? 212 | 213 | case "$ret" in 214 | 0 | 2) 215 | return 0 216 | ;; 217 | *) 218 | return "$ret" 219 | ;; 220 | esac 221 | } 222 | command::exists() { 223 | local command=$1 224 | shift 225 | 226 | command -v "$command" &>/dev/null || return 1 227 | } 228 | git::repo::version() { 229 | git describe --tags --dirty 2>/dev/null || git rev-parse --short HEAD 230 | } 231 | 232 | git::repo::current-branch() { 233 | git rev-parse --abbrev-ref HEAD 234 | } 235 | 236 | git::repo::is-clean() { 237 | [[ $(git status --porcelain | wc -c) -eq 0 ]] || return 1 238 | } 239 | 240 | git::tag::exists() { 241 | local tag=$1 242 | shift 243 | 244 | [[ $(git tag -l "$tag") == "$tag" ]] || return 1 245 | } 246 | _OPSH_SEMVER_NUM='0|[1-9][0-9]*' 247 | _OPSH_SEMVER_REGEX="^[vV]?($_OPSH_SEMVER_NUM)\\.($_OPSH_SEMVER_NUM)\\.($_OPSH_SEMVER_NUM)((\+|-).+)?\$" 248 | 249 | OPSH_SEMVER=() 250 | 251 | semver::parse() { 252 | local ver=$1 253 | shift 254 | 255 | # shellcheck disable=SC2319 256 | [[ "$ver" =~ $_OPSH_SEMVER_REGEX ]] || return "$?" 257 | 258 | OPSH_SEMVER=("${BASH_REMATCH[@]:1:3}") 259 | 260 | if [[ -n ${BASH_REMATCH[4]} ]]; then 261 | OPSH_SEMVER+=("${BASH_REMATCH[4]}") 262 | fi 263 | } 264 | 265 | # Note: This is a very naive version of a semver test. It can be 266 | # extended in future to support a full set of expressions. 267 | 268 | semver::test() { 269 | semver::parse "$1" || return 1 270 | # shellcheck disable=SC2034 271 | local left=("${OPSH_SEMVER[@]}") 272 | shift 273 | 274 | local op=$1 275 | shift 276 | 277 | semver::parse "$1" || return 1 278 | # shellcheck disable=SC2034 279 | local right=("${OPSH_SEMVER[@]}") 280 | shift 281 | 282 | [[ $# -eq 0 ]] || return 1 283 | 284 | local gtret ltret eqret 285 | 286 | case "$op" in 287 | -eq) 288 | gtret=1 ltret=1 eqret=0 289 | ;; 290 | -gt) 291 | gtret=0 ltret=1 eqret=1 292 | ;; 293 | -lt) 294 | gtret=1 ltret=0 eqret=1 295 | ;; 296 | -ge) 297 | gtret=0 ltret=1 eqret=0 298 | ;; 299 | -le) 300 | gtret=1 ltret=0 eqret=0 301 | ;; 302 | *) 303 | log::fatal "unknown semver expression comparison operator '$op'!" 
304 | ;; 305 | esac 306 | 307 | for i in {0..2}; do 308 | if [[ ${left[$i]} -gt ${right[$i]} ]]; then 309 | return $gtret 310 | fi 311 | if [[ ${left[$i]} -lt ${right[$i]} ]]; then 312 | return $ltret 313 | fi 314 | done 315 | 316 | return $eqret 317 | 318 | } 319 | 320 | semver::bump() { 321 | local -l pos=$1 322 | shift 323 | 324 | local ver=$1 325 | shift 326 | 327 | local prefix="" 328 | 329 | if [[ "$ver" =~ ^([vV]) ]]; then 330 | prefix="${BASH_REMATCH[0]}" 331 | fi 332 | 333 | semver::parse "$ver" || return 1 334 | 335 | local idx 336 | 337 | case "$pos" in 338 | major) 339 | idx=0 340 | ;; 341 | minor) 342 | idx=1 343 | ;; 344 | patch) 345 | idx=2 346 | ;; 347 | *) 348 | return 1 349 | ;; 350 | esac 351 | 352 | OPSH_SEMVER[idx]=$((OPSH_SEMVER[idx] + 1)) 353 | 354 | for ((idx = idx + 1; idx < 3; idx++)); do 355 | OPSH_SEMVER[idx]=0 356 | done 357 | 358 | prefix="$prefix$(array::join . "${OPSH_SEMVER[@]:0:3}")" 359 | 360 | if [[ ${#OPSH_SEMVER[@]} -eq 4 ]]; then 361 | prefix="$prefix${OPSH_SEMVER[3]}" 362 | fi 363 | 364 | echo "$prefix" 365 | } 366 | export _OPSH_SSH_CONTEXT 367 | 368 | ssh::end() { 369 | [[ -v _OPSH_SSH_CONTEXT ]] || return 0 370 | eval "$(cat "$_OPSH_SSH_CONTEXT/env")" 371 | eval "$(ssh-agent -k | grep -v echo)" 372 | unset _OPSH_SSH_CONTEXT 373 | } 374 | 375 | ssh::begin() { 376 | _OPSH_SSH_CONTEXT=$(temp::dir) 377 | chmod 700 "$_OPSH_SSH_CONTEXT" 378 | 379 | log::debug "launching local SSH agent..." 380 | ssh-agent | grep -v echo >"$_OPSH_SSH_CONTEXT/env" 2>/dev/null 381 | eval "$(cat "$_OPSH_SSH_CONTEXT/env")" 382 | 383 | exit::trigger ssh::end 384 | } 385 | 386 | ssh::config() { 387 | cat >>"$_OPSH_SSH_CONTEXT/config" 388 | } 389 | 390 | ssh::background::close() { 391 | log::debug "closing SSH port forwarding..." 392 | 393 | echo >&"${_OPSH_SSH_COPROC[1]}" 394 | wait "$_OPSH_SSH_COPROC_PID" 395 | } 396 | 397 | ssh::background::run() { 398 | local response 399 | log::debug "launching port forwarding..." 400 | coproc _OPSH_SSH_COPROC { ssh -F "$_OPSH_SSH_CONTEXT/config" "$@" "echo goliath online ; read"; } 401 | read -r response <&"${_OPSH_SSH_COPROC[0]}" 402 | 403 | [[ $response = "goliath online" ]] || log::fatal "failed to port forward" 404 | 405 | exit::trigger ssh::background::close 406 | } 407 | 408 | ssh::key::add() { 409 | local keyfile 410 | keyfile=$(temp::file) 411 | chmod 600 "$keyfile" 412 | 413 | if [[ $# -gt 0 ]]; then 414 | for i in "$@"; do 415 | cat "$i" >"$keyfile" 416 | ssh-add "$keyfile" 2>/dev/null 417 | done 418 | else 419 | cat >"$keyfile" 420 | ssh-add "$keyfile" 2>/dev/null 421 | 422 | fi 423 | rm "$keyfile" 424 | } 425 | # Run a series of functions, in order, starting with a prefix. 426 | steps::run() { 427 | local prefix start name 428 | 429 | prefix=$1 430 | shift 431 | start="" 432 | 433 | if [[ $# -gt 0 ]]; then 434 | start="${prefix}::$1" 435 | shift 436 | log::warn "starting steps with $start..." 437 | fi 438 | 439 | while read -r name; do 440 | if [[ $name > $start || $name = "$start" ]]; then 441 | log::info "running step $name..." 442 | $name 443 | fi 444 | done < <(declare -F | grep "$prefix::" | awk '{ print $3; }') 445 | } 446 | # NOTE: This API is in flux. It will probably change over time. 
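# The test-harness library below is a minimal TAP (Test Anything Protocol)
# version 13 reporter: register test functions with testing::register
# (optionally passing a description), then call testing::run to execute each
# function in a subshell and print an "ok"/"not ok" line per test, echoing any
# captured output as "# " comment lines.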
447 | 448 | _TESTING_REGISTERED_FUNCS=() 449 | _TESTING_REGISTERED_DESC=() 450 | 451 | testing::register() { 452 | local func 453 | func=$1 454 | shift 455 | 456 | _TESTING_REGISTERED_FUNCS+=("$func") 457 | 458 | if [[ $# -gt 0 ]]; then 459 | _TESTING_REGISTERED_DESC+=("$*") 460 | else 461 | _TESTING_REGISTERED_DESC+=("") 462 | fi 463 | } 464 | 465 | testing::run() { 466 | echo "TAP version 13" 467 | echo "1..${#_TESTING_REGISTERED_FUNCS[@]}" 468 | 469 | local res desc outfile n 470 | 471 | outfile=$(temp::file) 472 | 473 | n=1 474 | for func in "${_TESTING_REGISTERED_FUNCS[@]}"; do 475 | res=0 476 | ("$func") >"$outfile" 2>&1 || res=$? 477 | 478 | if [[ $res -ne 0 ]]; then 479 | echo -n "not " 480 | fi 481 | 482 | desc="${_TESTING_REGISTERED_DESC[$((n - 1))]}" 483 | 484 | if [[ -z $desc ]]; then 485 | echo "ok $n" 486 | else 487 | echo "ok $n - $desc" 488 | fi 489 | 490 | if [[ -s $outfile ]]; then 491 | sed 's:^:# :' <"$outfile" 492 | fi 493 | 494 | n=$((n + 1)) 495 | done 496 | } 497 | 498 | testing::fail() { 499 | local msg="" 500 | 501 | if [[ $# -gt 0 ]]; then 502 | msg=": $*" 503 | fi 504 | 505 | log::fatal "${BASH_SOURCE[1]}:${BASH_LINENO[0]} inside ${FUNCNAME[1]}${msg}" 506 | } 507 | #- SECTION 20-command 508 | if [[ $# -lt 1 ]]; then 509 | log::fatal "$0 requires a single argument of the script to run!" 510 | fi 511 | 512 | SCRIPTFILE="$1" 513 | shift 514 | [[ -f $SCRIPTFILE ]] || log::fatal "$0 can only run normal files that exist!" 515 | 516 | # shellcheck disable=SC2034 517 | SCRIPTDIR=$(dirname -- "$SCRIPTFILE") 518 | 519 | BASH_ARGV0=$SCRIPTFILE 520 | # shellcheck disable=SC1090 521 | source "$SCRIPTFILE" 522 | #- SECTION 30-end 523 | -------------------------------------------------------------------------------- /aws-network-spe-py/__main__.py: -------------------------------------------------------------------------------- 1 | import pulumi 2 | import pulumi_aws as aws 3 | import pulumi_tls as tls 4 | import pulumi_svmkit as svmkit 5 | from typing import cast 6 | 7 | 8 | from spe import Node, agave_version, user 9 | 10 | GOSSIP_PORT = 8001 11 | RPC_PORT = 8899 12 | FAUCET_PORT = 9900 13 | EXPLORER_PORT = 3000 14 | 15 | node_config = pulumi.Config("node") 16 | 17 | total_nodes = node_config.get_int("count") or 3 18 | 19 | tuner_config = pulumi.Config("tuner") 20 | firewall_config = pulumi.Config("firewall") 21 | 22 | # Watchtower Notification Config 23 | watchtower_config = pulumi.Config("watchtower") 24 | 25 | slack_webhook_url = watchtower_config.get("slack_webhook_url") or None 26 | discord_webhook_url = watchtower_config.get("discord_webhook_url") or None 27 | telegram_bot_token = watchtower_config.get("telegram_bot_token") or None 28 | telegram_chat_id = watchtower_config.get("telegram_chat_id") or None 29 | pagerduty_integration_key = watchtower_config.get( 30 | "pagerduty_integration_key") or None 31 | twilio_account_sid = watchtower_config.get("twilio_account_sid") or None 32 | twilio_auth_token = watchtower_config.get("twilio_auth_token") or None 33 | twilio_to_number = watchtower_config.get("twilio_to_number") or None 34 | twilio_from_number = watchtower_config.get("twilio_from_number") or None 35 | 36 | bootstrap_node = Node("bootstrap-node") 37 | faucet_key = svmkit.KeyPair("faucet-key") 38 | treasury_key = svmkit.KeyPair("treasury-key") 39 | stake_account_key = svmkit.KeyPair("stake-account-key") 40 | 41 | genesis = svmkit.genesis.Solana( 42 | "genesis", 43 | connection=bootstrap_node.connection, 44 | version=agave_version, 45 | flags={ 46 | 
"bootstrap_validators": [ 47 | { 48 | "identity_pubkey": bootstrap_node.validator_key.public_key, 49 | "vote_pubkey": bootstrap_node.vote_account_key.public_key, 50 | "stake_pubkey": stake_account_key.public_key, 51 | } 52 | ], 53 | "ledger_path": "/home/sol/ledger", 54 | "faucet_pubkey": faucet_key.public_key, 55 | "bootstrap_validator_stake_lamports": 10000000000, # 10 SOL 56 | "enable_warmup_epochs": True, 57 | "slots_per_epoch": 8192, 58 | "cluster_type": "development", 59 | "faucet_lamports": 1000, 60 | "target_lamports_per_signature": 0, 61 | "inflation": "none", 62 | "lamports_per_byte_year": 1 63 | }, 64 | primordial=[ 65 | { 66 | "pubkey": bootstrap_node.validator_key.public_key, 67 | "lamports": 1000000000000, # 1000 SOL 68 | }, 69 | { 70 | "pubkey": treasury_key.public_key, 71 | "lamports": 100000000000000, # 100000 SOL 72 | }, 73 | { 74 | "pubkey": faucet_key.public_key, 75 | "lamports": 1000000000000, # 1000 SOL 76 | }, 77 | ], 78 | opts=pulumi.ResourceOptions( 79 | depends_on=[bootstrap_node.machine]) 80 | ) 81 | 82 | sol_env = svmkit.solana.EnvironmentArgs( 83 | rpc_url=bootstrap_node.instance.private_ip.apply( 84 | lambda ip: f"http://{ip}:{RPC_PORT}") 85 | ) 86 | 87 | rpc_faucet_address = bootstrap_node.instance.private_ip.apply( 88 | lambda ip: f"{ip}:{FAUCET_PORT}" 89 | ) 90 | 91 | base_flags = svmkit.agave.FlagsArgsDict({ 92 | "only_known_rpc": False, 93 | "rpc_port": RPC_PORT, 94 | "dynamic_port_range": "8002-8020", 95 | "private_rpc": False, 96 | "gossip_port": GOSSIP_PORT, 97 | "rpc_bind_address": "0.0.0.0", 98 | "wal_recovery_mode": "skip_any_corrupted_record", 99 | "limit_ledger_size": 50000000, 100 | "block_production_method": "central-scheduler", 101 | "full_snapshot_interval_slots": 1000, 102 | "no_wait_for_vote_to_start_leader": True, 103 | "use_snapshot_archives_at_startup": "when-newest", 104 | "allow_private_addr": True, 105 | "rpc_faucet_address": rpc_faucet_address, 106 | }) 107 | 108 | bootstrap_flags = base_flags.copy() 109 | bootstrap_flags.update({ 110 | "full_rpc_api": True, 111 | "no_voting": False, 112 | "gossip_host": bootstrap_node.instance.private_ip, 113 | "extra_flags": [ 114 | "--enable-extended-tx-metadata-storage", # Enabled so that 115 | "--enable-rpc-transaction-history", # Solana Explorer has 116 | # the data it needs. 
117 | ] 118 | }) 119 | 120 | faucet = svmkit.faucet.Faucet( 121 | "bootstrap-faucet", 122 | connection=bootstrap_node.connection, 123 | keypair=faucet_key.json, 124 | flags={ 125 | "per_request_cap": 1000, 126 | }, 127 | opts=pulumi.ResourceOptions(depends_on=([genesis]))) 128 | 129 | bootstrap_validator = bootstrap_node.configure_validator( 130 | bootstrap_flags, environment=sol_env, startup_policy={ 131 | "wait_for_rpc_health": True}, 132 | depends_on=[faucet]) 133 | 134 | explorer = svmkit.explorer.Explorer( 135 | "bootstrap-explorer", 136 | connection=bootstrap_node.connection, 137 | environment=sol_env, 138 | name="Demo", 139 | symbol="DMO", 140 | cluster_name="demonet", 141 | rpcurl="http://localhost:8899", 142 | flags={ 143 | "hostname": "0.0.0.0", 144 | "port": EXPLORER_PORT, 145 | }, 146 | opts=pulumi.ResourceOptions(depends_on=([bootstrap_validator]))) 147 | 148 | nodes = [Node(f"node{n}") for n in range(total_nodes - 1)] 149 | all_nodes = [bootstrap_node] + nodes 150 | 151 | for node in nodes: 152 | other_nodes = [x for x in all_nodes if x != node] 153 | entry_point = [x.instance.private_ip.apply( 154 | lambda v: f"{v}:{GOSSIP_PORT}") for x in other_nodes] 155 | 156 | flags = base_flags.copy() 157 | flags.update({ 158 | "entry_point": entry_point, 159 | "known_validator": [x.validator_key.public_key for x in other_nodes], 160 | "expected_genesis_hash": genesis.genesis_hash, 161 | "full_rpc_api": node == bootstrap_node, 162 | "gossip_host": node.instance.private_ip, 163 | }) 164 | 165 | validator = node.configure_validator(flags, environment=sol_env, startup_policy=svmkit.agave.StartupPolicyArgs(), 166 | depends_on=[bootstrap_validator]) 167 | 168 | transfer = svmkit.account.Transfer(node.name + "-transfer", 169 | connection=bootstrap_node.connection, 170 | transaction_options={ 171 | "key_pair": treasury_key.json, 172 | }, 173 | amount=100, 174 | recipient_pubkey=node.validator_key.public_key, 175 | allow_unfunded_recipient=True, 176 | opts=pulumi.ResourceOptions(depends_on=[bootstrap_validator])) 177 | 178 | vote_account = svmkit.account.VoteAccount(node.name + "-voteAccount", 179 | connection=bootstrap_node.connection, 180 | key_pairs={ 181 | "identity": node.validator_key.json, 182 | "vote_account": node.vote_account_key.json, 183 | "auth_withdrawer": treasury_key.json, 184 | }, 185 | opts=pulumi.ResourceOptions(depends_on=([transfer]))) 186 | 187 | stake_account_key = svmkit.KeyPair(node.name + "-stakeAccount-key") 188 | svmkit.account.StakeAccount(node.name + "-stakeAccount", 189 | connection=bootstrap_node.connection, 190 | transaction_options={ 191 | "key_pair": treasury_key.json, 192 | }, 193 | key_pairs={ 194 | "stake_account": stake_account_key.json, 195 | "vote_account": node.vote_account_key.json, 196 | }, 197 | amount=10, 198 | opts=pulumi.ResourceOptions(depends_on=([vote_account]))) 199 | 200 | watchtower_notifications: svmkit.watchtower.NotificationConfigArgsDict = {} 201 | 202 | if slack_webhook_url: 203 | watchtower_notifications["slack"] = cast(svmkit.watchtower.SlackConfigArgsDict, { 204 | "webhookUrl": slack_webhook_url 205 | }) 206 | 207 | if discord_webhook_url: 208 | watchtower_notifications["discord"] = cast(svmkit.watchtower.DiscordConfigArgsDict, { 209 | "webhookUrl": discord_webhook_url 210 | }) 211 | 212 | if telegram_bot_token and telegram_chat_id: 213 | watchtower_notifications["telegram"] = cast(svmkit.watchtower.TelegramConfigArgsDict, { 214 | "botToken": telegram_bot_token, 215 | "chatId": telegram_chat_id 216 | }) 217 | 218 | if 
pagerduty_integration_key: 219 | watchtower_notifications["pager_duty"] = cast(svmkit.watchtower.PagerDutyConfigArgsDict, { 220 | "integrationKey": pagerduty_integration_key 221 | }) 222 | 223 | if twilio_account_sid and twilio_auth_token and twilio_to_number and twilio_from_number: 224 | watchtower_notifications["twilio"] = cast(svmkit.watchtower.TwilioConfigArgsDict, { 225 | "accountSid": twilio_account_sid, 226 | "authToken": twilio_auth_token, 227 | "toNumber": twilio_to_number, 228 | "fromNumber": twilio_from_number 229 | }) 230 | 231 | watchtower = svmkit.watchtower.Watchtower( 232 | 'bootstrap-watchtower', 233 | connection=bootstrap_node.connection, 234 | environment=sol_env, 235 | flags={ 236 | "validator_identity": [node.validator_key.public_key for node in all_nodes], 237 | }, 238 | notifications=watchtower_notifications, 239 | opts=pulumi.ResourceOptions(depends_on=([bootstrap_validator])) 240 | ) 241 | 242 | tuner_variant_name = tuner_config.get("variant") or "generic" 243 | tuner_variant = svmkit.tuner.TunerVariant(tuner_variant_name) 244 | 245 | generic_tuner_params_output = svmkit.tuner.get_default_tuner_params_output( 246 | variant=tuner_variant) 247 | 248 | 249 | tuner_params = generic_tuner_params_output.apply(lambda p: cast(svmkit.tuner.TunerParamsArgsDict, { 250 | "cpu_governor": p.cpu_governor, 251 | "kernel": p.kernel, 252 | "net": p.net, 253 | "vm": p.vm, 254 | "fs": p.fs, 255 | })) 256 | 257 | pulumi.export("tuner_params", tuner_params) 258 | 259 | firewall_variant_name = firewall_config.get("variant") or "generic" 260 | firewall_variant = svmkit.firewall.FirewallVariant(firewall_variant_name) 261 | 262 | generic_firewall_params_output = svmkit.firewall.get_default_firewall_params_output( 263 | variant=firewall_variant) 264 | 265 | firewall_params = generic_firewall_params_output.apply(lambda f: cast(svmkit.firewall.FirewallParamsArgsDict, { 266 | "allow_ports": list(dict.fromkeys( 267 | list(f.allow_ports or []) + 268 | [ 269 | "dns", 270 | "ssh", 271 | "8000:8020/tcp", 272 | "8000:8020/udp", 273 | "8900/tcp", 274 | str(GOSSIP_PORT), 275 | str(RPC_PORT), 276 | str(FAUCET_PORT), 277 | str(EXPLORER_PORT) 278 | ] 279 | )) 280 | }) 281 | ) 282 | 283 | pulumi.export("firewall_params", firewall_params) 284 | 285 | for node in all_nodes: 286 | tuner = svmkit.tuner.Tuner( 287 | node.name + "-tuner", 288 | connection=node.connection, 289 | params=tuner_params, 290 | opts=pulumi.ResourceOptions(depends_on=([node.machine])) 291 | ) 292 | 293 | firewall = svmkit.firewall.Firewall( 294 | node.name + "-firewall", 295 | connection=node.connection, 296 | params=firewall_params, 297 | opts=pulumi.ResourceOptions(depends_on=([node.machine])) 298 | 299 | ) 300 | 301 | pulumi.export("nodes", 302 | [{ 303 | "name": x.name, 304 | "connection": x.connection, 305 | } for x in all_nodes]) 306 | 307 | pulumi.export("speInfo", 308 | { 309 | "treasuryKey": treasury_key, 310 | "bootstrap": { 311 | "connection": bootstrap_node.connection 312 | }, 313 | "otherValidators": [{"voteAccountKey": node.vote_account_key} for node in nodes], 314 | }) 315 | --------------------------------------------------------------------------------
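Configuration note: `aws-network-spe-py/__main__.py` above reads several optional Pulumi config values from the `node`, `tuner`, `firewall`, and `watchtower` namespaces. A minimal sketch of setting a few of them before `pulumi up`; the values shown are placeholders, and the program defaults to 3 nodes and the "generic" tuner/firewall variants when these keys are unset:

```
% pulumi config set node:count 5
% pulumi config set tuner:variant generic
% pulumi config set firewall:variant generic
% pulumi config set --secret watchtower:slack_webhook_url https://hooks.slack.com/services/EXAMPLE
```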