├── .dockerignore
├── .github
│   ├── dependabot.yml
│   └── workflows
│       └── test-and-release.yml
├── .gitignore
├── .rustfmt.nightly.toml
├── CODEOWNERS
├── Cargo.toml
├── Cross.toml
├── LICENSE
├── README.md
├── avalanche-kms
│   ├── Cargo.toml
│   ├── README.md
│   └── src
│       ├── create
│       │   └── mod.rs
│       ├── delete
│       │   └── mod.rs
│       ├── evm_balance
│       │   └── mod.rs
│       ├── evm_transfer_from_hotkey
│       │   └── mod.rs
│       ├── info
│       │   └── mod.rs
│       └── main.rs
├── avalanche-ops
│   ├── Cargo.toml
│   ├── README.md
│   ├── artifacts
│   │   └── default.metrics.rules.yaml
│   └── src
│       ├── artifacts.rs
│       ├── aws
│       │   ├── artifacts.rs
│       │   ├── avalanched.rs
│       │   ├── cfn-templates
│       │   │   ├── asg_ubuntu.yaml
│       │   │   ├── ec2_instance_role.yaml
│       │   │   ├── ssm_install_subnet_chain.yaml
│       │   │   └── vpc.yaml
│       │   ├── mod.rs
│       │   └── spec.rs
│       ├── dev-machines
│       │   └── cfn-templates
│       │       └── asg_ubuntu.yaml
│       ├── dev_machine_artifacts.rs
│       └── lib.rs
├── avalanched-aws
│   ├── Cargo.toml
│   └── src
│       ├── agent
│       │   ├── cloudwatch.rs
│       │   └── mod.rs
│       ├── alias_chain
│       │   └── mod.rs
│       ├── install_artifacts
│       │   └── mod.rs
│       ├── install_chain
│       │   └── mod.rs
│       ├── install_subnet
│       │   └── mod.rs
│       └── main.rs
├── avalancheup-aws
│   ├── Cargo.toml
│   ├── example-aws.md
│   ├── img
│   │   ├── avalancheup.drawio.png
│   │   └── example-aws
│   │       ├── 01.png
│   │       ├── 02.png
│   │       ├── 03.png
│   │       ├── 04.png
│   │       ├── 05.png
│   │       ├── 06.png
│   │       ├── 07.png
│   │       ├── 08.png
│   │       ├── 09.png
│   │       ├── 10.png
│   │       ├── 11.png
│   │       ├── 12.png
│   │       ├── 13.png
│   │       ├── 14.png
│   │       ├── 15.png
│   │       ├── 16.png
│   │       ├── 17.png
│   │       ├── 18.png
│   │       ├── 19.png
│   │       ├── 20.png
│   │       ├── 21.png
│   │       ├── 22.png
│   │       ├── 23.png
│   │       ├── 24.png
│   │       ├── 25.png
│   │       ├── 26.png
│   │       ├── 27.png
│   │       ├── 28.png
│   │       ├── 29.png
│   │       ├── 30.png
│   │       ├── 31.png
│   │       ├── 32.png
│   │       ├── 33.png
│   │       ├── 34.png
│   │       ├── 35.png
│   │       ├── 36.png
│   │       ├── 37.png
│   │       ├── 38.png
│   │       ├── 39.png
│   │       ├── 40.png
│   │       ├── 41.png
│   │       ├── 42.png
│   │       ├── 43.png
│   │       ├── 44.png
│   │       ├── 45.png
│   │       ├── 46.png
│   │       ├── 47.png
│   │       ├── 48.png
│   │       ├── core-1.png
│   │       ├── core-2.png
│   │       └── core-3.png
│   ├── recipes-aws-advanced.md
│   ├── recipes-aws.md
│   └── src
│       ├── README.md
│       ├── add_primary_network_validators
│       │   └── mod.rs
│       ├── apply
│       │   ├── dev_machine.rs
│       │   └── mod.rs
│       ├── default_spec
│       │   └── mod.rs
│       ├── delete
│       │   └── mod.rs
│       ├── endpoints
│       │   └── mod.rs
│       ├── install_subnet_chain
│       │   └── mod.rs
│       ├── main.rs
│       ├── subnet_config
│       │   └── mod.rs
│       └── subnet_evm
│           ├── chain_config.rs
│           ├── genesis.rs
│           └── mod.rs
├── blizzard-aws
│   ├── Cargo.toml
│   └── src
│       ├── cloudwatch.rs
│       ├── command.rs
│       ├── evm.rs
│       ├── flags.rs
│       ├── main.rs
│       └── x.rs
├── blizzardup-aws
│   ├── Cargo.toml
│   ├── README.md
│   ├── cfn-templates
│   │   ├── asg_ubuntu.yaml
│   │   ├── ec2_instance_role.yaml
│   │   └── vpc.yaml
│   └── src
│       ├── apply
│       │   └── mod.rs
│       ├── aws.rs
│       ├── blizzard.rs
│       ├── default_spec
│       │   └── mod.rs
│       ├── delete
│       │   └── mod.rs
│       ├── lib.rs
│       ├── main.rs
│       ├── query
│       │   └── mod.rs
│       └── status.rs
├── cdk
│   └── avalancheup-aws
│       ├── .gitignore
│       ├── .npmignore
│       ├── README.md
│       ├── bin
│       │   └── avalancheup-aws.ts
│       ├── cdk-avalanche-ops.png
│       ├── cdk.json
│       ├── img
│       │   ├── demo1.png
│       │   ├── demo10.png
│       │   ├── demo2.png
│       │   ├── demo3.png
│       │   ├── demo4.png
│       │   ├── demo5.png
│       │   ├── demo6.png
│       │   ├── demo7.png
│       │   ├── demo8.png
│       │   └── demo9.png
│       ├── jest.config.js
│       ├── lib
│       │   └── avalancheup-aws-stack.ts
│       ├── package-lock.json
│       ├── package.json
│       ├── test
│       │   └── avalancheup-aws.test.ts
│       └── tsconfig.json
├── devnet-faucet
│   ├── .gitignore
│   ├── Cargo.toml
│   ├── README.md
│   ├── demo.png
│   └── src
│       ├── README.md
│       ├── command.rs
│       ├── flags.rs
│       ├── main.rs
│       └── static
│           └── index.html
├── scripts
│   ├── build.release.sh
│   ├── tests.lint.sh
│   ├── tests.unit.sh
│   └── tests.unused.sh
├── staking-key-cert-s3-downloader
│   ├── Cargo.toml
│   └── src
│       ├── command.rs
│       ├── flags.rs
│       └── main.rs
└── staking-signer-key-s3-downloader
    ├── Cargo.toml
    └── src
        ├── command.rs
        ├── flags.rs
        └── main.rs
/.dockerignore:
--------------------------------------------------------------------------------
1 | .git
2 | .cache
3 | target
4 | *.md
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 | updates:
3 | - package-ecosystem: cargo
4 | directory: "/"
5 | schedule:
6 | interval: daily
7 | time: "10:00"
8 | open-pull-requests-limit: 10
9 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Generated by Cargo
2 | # will have compiled files and executables
3 | /target/
4 |
5 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
6 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
7 | Cargo.lock
8 |
9 | # These are backup files generated by rustfmt
10 | **/*.rs.bk
11 |
12 | # for "cross build" outputs
13 | /release
14 |
15 | .DS_Store
16 | .vscode
17 |
--------------------------------------------------------------------------------
/.rustfmt.nightly.toml:
--------------------------------------------------------------------------------
1 | # ref. https://github.com/rust-lang/rustfmt/blob/master/Configurations.md
2 | binop_separator = "Back"
3 | blank_lines_lower_bound = 0
4 | blank_lines_upper_bound = 1
5 | brace_style = "SameLineWhere"
6 | color = "Auto"
7 | combine_control_expr = true
8 | comment_width = 80
9 | condense_wildcard_suffixes = true
10 | control_brace_style = "AlwaysSameLine"
11 | edition = "2021"
12 | empty_item_single_line = true
13 | enum_discrim_align_threshold = 20
14 | error_on_line_overflow = true
15 | error_on_unformatted = false
16 | fn_args_layout = "Tall"
17 | fn_call_width = 60
18 | fn_single_line = false
19 | force_explicit_abi = true
20 | force_multiline_blocks = false
21 | format_code_in_doc_comments = true
22 | format_generated_files = true
23 | format_macro_bodies = true
24 | format_macro_matchers = true
25 | format_strings = true
26 | hard_tabs = false
27 | imports_granularity = "Crate"
28 | imports_indent = "Block"
29 | imports_layout = "Mixed"
30 | indent_style = "Block"
31 | max_width = 100
32 | normalize_doc_attributes = true
33 | reorder_imports = true
34 | trailing_comma = "Vertical"
35 | trailing_semicolon = true
36 | unstable_features = true
37 | use_field_init_shorthand = true
38 | use_small_heuristics = "Off"
39 | use_try_shorthand = true
40 | where_single_line = false
41 | wrap_comments = true
--------------------------------------------------------------------------------
/CODEOWNERS:
--------------------------------------------------------------------------------
1 | # CODEOWNERS
2 | * @gyuho @exdx
3 |
4 |
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
1 | [workspace]
2 | members = [
3 | "avalanche-kms",
4 | "avalanche-ops",
5 | "avalanched-aws",
6 | "avalancheup-aws",
7 | "blizzard-aws",
8 | "blizzardup-aws",
9 | "devnet-faucet",
10 | "staking-key-cert-s3-downloader",
11 | "staking-signer-key-s3-downloader",
12 | ]
13 |
--------------------------------------------------------------------------------
/Cross.toml:
--------------------------------------------------------------------------------
1 | [build]
2 | default-target = "x86_64-unknown-linux-gnu" # use this target if none is explicitly provided
3 | pre-build = [ # additional commands to run prior to building the package
4 | "dpkg --add-architecture $CROSS_DEB_ARCH",
5 | "apt-get update && apt-get --assume-yes install libssl-dev:$CROSS_DEB_ARCH"
6 | ]
7 |
8 | [target.aarch64-unknown-linux-gnu]
9 | pre-build = [
10 | "dpkg --add-architecture $CROSS_DEB_ARCH",
11 | "apt-get update && apt-get install --assume-yes libssl-dev:$CROSS_DEB_ARCH"
12 | ]
13 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (C) 2023, Ava Labs, Inc. All rights reserved.
2 |
3 | Ecosystem License
4 | Version: 1.1
5 |
6 | Subject to the terms herein, Ava Labs, Inc. (**“Ava Labs”**) hereby grants you
7 | a limited, royalty-free, worldwide, non-sublicensable, non-transferable,
8 | non-exclusive license to use, copy, modify, create derivative works based on,
9 | and redistribute the Software, in source code, binary, or any other form,
10 | including any modifications or derivative works of the Software (collectively,
11 | **“Licensed Software”**), in each case subject to this Ecosystem License
12 | (**“License”**).
13 |
14 | This License applies to all copies, modifications, derivative works, and any
15 | other form or usage of the Licensed Software. You will include and display
16 | this License, without modification, with all uses of the Licensed Software,
17 | regardless of form.
18 |
19 | You will use the Licensed Software solely (i) in connection with the Avalanche
20 | Public Blockchain platform, having a NetworkID of 1 (Mainnet) or 5 (Fuji), and
21 | associated blockchains, comprised exclusively of the Avalanche X-Chain,
22 | C-Chain, P-Chain and any subnets linked to the P-Chain (“Avalanche Authorized
23 | Platform”) or (ii) for non-production, testing or research purposes within the
24 | Avalanche ecosystem, in each case, without any commercial application
25 | (“Non-Commercial Use”); provided that this License does not permit use of the
26 | Licensed Software in connection with (a) any forks of the Avalanche Authorized
27 | Platform or (b) in any manner not operationally connected to the Avalanche
28 | Authorized Platform other than, for the avoidance of doubt, the limited
29 | exception for Non-Commercial Use. Ava Labs may publicly announce changes or
30 | additions to the Avalanche Authorized Platform, which may expand or modify
31 | usage of the Licensed Software. Upon such announcement, the Avalanche
32 | Authorized Platform will be deemed to be the then-current iteration of such
33 | platform.
34 |
35 | You hereby acknowledge and agree to the terms set forth at
36 | www.avalabs.org/important-notice.
37 |
38 | If you use the Licensed Software in violation of this License, this License
39 | will automatically terminate and Ava Labs reserves all rights to seek any
40 | remedy for such violation.
41 |
42 | Except for uses explicitly permitted in this License, Ava Labs retains all
43 | rights in the Licensed Software, including without limitation the ability to
44 | modify it.
45 |
46 | Except as required or explicitly permitted by this License, you will not use
47 | any Ava Labs names, logos, or trademarks without Ava Labs’ prior written
48 | consent.
49 |
50 | You may use this License for software other than the “Licensed Software”
51 | specified above, as long as the only change to this License is the definition
52 | of the term “Licensed Software.”
53 |
54 | The Licensed Software may reference third party components. You acknowledge
55 | and agree that these third party components may be governed by a separate
56 | license or terms and that you will comply with them.
57 |
58 | **TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE LICENSED SOFTWARE IS PROVIDED
59 | ON AN “AS IS” BASIS, AND AVA LABS EXPRESSLY DISCLAIMS AND EXCLUDES ALL
60 | REPRESENTATIONS, WARRANTIES AND OTHER TERMS AND CONDITIONS, WHETHER EXPRESS OR
61 | IMPLIED, INCLUDING WITHOUT LIMITATION BY OPERATION OF LAW OR BY CUSTOM,
62 | STATUTE OR OTHERWISE, AND INCLUDING, BUT NOT LIMITED TO, ANY IMPLIED WARRANTY,
63 | TERM, OR CONDITION OF NON-INFRINGEMENT, MERCHANTABILITY, TITLE, OR FITNESS FOR
64 | PARTICULAR PURPOSE. YOU USE THE LICENSED SOFTWARE AT YOUR OWN RISK. AVA LABS
65 | EXPRESSLY DISCLAIMS ALL LIABILITY (INCLUDING FOR ALL DIRECT, CONSEQUENTIAL OR
66 | OTHER DAMAGES OR LOSSES) RELATED TO ANY USE OF THE LICENSED SOFTWARE.**
67 |
68 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | 
5 | [](https://opensource.org/licenses/BSD-3-Clause)
6 |
7 | See https://talks.gyuho.dev/avalanche-aws-2022q3.html for slides.
8 |
9 | See ["Cheapest way to set up a network or validator"](avalancheup-aws/recipes-aws.md#cheapest-way-to-set-up-a-network-or-validator).
10 |
11 | See [`tokenvm/DEVNETS.md`](https://github.com/ava-labs/hypersdk/blob/main/examples/tokenvm/DEVNETS.md) for dev net testing.
12 |
13 | Use https://devnet.routescan.io for DEVNET explorers.
14 |
15 | # Avalanche Ops
16 |
17 | A **single command to launch Avalanche nodes from scratch that join any network of choice (e.g., test, fuji, main) or create a custom Avalanche network**. Provisions all resources required to run a node or network with recommended (and configurable) setups.
18 |
19 | Distributed systems are full of subtle edge cases. The fact that such events or bugs may only emerge under special circumstances warrants exhaustive test coverage beyond simple unit testing. Furthermore, a lack of tests slows down the software release process, let alone long-term architectural changes.
20 |
21 | `avalanche-ops` aims to find vulnerabilities in the Avalanche protocol by intentionally causing failures, and provides a reliable and fast way to validate fixes. In addition, `avalanche-ops` implements basic principles and best practices for operating an Avalanche node in production.
22 |
23 | `avalanche-ops` is a set of operation toolkits for Avalanche nodes:
24 | - 🦀 Written in Rust
25 | - ✅ Optimized for ephemeral network create/delete
26 | - ✅ Fully automates VM (or physical machine) provisioning
27 | - ✅ Fully automates node installation and operations
28 | - ✅ Fully automates custom network setups
29 | - ✅ Fully automates custom VM (subnet) setups
30 | - 🔥 Simulates routine failure conditions (slow network)
31 | - 📨 Securely encrypts all artifacts for backups
32 | - Automates subnet/VM installation
33 | - Automates load/stress testing
34 | - Supports private networks
35 |
36 | `avalanche-ops` is:
37 | - 🚫 NOT a replacement of [`avalanchego`](https://github.com/ava-labs/avalanchego)
38 | - 🚫 NOT implementing any client-side load generation (to be done in Avalanche client/node projects)
39 | - 🚫 NOT implementing any Avalanche-specific test cases (focus on infrastructure setups)
40 | - 🚫 NOT using Kubernetes, prefers physical machines (or cloud VMs)
41 |
42 | ## Workflow
43 |
44 | **`avalancheup`** is the client (or "control plane") that runs on the operator's host machine or test runner and provisions a set of remote machines based on user-provided configuration. **`avalanched`** is an agent (or daemon) that runs on every remote machine and creates and installs Avalanche-specific resources (e.g., generating TLS certificates, discovering anchor nodes, writing the Avalanche node service file).
45 |
46 | To set up a custom network, provide **`avalancheup`** with the executable binaries to run on the remote machines. It then generates a genesis file with pre-funded keys and provisions the remote machines to install the user-provided artifacts. A custom network requires two groups of machines: (1) anchor nodes (beacon nodes, only required for a custom network), and (2) non-anchor nodes. During the bootstrap phase, regardless of its node kind, **`avalanched`** auto-generates TLS certificates and stores them encrypted in the remote storage. Anchor nodes publish their information in YAML to the shared remote storage, and non-anchor nodes list the storage to discover the anchor nodes.
47 |
48 | 
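A typical end-to-end flow chains three subcommands: `default-spec` to write a spec file, `apply` to provision everything described in it, and `delete` to tear it all down. The sketch below is illustrative only (flag names may differ); see [`recipes-aws.md`](./avalancheup-aws/recipes-aws.md) for exact, copy-pastable commands.

```bash
# write a default spec for the target network (flags shown are illustrative)
./target/release/avalancheup-aws default-spec \
--network-name fuji \
--spec-file-path /tmp/fuji.yaml

# provision the AWS resources in the spec and install avalanched on each machine
./target/release/avalancheup-aws apply \
--spec-file-path /tmp/fuji.yaml

# delete everything created for this spec
./target/release/avalancheup-aws delete \
--spec-file-path /tmp/fuji.yaml
```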
49 |
50 | ## `avalancheup` and `avalanched` on AWS
51 |
52 | See [`recipes-aws.md`](./avalancheup-aws/recipes-aws.md) and [`example-aws.md`](./avalancheup-aws/example-aws.md).
53 |
54 | 
55 |
56 | 
57 |
58 | ## Installation
59 |
60 | ```bash
61 | # to build manually
62 | ./scripts/build.release.sh
63 | ```
64 |
65 | ```bash
66 | # to download from the github release page
67 | # https://github.com/ava-labs/avalanche-ops/releases/tag/latest
68 | # or visit https://github.com/ava-labs/avalanche-ops/releases
69 | curl -L \
70 | https://github.com/ava-labs/avalanche-ops/releases/download/latest/avalanched-aws.x86_64-unknown-linux-gnu \
71 | -o ${HOME}/avalanched-aws.x86_64-unknown-linux-gnu
72 | ```
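The downloaded binary is not marked executable; assuming the download path above, you will likely need to make it executable before running it:

```bash
# mark the downloaded binary as executable, then verify it runs
chmod +x ${HOME}/avalanched-aws.x86_64-unknown-linux-gnu
${HOME}/avalanched-aws.x86_64-unknown-linux-gnu --help
```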
73 |
74 | ## Permissions
75 |
76 | To use `avalancheup` you need AWS credentials stored locally to authenticate against the AWS API.
77 |
78 | Use the [aws CLI tool](https://aws.amazon.com/cli/) to log in to AWS from the command line. It is recommended to start from a clean `.aws` folder each time you use avalanche-ops.
79 |
80 | Use `aws configure sso` to login, with the following settings:
81 | ```
82 | SSO session name: (hit enter)
83 | SSO start URL:
84 | SSO region:
85 | ```
86 |
87 | After that, the tool will log in to AWS via the browser. Then enter the following settings:
88 | ```
89 | CLI default client Region:
90 | CLI default output format: json
91 | CLI profile name:
92 | ```
93 |
94 | Once logged in, you can use the avalanche-ops suite of tools. Note that the daemons and other backend services running directly in AWS have their own authentication patterns.
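For example, you can confirm the session is active with the standard AWS CLI before running any avalanche-ops command (the profile name is whatever you chose above); the binaries in this repository accept the same profile via a `--profile-name` flag (e.g., `avalanche-kms`):

```bash
# verify the SSO session is valid for the chosen profile
aws sts get-caller-identity --profile <your-profile-name>
```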
95 |
96 | ## TODOs
97 |
98 | Contributions are welcome!
99 |
100 | - Support mainnet fork
101 | - Failure injection testing
102 | - Better Avalanche node health checks
103 | - Support ARM
104 | - Support Raspberry Pi
105 | - Support key rotation
106 |
107 | ## Other projects
108 |
109 | - [`avalanche-network-runner`](https://github.com/ava-labs/avalanche-network-runner) to run a local network
110 | - [`avalanchego-operator`](https://github.com/ava-labs/avalanchego-operator) to run a Kubernetes operator
111 |
--------------------------------------------------------------------------------
/avalanche-kms/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "avalanche-kms"
3 | version = "1.0.0" # https://github.com/ava-labs/avalanche-ops/releases
4 | edition = "2021"
5 | rust-version = "1.70"
6 |
7 | [dependencies]
8 | avalanche-types = { version = "0.1.4", features = ["jsonrpc_client", "wallet", "wallet_evm", "kms_aws"] } # https://crates.io/crates/avalanche-types
9 | aws-manager = { version = "0.30.2", features = ["kms", "sts"] } # https://github.com/gyuho/aws-manager/tags
10 | clap = { version = "4.4.0", features = ["cargo", "derive"] } # https://github.com/clap-rs/clap/releases
11 | crossterm = "0.27.0"
12 | dialoguer = "0.10.4"
13 | env_logger = "0.10.0"
14 | ethers-signers = "2.0.7"
15 | id-manager = "0.0.3"
16 | log = "0.4.20"
17 | primitive-types = "0.12.1" # https://crates.io/crates/primitive-types
18 | random-manager = "0.0.5"
19 | serde = { version = "1.0.186", features = ["derive"] } # https://github.com/serde-rs/serde/releases
20 | serde_with = { version = "3.2.0", features = ["hex"] }
21 | serde_yaml = "0.9.25" # https://github.com/dtolnay/serde-yaml/releases
22 | tokio = { version = "1.32.0", features = ["full"] } # https://github.com/tokio-rs/tokio/releases
23 |
--------------------------------------------------------------------------------
/avalanche-kms/README.md:
--------------------------------------------------------------------------------
1 |
2 | ## Avalanche KMS tool (`avalanche-kms`)
3 |
4 | ```bash
5 | ./scripts/build.release.sh
6 | ./target/release/avalanche-kms --help
7 |
8 | ./target/release/avalanche-kms create --help
9 | ./target/release/avalanche-kms delete --help
10 | ./target/release/avalanche-kms info --help
11 | ```
12 |
13 | To create a new hotkey:
14 |
15 | ```bash
16 | ./target/release/avalanche-kms create \
17 | --key-type hot \
18 | --keys 1
19 | ```
20 |
21 | To create a new KMS key:
22 |
23 | ```bash
24 | ./target/release/avalanche-kms create \
25 | --region=us-west-2
26 | ```
27 |
28 | ```yaml
29 | # loaded key
30 |
31 | id: arn:aws:kms:us-west-2:931867039610:key/9ca6d1a5-bc21-4326-8562-ad106f36a439
32 | key_type: aws-kms
33 | addresses:
34 | 1:
35 | x: X-avax1e2hc8l88ew0y8muscv3e9u5ufumqnmzj8vnvfd
36 | p: P-avax1e2hc8l88ew0y8muscv3e9u5ufumqnmzj8vnvfd
37 | short_address: KUhknai5F6Hsr7SR7N1RjGidfCVSi6Umg
38 | eth_address: 0x75E3DC1926Ca033Ee06B0C378B0079241921e2AA
39 | h160_address: 0x75e3dc1926ca033ee06b0c378b0079241921e2aa
40 |
41 | # (mainnet)
42 | ```
43 |
44 | To get the key information:
45 |
46 | ```bash
47 | # new KMS key is successfully created
48 | ./target/release/avalanche-kms info \
49 | --region=us-west-2 \
50 | --network-id=1 \
51 | --key-arn arn:aws:kms:us-west-2:931867039610:key/9ca6d1a5-bc21-4326-8562-ad106f36a439
52 |
53 | ./target/release/avalanche-kms info \
54 | --region=us-west-2 \
55 | --network-id=5 \
56 | --key-arn arn:aws:kms:us-west-2:931867039610:key/9ca6d1a5-bc21-4326-8562-ad106f36a439
57 |
58 | ./target/release/avalanche-kms info \
59 | --region=us-west-2 \
60 | --network-id=1000 \
61 | --key-arn arn:aws:kms:us-west-2:931867039610:key/9ca6d1a5-bc21-4326-8562-ad106f36a439
62 | ```
63 |
64 | ```yaml
65 | # loaded KMS key
66 |
67 | id: arn:aws:kms:us-west-2:931867039610:key/9ca6d1a5-bc21-4326-8562-ad106f36a439
68 | key_type: aws-kms
69 | addresses:
70 | 1000:
71 | x: X-custom1e2hc8l88ew0y8muscv3e9u5ufumqnmzj5kpkwc
72 | p: P-custom1e2hc8l88ew0y8muscv3e9u5ufumqnmzj5kpkwc
73 | short_address: KUhknai5F6Hsr7SR7N1RjGidfCVSi6Umg
74 | eth_address: 0x75E3DC1926Ca033Ee06B0C378B0079241921e2AA
75 | h160_address: 0x75e3dc1926ca033ee06b0c378b0079241921e2aa
76 |
77 | # (network Id 1000)
78 | ```
79 |
80 | To schedule the key deletion:
81 |
82 | ```bash
83 | ./target/release/avalanche-kms delete \
84 | --region=us-west-2 \
85 | --key-arn arn:aws:kms:us-west-2:931867039610:key/9ca6d1a5-bc21-4326-8562-ad106f36a439 \
86 | --pending-windows-in-days 7
87 | ```
88 |
--------------------------------------------------------------------------------
/avalanche-kms/src/delete/mod.rs:
--------------------------------------------------------------------------------
1 | use std::io::{self, stdout};
2 |
3 | use avalanche_types::key;
4 | use aws_manager::{self, kms, sts};
5 | use clap::{value_parser, Arg, Command};
6 | use crossterm::{
7 | execute,
8 | style::{Color, Print, ResetColor, SetForegroundColor},
9 | };
10 | use dialoguer::{theme::ColorfulTheme, Select};
11 | use tokio::time::Duration;
12 |
13 | pub const NAME: &str = "delete";
14 |
15 | pub fn command() -> Command {
16 | Command::new(NAME)
17 | .about("Deletes an AWS KMS key")
18 | .arg(
19 | Arg::new("LOG_LEVEL")
20 | .long("log-level")
21 | .short('l')
22 | .help("Sets the log level")
23 | .required(false)
24 | .num_args(1)
25 | .value_parser(["debug", "info"])
26 | .default_value("info"),
27 | )
28 | .arg(
29 | Arg::new("REGION")
30 | .long("region")
31 | .short('r')
32 | .help("Sets the AWS region for API calls/endpoints")
33 | .required(true)
34 | .num_args(1)
35 | .default_value("us-west-2"),
36 | )
37 | .arg(
38 | Arg::new("KEY_ARN")
39 | .long("key-arn")
40 | .short('a')
41 | .help("KMS key ARN")
42 | .required(true)
43 | .num_args(1),
44 | )
45 | .arg(
46 | Arg::new("PENDING_WINDOWS_IN_DAYS")
47 | .long("pending-windows-in-days")
48 | .help("Sets the schedule delete pending days")
49 | .required(false)
50 | .num_args(1)
51 | .value_parser(value_parser!(i32))
52 | .default_value("7"),
53 | )
54 | .arg(
55 | Arg::new("UNSAFE_SKIP_PROMPT")
56 | .long("unsafe-skip-prompt")
57 | .help("Skips prompt mode (unsafe)")
58 | .required(false)
59 | .num_args(0),
60 | )
61 | .arg(
62 | Arg::new("PROFILE_NAME")
63 | .long("profile-name")
64 | .help("Sets the AWS credential profile name for API calls/endpoints")
65 | .required(false)
66 | .default_value("default")
67 | .num_args(1),
68 | )
69 | }
70 |
71 | pub async fn execute(
72 | log_level: &str,
73 | region: &str,
74 | key_arn: &str,
75 | pending_windows_in_days: i32,
76 | unsafe_skip_prompt: bool,
77 | profile_name: String,
78 | ) -> io::Result<()> {
79 | // ref.
80 | env_logger::init_from_env(
81 | env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, log_level),
82 | );
83 |
84 | log::info!("requesting to delete {key_arn} ({region}) in {pending_windows_in_days} days");
85 |
86 | let shared_config = aws_manager::load_config(
87 | Some(region.to_string()),
88 | Some(profile_name),
89 | Some(Duration::from_secs(30)),
90 | )
91 | .await;
92 | let kms_manager = kms::Manager::new(&shared_config);
93 |
94 | let sts_manager = sts::Manager::new(&shared_config);
95 | let current_identity = sts_manager.get_identity().await.unwrap();
96 | log::info!("current identity {:?}", current_identity);
97 | println!();
98 |
99 | execute!(
100 | stdout(),
101 | SetForegroundColor(Color::Red),
102 | Print(format!(
103 | "\nLoading the KMS key {} in region {} for deletion\n",
104 | key_arn, region
105 | )),
106 | ResetColor
107 | )?;
108 | let key = key::secp256k1::kms::aws::Key::from_arn(kms_manager.clone(), key_arn)
109 | .await
110 | .unwrap();
111 | let key_info = key.to_info(1).unwrap();
112 |
113 | println!();
114 | println!("loaded KMS key\n\n{}\n(mainnet)\n", key_info);
115 | println!();
116 |
117 | if !unsafe_skip_prompt {
118 | let options = &[
119 | format!(
120 | "No, I am not ready to delete a new KMS key '{}' '{}' in {} days",
121 | region, key_arn, pending_windows_in_days
122 | ),
123 | format!(
124 | "Yes, let's delete a new KMS key '{}' '{}' in {} days",
125 | region, key_arn, pending_windows_in_days
126 | ),
127 | ];
128 | let selected = Select::with_theme(&ColorfulTheme::default())
129 | .with_prompt("Select your 'delete' option")
130 | .items(&options[..])
131 | .default(0)
132 | .interact()
133 | .unwrap();
134 | if selected == 0 {
135 | return Ok(());
136 | }
137 | }
138 |
139 | key.delete(pending_windows_in_days).await.unwrap();
140 |
141 | println!();
142 | log::info!("successfully scheduled to delete KMS key signer");
143 |
144 | Ok(())
145 | }
146 |
--------------------------------------------------------------------------------
/avalanche-kms/src/evm_balance/mod.rs:
--------------------------------------------------------------------------------
1 | use std::io;
2 |
3 | use avalanche_types::{jsonrpc::client::evm as avalanche_sdk_evm, units};
4 | use clap::{Arg, Command};
5 | use primitive_types::H160;
6 |
7 | pub const NAME: &str = "evm-balance";
8 |
9 | pub fn command() -> Command {
10 | Command::new(NAME)
11 | .about("Fetches the balance of an address")
12 | .arg(
13 | Arg::new("LOG_LEVEL")
14 | .long("log-level")
15 | .short('l')
16 | .help("Sets the log level")
17 | .required(false)
18 | .num_args(1)
19 | .value_parser(["debug", "info"])
20 | .default_value("info"),
21 | )
22 | .arg(
23 | Arg::new("CHAIN_RPC_URL")
24 | .long("chain-rpc-url")
25 | .help("Sets to fetch other information from the RPC endpoints (e.g., balances)")
26 | .required(true)
27 | .num_args(1),
28 | )
29 | .arg(
30 | Arg::new("ADDRESS")
31 | .long("address")
32 | .help("Sets the address")
33 | .required(true)
34 | .num_args(1),
35 | )
36 | }
37 |
38 | pub async fn execute(log_level: &str, chain_rpc_url: &str, addr: H160) -> io::Result<()> {
39 | // ref.
40 | env_logger::init_from_env(
41 | env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, log_level),
42 | );
43 |
44 | log::info!("fetching the balance of {addr} via {chain_rpc_url}");
45 |
46 | let balance = avalanche_sdk_evm::get_balance(chain_rpc_url, addr)
47 | .await
48 | .unwrap();
49 | println!(
50 | "{} balance: {} ({} ETH/AVAX)",
51 | addr,
52 | balance,
53 | units::cast_evm_navax_to_avax_i64(balance)
54 | );
55 |
56 | Ok(())
57 | }
58 |
--------------------------------------------------------------------------------
/avalanche-kms/src/evm_transfer_from_hotkey/mod.rs:
--------------------------------------------------------------------------------
1 | use std::io::{self, stdout};
2 |
3 | use avalanche_types::{
4 | jsonrpc::client::{evm as json_client_evm, info as json_client_info},
5 | key, units, wallet,
6 | };
7 | use clap::{Arg, Command};
8 | use crossterm::{
9 | execute,
10 | style::{Color, Print, ResetColor, SetForegroundColor},
11 | };
12 | use dialoguer::{theme::ColorfulTheme, Select};
13 | use primitive_types::{H160, U256};
14 |
15 | pub const NAME: &str = "evm-transfer-from-hotkey";
16 |
17 | pub fn command() -> Command {
18 | Command::new(NAME)
19 | .about("Transfers the EVM native tokens 'from' hotkey to the 'to' address")
20 | .arg(
21 | Arg::new("LOG_LEVEL")
22 | .long("log-level")
23 | .short('l')
24 | .help("Sets the log level")
25 | .required(false)
26 | .num_args(1)
27 | .value_parser(["debug", "info"])
28 | .default_value("info"),
29 | )
30 | .arg(
31 | Arg::new("CHAIN_RPC_URL")
32 | .long("chain-rpc-url")
33 | .help("Sets the EVM chain RPC endpoint")
34 | .required(true)
35 | .num_args(1),
36 | )
37 | .arg(
38 | Arg::new("TRANSFERER_KEY")
39 | .long("transferer-key")
40 | .help("Sets the from private key (in hex format)")
41 | .required(true)
42 | .num_args(1),
43 | )
44 | .arg(
45 | Arg::new("TRANSFER_AMOUNT_IN_NANO_AVAX")
46 | .long("transfer-amount-in-nano-avax")
47 | .help("Sets the transfer amount in nAVAX (cannot be overlapped with --transfer-amount-in-avax)")
48 | .required(false)
49 | .num_args(1),
50 | )
51 | .arg(
52 | Arg::new("TRANSFER_AMOUNT_IN_AVAX")
53 | .long("transfer-amount-in-avax")
54 | .help("Sets the transfer amount in AVAX (cannot be overlapped with --transfer-amount-in-nano-avax)")
55 | .required(false)
56 | .num_args(1),
57 | )
58 | .arg(
59 | Arg::new("TRANSFEREE_ADDRESSES")
60 | .long("transferee-addresses")
61 | .help("Sets the comma-separated transferee EVM addresses")
62 | .required(true)
63 | .num_args(1),
64 | )
65 | .arg(
66 | Arg::new("SKIP_PROMPT")
67 | .long("skip-prompt")
68 | .short('s')
69 | .help("Skips prompt mode")
70 | .required(false)
71 | .num_args(0),
72 | )
73 | }
74 |
75 | pub async fn execute(
76 | log_level: &str,
77 | chain_rpc_url: &str,
78 | transferer_key: &str,
79 | transfer_amount_navax: U256,
80 | transferee_addrs: Vec<H160>,
81 | skip_prompt: bool,
82 | ) -> io::Result<()> {
83 | // ref.
84 | env_logger::init_from_env(
85 | env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, log_level),
86 | );
87 |
88 | let resp = json_client_info::get_network_id(chain_rpc_url)
89 | .await
90 | .unwrap();
91 | let network_id = resp.result.unwrap().network_id;
92 |
93 | let chain_id = json_client_evm::chain_id(chain_rpc_url).await.unwrap();
94 | log::info!("running against {chain_rpc_url}, network Id {network_id}, chain Id {chain_id}");
95 |
96 | let transferer_key = key::secp256k1::private_key::Key::from_hex(transferer_key).unwrap();
97 | let transferer_key_info = transferer_key.to_info(network_id).unwrap();
98 | log::info!("loaded hot key: {}", transferer_key_info.eth_address);
99 |
100 | if !skip_prompt {
101 | let options = &[
102 | format!(
103 | "No, I am not ready to transfer {transfer_amount_navax} ({} ETH/AVX) from {} to total {} addresses: {:?}",
104 | units::cast_evm_navax_to_avax_i64(transfer_amount_navax), transferer_key_info.eth_address, transferee_addrs.len(), transferee_addrs
105 | ),
106 | format!(
107 | "Yes, let's transfer {transfer_amount_navax} ({} ETH/AVX) from {} to total {} addresses: {:?}",
108 | units::cast_evm_navax_to_avax_i64(transfer_amount_navax), transferer_key_info.eth_address, transferee_addrs.len(), transferee_addrs
109 | ),
110 | ];
111 | let selected = Select::with_theme(&ColorfulTheme::default())
112 | .with_prompt("Select your 'transfer' option")
113 | .items(&options[..])
114 | .default(0)
115 | .interact()
116 | .unwrap();
117 | if selected == 0 {
118 | return Ok(());
119 | }
120 | } else {
121 | log::info!("skipping prompt...")
122 | }
123 |
124 | for transferee_addr in transferee_addrs.iter() {
125 | execute!(
126 | stdout(),
127 | SetForegroundColor(Color::Green),
128 | Print(format!(
129 | "\ntransfering {transfer_amount_navax} ({} ETH/AVAX) from {} to {transferee_addr} via {chain_rpc_url}\n",
130 | units::cast_evm_navax_to_avax_i64(transfer_amount_navax), transferer_key_info.eth_address
131 | )),
132 | ResetColor
133 | )?;
134 | let transferer_key_signer: ethers_signers::LocalWallet =
135 | transferer_key.to_ethers_core_signing_key().into();
136 |
137 | let w = wallet::Builder::new(&transferer_key)
138 | .base_http_url(chain_rpc_url.to_string())
139 | .build()
140 | .await
141 | .unwrap();
142 | let transferer_evm_wallet = w
143 | .evm(&transferer_key_signer, chain_rpc_url, chain_id)
144 | .unwrap();
145 |
146 | let transferer_balance = transferer_evm_wallet.balance().await.unwrap();
147 | println!(
148 | "transferrer {} current balance: {} ({} ETH/AVAX)",
149 | transferer_key_info.eth_address,
150 | transferer_balance,
151 | units::cast_evm_navax_to_avax_i64(transferer_balance)
152 | );
153 | let transferee_balance = json_client_evm::get_balance(chain_rpc_url, *transferee_addr)
154 | .await
155 | .unwrap();
156 | println!(
157 | "transferee 0x{:x} current balance: {} ({} ETH/AVAX)",
158 | transferee_addr,
159 | transferee_balance,
160 | units::cast_evm_navax_to_avax_i64(transferee_balance)
161 | );
162 |
163 | let tx_id = transferer_evm_wallet
164 | .eip1559()
165 | .recipient(*transferee_addr)
166 | .value(transfer_amount_navax)
167 | .urgent()
168 | .check_acceptance(true)
169 | .submit()
170 | .await
171 | .unwrap();
172 | log::info!(
173 | "evm ethers wallet SUCCESS with transaction id 0x{:x}",
174 | tx_id
175 | );
176 | }
177 |
178 | Ok(())
179 | }
180 |
--------------------------------------------------------------------------------
/avalanche-kms/src/info/mod.rs:
--------------------------------------------------------------------------------
1 | use std::{
2 | io::{self, stdout},
3 | str::FromStr,
4 | };
5 |
6 | use avalanche_types::{
7 | jsonrpc::client::{evm as avalanche_sdk_evm, info as json_client_info},
8 | key::secp256k1::{self, KeyType},
9 | units, utils,
10 | };
11 | use aws_manager::{self, kms, sts};
12 | use clap::{Arg, Command};
13 | use crossterm::{
14 | execute,
15 | style::{Color, Print, ResetColor, SetForegroundColor},
16 | };
17 | use tokio::time::Duration;
18 |
19 | pub const NAME: &str = "info";
20 |
21 | pub fn command() -> Command {
22 | Command::new(NAME)
23 | .about("Fetches the info of an AWS KMS key")
24 | .arg(
25 | Arg::new("LOG_LEVEL")
26 | .long("log-level")
27 | .short('l')
28 | .help("Sets the log level")
29 | .required(false)
30 | .num_args(1)
31 | .value_parser(["debug", "info"])
32 | .default_value("info"),
33 | )
34 | .arg(
35 | Arg::new("REGION")
36 | .long("region")
37 | .short('r')
38 | .help("Sets the AWS region for API calls/endpoints")
39 | .required(true)
40 | .num_args(1)
41 | .default_value("us-west-2"),
42 | )
43 | .arg(
44 | Arg::new("KEY_TYPE")
45 | .long("key-type")
46 | .help("Sets the key type")
47 | .required(true)
48 | .value_parser(["aws-kms", "hot"])
49 | .num_args(1),
50 | )
51 | .arg(
52 | Arg::new("KEY")
53 | .long("key")
54 | .help("Hex-encoded hot key or KMS key ARN")
55 | .required(true)
56 | .num_args(1),
57 | )
58 | .arg(
59 | Arg::new("CHAIN_RPC_URL")
60 | .long("chain-rpc-url")
61 | .help("Sets to fetch other information from the RPC endpoints (e.g., balances)")
62 | .required(false)
63 | .num_args(1),
64 | )
65 | .arg(
66 | Arg::new("PROFILE_NAME")
67 | .long("profile-name")
68 | .help("Sets the AWS credential profile name for API calls/endpoints")
69 | .required(false)
70 | .default_value("default")
71 | .num_args(1),
72 | )
73 | }
74 |
75 | pub async fn execute(
76 | log_level: &str,
77 | region: &str,
78 | key_type: &str,
79 | key: &str,
80 | chain_rpc_url: &str,
81 | profile_name: String,
82 | ) -> io::Result<()> {
83 | // ref.
84 | env_logger::init_from_env(
85 | env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, log_level),
86 | );
87 |
88 | log::info!(
89 | "requesting info for KMS key {key_type} ({region}) with chain RPC URL '{chain_rpc_url}'"
90 | );
91 | let network_id = if chain_rpc_url.is_empty() {
92 | 1
93 | } else {
94 | let (scheme, host, port, _, _) =
95 | utils::urls::extract_scheme_host_port_path_chain_alias(chain_rpc_url).unwrap();
96 | let scheme = if let Some(s) = scheme {
97 | format!("{s}://")
98 | } else {
99 | String::new()
100 | };
101 | let rpc_ep = format!("{scheme}{host}");
102 | let rpc_url = if let Some(port) = port {
103 | format!("{rpc_ep}:{port}")
104 | } else {
105 | rpc_ep.clone() // e.g., DNS
106 | };
107 |
108 | let resp = json_client_info::get_network_id(&rpc_url).await.unwrap();
109 | resp.result.unwrap().network_id
110 | };
111 | log::info!("network Id: {network_id}");
112 |
113 | let shared_config = aws_manager::load_config(
114 | Some(region.to_string()),
115 | Some(profile_name),
116 | Some(Duration::from_secs(30)),
117 | )
118 | .await;
119 |
120 | let sts_manager = sts::Manager::new(&shared_config);
121 | let current_identity = sts_manager.get_identity().await.unwrap();
122 | log::info!("current identity {:?}", current_identity);
123 | println!();
124 |
125 | execute!(
126 | stdout(),
127 | SetForegroundColor(Color::Green),
128 | Print(format!(
129 | "\nLoading the hotkey or KMS key {} in region {}\n",
130 | key, region
131 | )),
132 | ResetColor
133 | )?;
134 | let converted_key_type = KeyType::from_str(key_type).unwrap();
135 | match converted_key_type {
136 | KeyType::AwsKms => {
137 | let kms_manager = kms::Manager::new(&shared_config);
138 | let key = secp256k1::kms::aws::Key::from_arn(kms_manager.clone(), key)
139 | .await
140 | .unwrap();
141 | let key_info = key.to_info(network_id).unwrap();
142 |
143 | println!();
144 | println!(
145 | "loaded KMS key\n\n{}\n(network Id {network_id})\n",
146 | key_info
147 | );
148 | println!();
149 |
150 | if !chain_rpc_url.is_empty() {
151 | let balance = avalanche_sdk_evm::get_balance(chain_rpc_url, key_info.h160_address)
152 | .await
153 | .unwrap();
154 | println!(
155 | "{} balance: {} ({} ETH/AVAX)",
156 | key_info.eth_address,
157 | balance,
158 | units::cast_evm_navax_to_avax_i64(balance)
159 | );
160 | }
161 | }
162 | KeyType::Hot => {
163 | let k = secp256k1::private_key::Key::from_hex(key).unwrap();
164 |
165 | if !chain_rpc_url.is_empty() {
166 | let balance =
167 | avalanche_sdk_evm::get_balance(chain_rpc_url, k.to_public_key().to_h160())
168 | .await
169 | .unwrap();
170 | println!(
171 | "{} balance: {} ({} ETH/AVAX)",
172 | k.to_public_key().to_eth_address(),
173 | balance,
174 | units::cast_evm_navax_to_avax_i64(balance)
175 | );
176 | }
177 | }
178 | KeyType::Unknown(s) => panic!("unknown key type {s}"),
179 | }
180 |
181 | Ok(())
182 | }
183 |
--------------------------------------------------------------------------------
/avalanche-ops/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "avalanche-ops"
3 | version = "1.0.0" # https://crates.io/crates/avalanche-ops
4 | edition = "2021"
5 | rust-version = "1.70"
6 | publish = true
7 | description = "avalanche-ops spec"
8 | repository = "https://github.com/ava-labs/avalanche-ops"
9 | readme = "README.md"
10 | license = "Apache-2.0"
11 |
12 | [dependencies]
13 | avalanche-types = { version = "0.1.4", features = ["avalanchego"] } # https://crates.io/crates/avalanche-types
14 | aws-manager = { version = "0.30.2", features = ["ec2", "sts"] } # https://github.com/gyuho/aws-manager/tags
15 | compress-manager = "0.0.10"
16 | dir-manager = "0.0.1"
17 | env_logger = "0.10.0"
18 | id-manager = "0.0.3"
19 | log = "0.4.20"
20 | prefix-manager = "0.0.2"
21 | primitive-types = { version = "0.12.1", features = ["impl-serde"], optional = false } # https://crates.io/crates/primitive-types
22 | prometheus-manager = "0.0.30"
23 | public-ip = "0.2.2"
24 | random-manager = "0.0.5"
25 | rust-embed = "8.0.0"
26 | serde = { version = "1.0.186", features = ["derive"] }
27 | serde_json = "1.0.105" # https://github.com/serde-rs/json/releases
28 | serde_with = { version = "3.2.0", features = ["hex"] }
29 | serde_yaml = "0.9.25" # https://github.com/dtolnay/serde-yaml/releases
30 |
31 | [dev-dependencies]
32 | hex = "0.4.3"
33 | tempfile = "3.8.0"
34 |
--------------------------------------------------------------------------------
/avalanche-ops/README.md:
--------------------------------------------------------------------------------
1 |
2 | avalanche-ops spec.
3 |
4 | 
5 |
6 | https://crates.io/crates/avalanche-ops
7 |
--------------------------------------------------------------------------------
/avalanche-ops/artifacts/default.metrics.rules.yaml:
--------------------------------------------------------------------------------
1 | # example to show that loading from YAML just works without escaping/raw strings
2 |
3 | filters:
4 | - regex: ^avalanche_(health|readiness|liveness)_checks_failing$ # "Health Checks Failing"
5 | - regex: ^avalanche_network_peers[\s\S]*$ # covers "avalanche_network_peers_subnet" with subnetID label
6 | - regex: ^avalanche_network_times_(connected|disconnected)$
7 | - regex: ^avalanche_network_accept_failed$
8 | - regex: ^avalanche_network_(codec|proto_codec)[\s\S]*$ # covers "avalanche_network_codec_push_query_decompress_time_sum"*
9 | - regex: ^avalanche_network_(get|get_ancestors|version|ping|pong|(push|pull)_query|put|peerlist)_(received|sent|failed)[\s\S]*$ # "avalanche_network_get_received" and "avalanche_network_get_received_bytes"
10 | - regex: ^avalanche_network_node_uptime_(rewarding_stake|weighted_average)$
11 | - regex: ^avalanche_network_inbound_conn_throttler_(allowed|rate_limited)$
12 | - regex: ^avalanche_network_throttler_outbound_acquire_(failures|successes)$
13 | - regex: ^avalanche_process_(max|open)_fds$
14 | - regex: ^avalanche_process_(resident|virtual)_memory_bytes$
15 | - regex: ^avalanche_requests_average_latency$ # "Average Network Latency"
16 |
17 | - regex: ^avalanche_(C|P|X|(([0-9a-zA-Z]+)+){40,})_blk_builds_failed$ # "Block Build Failures"
18 |
19 | - regex: ^avalanche_(C|P|X|(([0-9a-zA-Z]+)+){40,})_(blks|txs)_(accepted|rejected|built|processing)[\s\S]*$ # "Accept Latency (Seconds)"
20 | - regex: ^avalanche_X_(avalanche|snowman)_(blks|txs)_(accepted|rejected|built|processing)[\s\S]*$ # "Accept Latency (Seconds)"
21 |
22 | - regex: ^avalanche_(C|P|X|(([0-9a-zA-Z]+)+){40,})_vm_metervm_parse_block_(count|sum)$
23 | - regex: ^avalanche_(C|P|X|(([0-9a-zA-Z]+)+){40,})_vm_chain_state_tx_accepted_count$
24 | - regex: ^avalanche_(C|P|X|(([0-9a-zA-Z]+)+){40,})_benchlist_benched_num$
25 | - regex: ^avalanche_(P|(([0-9a-zA-Z]+)+){40,})_vm_percent_connected[\s\S]*$ # covers "vm_percent_connected_subnet" with subnetID label
26 | - regex: ^avalanche_(C|P|X|(([0-9a-zA-Z]+)+){40,})_(blocked|blockers)$ # "avalanche_C_blocked" and "avalanche_C_blockers" for "Consensus Dependencies"
27 |
28 | - regex: ^avalanche_(C|P|X|(([0-9a-zA-Z]+)+){40,})_polls_[\s\S]*$ # "Percentage of Successful Polls"
29 | - regex: ^avalanche_X_(avalanche|snowman)_polls_[\s\S]*$ # "Percentage of Successful Polls"
30 |
31 | - regex: ^avalanche_(C|P|X|(([0-9a-zA-Z]+)+){40,})_(blks|txs)_polls_[\s\S]*$ # "Average Number of Polls Before Accept"
32 | - regex: ^avalanche_X_(avalanche|snowman)_(blks|txs)_polls_[\s\S]*$ # "Average Number of Polls Before Accept"
33 |
34 | - regex: ^avalanche_(C|P|X|(([0-9a-zA-Z]+)+){40,})_handler_(chits|app_gossip|get|get_accepted|get_ancestors|gossip_request|query_failed)_(count|sum)$ # "Percentage of Successful Queries"
35 | - regex: ^avalanche_(C|P|X|(([0-9a-zA-Z]+)+){40,})_handler_unprocessed_msgs_len$ # "Unprocessed Messages"
36 | - regex: ^avalanche_(C|P|X|(([0-9a-zA-Z]+)+){40,})_last_accepted_(height|timestamp)$
37 | - regex: ^avalanche_(C|(([0-9a-zA-Z]+)+){40,})_vm_eth_rpc_(failure|requests|success)$
38 | - regex: ^avalanche_(C|(([0-9a-zA-Z]+)+){40,})_vm_eth_chain_block_gas_used_(accepted|processed)$ # "processedBlockGasUsedCounter" "avalanche_C_vm_eth_chain_block_gas_used_accepted"
39 | - regex: ^avalanche_(C|(([0-9a-zA-Z]+)+){40,})_vm_eth_chain_txs_(accepted|processed)$
40 | - regex: ^avalanche_X_vm_avalanche_(base|create_asset|operation|import|export)_txs_accepted$
41 |
42 | - regex: ^avalanche_db_(put|delete|compact|get|batch_(write|put))_(count|sum)$
43 | - regex: ^avalanche_(C|P|X|(([0-9a-zA-Z]+)+){40,})_db_(put|delete|compact|get|batch_(write|put)|write_size|read_size)_(count|sum)$
44 |
45 | - regex: ^avalanche_X_avalanche_whitelist_[\s\S]*$
46 |
47 | # "summary" type does not need to specify labels
48 | # this matches all quantiles (0.5, 0.75, 0.95, 0.99, 0.999, 0.9999)
49 | - regex: ^avalanche_(([0-9a-zA-Z]+)+){40,}_vm_eth_rpc_duration_all$
50 |
51 | # "counter" type supports either exact match, or sub-match
52 | - regex: ^avalanche_(([0-9a-zA-Z]+)+){40,}_vm_grpc_client_started_total$
53 | # missing grpc_method="*" and grpc_code=OK
54 | # in order to sub-match all possible method values evaluate to true
55 | labels:
56 | grpc_service: rpcdb.Database
57 | - regex: ^avalanche_(([0-9a-zA-Z]+)+){40,}_vm_grpc_client_started_total$
58 | labels:
59 | grpc_service: messenger.Messenger
60 | - regex: ^avalanche_(([0-9a-zA-Z]+)+){40,}_vm_grpc_client_started_total$
61 | labels:
62 | grpc_method: SendAppGossip
63 | grpc_service: appsender.AppSender
64 |
65 | - regex: ^avalanche_(([0-9a-zA-Z]+)+){40,}_vm_grpc_client_handled_total$
66 | labels:
67 | grpc_service: rpcdb.Database
68 | - regex: ^avalanche_(([0-9a-zA-Z]+)+){40,}_vm_grpc_client_handled_total$
69 | labels:
70 | grpc_service: messenger.Messenger
71 | - regex: ^avalanche_(([0-9a-zA-Z]+)+){40,}_vm_grpc_client_handled_total$
72 | labels: # exact match
73 | grpc_code: OK
74 | grpc_method: SendAppGossip
75 | grpc_service: appsender.AppSender
76 | grpc_type: unary
77 |
--------------------------------------------------------------------------------
/avalanche-ops/src/artifacts.rs:
--------------------------------------------------------------------------------
1 | use rust_embed::RustEmbed;
2 |
3 | pub fn prometheus_rules() -> prometheus_manager::Rules {
4 | #[derive(RustEmbed)]
5 | #[folder = "artifacts/"]
6 | #[prefix = "artifacts/"]
7 | struct Asset;
8 |
9 | let filters_raw = Asset::get("artifacts/default.metrics.rules.yaml").unwrap();
10 | let filters_raw = std::str::from_utf8(filters_raw.data.as_ref()).unwrap();
11 | serde_yaml::from_str(filters_raw).unwrap()
12 | }
13 |
--------------------------------------------------------------------------------
/avalanche-ops/src/aws/artifacts.rs:
--------------------------------------------------------------------------------
1 | use std::io::{self, Error, ErrorKind};
2 |
3 | use rust_embed::RustEmbed;
4 |
5 | pub fn asg_ubuntu_yaml() -> io::Result<String> {
6 | #[derive(RustEmbed)]
7 | #[folder = "src/aws/cfn-templates/"]
8 | #[prefix = "src/aws/cfn-templates/"]
9 | struct Asset;
10 | let f = Asset::get("src/aws/cfn-templates/asg_ubuntu.yaml").unwrap();
11 | let s = std::str::from_utf8(f.data.as_ref()).map_err(|e| {
12 | Error::new(
13 | ErrorKind::InvalidInput,
14 | format!("failed to convert embed file to str {}", e),
15 | )
16 | })?;
17 | Ok(s.to_string())
18 | }
19 |
20 | pub fn ec2_instance_role_yaml() -> io::Result<String> {
21 | #[derive(RustEmbed)]
22 | #[folder = "src/aws/cfn-templates/"]
23 | #[prefix = "src/aws/cfn-templates/"]
24 | struct Asset;
25 | let f = Asset::get("src/aws/cfn-templates/ec2_instance_role.yaml").unwrap();
26 | let s = std::str::from_utf8(f.data.as_ref()).map_err(|e| {
27 | Error::new(
28 | ErrorKind::InvalidInput,
29 | format!("failed to convert embed file to str {}", e),
30 | )
31 | })?;
32 | Ok(s.to_string())
33 | }
34 |
35 | pub fn ssm_install_subnet_chain_yaml() -> io::Result<String> {
36 | #[derive(RustEmbed)]
37 | #[folder = "src/aws/cfn-templates/"]
38 | #[prefix = "src/aws/cfn-templates/"]
39 | struct Asset;
40 | let f = Asset::get("src/aws/cfn-templates/ssm_install_subnet_chain.yaml").unwrap();
41 | let s = std::str::from_utf8(f.data.as_ref()).map_err(|e| {
42 | Error::new(
43 | ErrorKind::InvalidInput,
44 | format!("failed to convert embed file to str {}", e),
45 | )
46 | })?;
47 | Ok(s.to_string())
48 | }
49 |
50 | pub fn vpc_yaml() -> io::Result<String> {
51 | #[derive(RustEmbed)]
52 | #[folder = "src/aws/cfn-templates/"]
53 | #[prefix = "src/aws/cfn-templates/"]
54 | struct Asset;
55 | let f = Asset::get("src/aws/cfn-templates/vpc.yaml").unwrap();
56 | let s = std::str::from_utf8(f.data.as_ref()).map_err(|e| {
57 | Error::new(
58 | ErrorKind::InvalidInput,
59 | format!("failed to convert embed file to str {}", e),
60 | )
61 | })?;
62 | Ok(s.to_string())
63 | }
64 |
--------------------------------------------------------------------------------
/avalanche-ops/src/aws/avalanched.rs:
--------------------------------------------------------------------------------
1 | use serde::{Deserialize, Serialize};
2 |
3 | /// Defines flag options.
4 | #[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Clone)]
5 | #[serde(rename_all = "snake_case")]
6 | pub struct Flags {
7 | pub log_level: String,
8 | pub use_default_config: bool,
9 | #[serde(skip_serializing_if = "Option::is_none")]
10 | pub publish_periodic_node_info: Option<bool>,
11 | }
12 |
13 | impl Flags {
14 | pub fn to_flags(&self) -> String {
15 | let mut s = format!("--log-level={}", self.log_level);
16 | if self.use_default_config {
17 | s.push_str(" --use-default-config");
18 | }
19 | if let Some(v) = &self.publish_periodic_node_info {
20 | if *v {
21 | s.push_str(" --publish-periodic-node-info");
22 | }
23 | }
24 | s
25 | }
26 | }
27 |
--------------------------------------------------------------------------------
/avalanche-ops/src/aws/cfn-templates/ec2_instance_role.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | AWSTemplateFormatVersion: "2010-09-09"
3 | Description: "IAM instance role"
4 |
5 | # takes about 3 minutes
6 |
7 | # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html
8 | Parameters:
9 | RoleName:
10 | Type: String
11 | Description: Role name.
12 |
13 | RoleProfileName:
14 | Type: String
15 | Description: Role profile name.
16 |
17 | Id:
18 | Type: String
19 | Description: Unique identifier, prefix for all resources created below.
20 |
21 | KmsKeyArn:
22 | Type: String
23 | Description: KMS key ARN that de/encrypts resources.
24 |
25 | S3BucketName:
26 | Type: String
27 | Description: S3 bucket name to store.
28 |
29 | Mappings:
30 | ServicePrincipals:
31 | aws-cn:
32 | ec2: ec2.amazonaws.com.cn
33 | aws:
34 | ec2: ec2.amazonaws.com
35 |
36 | Resources:
37 | # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-role.html
38 | InstanceRole:
39 | Type: AWS::IAM::Role
40 | Properties:
41 | RoleName: !Ref RoleName
42 | AssumeRolePolicyDocument:
43 | Version: "2012-10-17"
44 | Statement:
45 | - Effect: Allow
46 | Principal:
47 | Service:
48 | - Fn::FindInMap:
49 | - ServicePrincipals
50 | - Ref: AWS::Partition
51 | - ec2
52 | Action:
53 | - sts:AssumeRole
54 | ManagedPolicyArns:
55 | - arn:aws:iam::aws:policy/AmazonSSMFullAccess
56 | - arn:aws:iam::aws:policy/CloudWatchFullAccess
57 | Path: /
58 | Policies:
59 | # restrict this better
60 | # ref. https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_examples_ec2_ebs-owner.html
61 | - PolicyName: avalanched-instance-role-policy
62 | PolicyDocument:
63 | Version: "2012-10-17"
64 | Statement:
65 | - Effect: Allow
66 | Action:
67 | - ec2:DescribeInstances # to fetch tags
68 | - ec2:DescribeTags # to find network/resource information
69 | - ec2:DescribeVolumes # to wait for volume attachment
70 | - ec2:CreateTags
71 | - ec2:CreateVolume # to create volume if not exists
72 | - ec2:AttachVolume
73 | - ec2:DetachVolume # to fail fast in case of spot instance-action
74 | - autoscaling:SetInstanceHealth # to fail fast to mark the local instance "Unhealthy"
75 | - ec2:TerminateInstances # to fail fast in case of spot instance-action
76 | Resource: "*"
77 |
78 | - Effect: Allow
79 | Action:
80 | - kms:Encrypt # to generate TLS key and encrypt
81 | - kms:GenerateDataKey* # to encrypt TLS key
82 | - kms:DescribeKey # to describe the KMS key
83 | Resource: { Ref: KmsKeyArn }
84 |
85 | - Effect: Allow
86 | Action:
87 | - s3:List*
88 | Resource: "*"
89 |
90 | - Effect: Allow
91 | Action:
92 | - s3:GetObject # to download artifacts
93 | - s3:PutObject # to upload generated TLS keys
94 | Resource:
95 | - !Join [
96 | "",
97 | [
98 | !Sub "arn:${AWS::Partition}:s3:::",
99 | !Ref S3BucketName,
100 | "/",
101 | !Ref Id,
102 | "/*",
103 | ],
104 | ]
105 | - !Join [
106 | "",
107 | [
108 | !Sub "arn:${AWS::Partition}:s3:::",
109 | !Ref S3BucketName,
110 | "/",
111 | !Ref Id,
112 | "/bootstrap/*",
113 | ],
114 | ]
115 | - !Join [
116 | "",
117 | [
118 | !Sub "arn:${AWS::Partition}:s3:::",
119 | !Ref S3BucketName,
120 | "/",
121 | !Ref Id,
122 | "/pki/*",
123 | ],
124 | ]
125 | - !Join [
126 | "",
127 | [
128 | !Sub "arn:${AWS::Partition}:s3:::",
129 | !Ref S3BucketName,
130 | "/",
131 | !Ref Id,
132 | "/discover/*",
133 | ],
134 | ]
135 | - !Join [
136 | "",
137 | [
138 | !Sub "arn:${AWS::Partition}:s3:::",
139 | !Ref S3BucketName,
140 | "/",
141 | !Ref Id,
142 | "/backups/*",
143 | ],
144 | ]
145 | - !Join [
146 | "",
147 | [
148 | !Sub "arn:${AWS::Partition}:s3:::",
149 | !Ref S3BucketName,
150 | "/",
151 | !Ref Id,
152 | "/events/*",
153 | ],
154 | ]
155 | - !Join [
156 | "",
157 | [
158 | !Sub "arn:${AWS::Partition}:s3:::",
159 | !Ref S3BucketName,
160 | "/",
161 | !Ref Id,
162 | "/ssm-output-logs/*",
163 | ],
164 | ]
165 |
166 | - Effect: Allow
167 | Action:
168 | - cloudwatch:PutMetricData
169 | Resource: "*"
170 |
171 | - Effect: Allow
172 | Action:
173 | - logs:CreateLogGroup
174 | - logs:CreateLogStream
175 | - logs:PutLogEvents
176 | - logs:DescribeLogStreams
177 | - logs:PutRetentionPolicy
178 | Resource:
179 | # Ref: http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-cloudwatch-logs
180 | - !Sub "arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:${Id}"
181 | - !Sub "arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:${Id}:log-stream:*"
182 |
183 | # for static IP addresses
184 | - Effect: Allow
185 | Action:
186 | - ec2:AllocateAddress # https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_AllocateAddress.html
187 | - ec2:AssociateAddress # https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_AssociateAddress.html
188 | - ec2:DescribeAddresses # https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAddresses.html
189 | Resource: "*"
190 |
191 | # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-instanceprofile.html
192 | InstanceProfile:
193 | Type: AWS::IAM::InstanceProfile
194 | Properties:
195 | InstanceProfileName: !Ref RoleProfileName
196 | Path: "/"
197 | Roles:
198 | - !Ref InstanceRole
199 |
200 | Outputs:
201 | InstanceRoleArn:
202 | Value: !GetAtt InstanceRole.Arn
203 | Description: Role ARN
204 |
205 | InstanceProfileArn:
206 | Value: !GetAtt InstanceProfile.Arn
207 | Description: Instance profile ARN
208 |
--------------------------------------------------------------------------------
/avalanche-ops/src/aws/cfn-templates/ssm_install_subnet_chain.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | AWSTemplateFormatVersion: "2010-09-09"
3 | Description: "SSM document to install subnet and chain"
4 |
5 | Parameters:
6 | DocumentName:
7 | Type: String
8 | Default: InstallSubnetChainV0
9 | Description: SSM document Name.
10 |
11 | Resources:
12 | InstallSubnetChain:
13 | # https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_CreateDocument.html
14 | Type: AWS::SSM::Document
15 | Properties:
16 | DocumentType: Command
17 | Name: !Ref DocumentName
18 | Tags:
19 | - Key: DocumentName
20 | Value: !Ref DocumentName
21 | - Key: DocumentVersion
22 | Value: "v1"
23 | Content:
24 | schemaVersion: "2.2"
25 | description: installSubnetChain
26 | parameters:
27 | executionTimeout:
28 | type: String
29 | description: "timeout for run on all instances"
30 | default: "50000"
31 | scriptTimeout:
32 | type: String
33 | description: "timeout for script on individual instance"
34 | default: "3600"
35 | avalanchedArgs:
36 | type: String
37 | description: Arguments passed to avalanched (e.g., to install the subnet and chain).
38 | aliasArgs:
39 | type: String
40 | description: Aliasing arguments for avalanched.
41 | mainSteps:
42 | - action: aws:runShellScript
43 | name: installSubnetChain
44 | inputs:
45 | timeoutSeconds: "{{ scriptTimeout }}"
46 | runCommand:
47 | - |
48 | #!/bin/bash
49 | set -xeu
50 |
51 | /usr/local/bin/avalanched-aws --version
52 |
53 | # to download vm binary, write/update subnet/chain config
54 | /usr/local/bin/avalanched-aws {{ avalanchedArgs }}
55 |
56 | # to set the chain alias
57 | /usr/local/bin/avalanched-aws {{ aliasArgs }}
58 |
59 | # to reload updated configs
60 | sudo systemctl restart --no-block avalanchego.service
61 | sleep 7
62 | sudo tail -50 /var/log/avalanchego/avalanchego.log || true
63 |
64 | # to check the status
65 | # sudo find /var/log/avalanchego/
66 | # sudo tail /var/log/avalanchego/avalanchego.log
67 |
--------------------------------------------------------------------------------
/avalanche-ops/src/aws/mod.rs:
--------------------------------------------------------------------------------
1 | pub mod artifacts;
2 | pub mod avalanched;
3 | pub mod spec;
4 |
--------------------------------------------------------------------------------
/avalanche-ops/src/dev_machine_artifacts.rs:
--------------------------------------------------------------------------------
1 | use std::io::{self, Error, ErrorKind};
2 |
3 | use rust_embed::RustEmbed;
4 |
5 | pub fn asg_ubuntu_yaml() -> io::Result<String> {
6 | #[derive(RustEmbed)]
7 | #[folder = "src/dev-machines/cfn-templates/"]
8 | #[prefix = "src/dev-machines/cfn-templates/"]
9 | struct Asset;
10 | let f = Asset::get("src/dev-machines/cfn-templates/asg_ubuntu.yaml").unwrap();
11 | let s = std::str::from_utf8(f.data.as_ref()).map_err(|e| {
12 | Error::new(
13 | ErrorKind::InvalidInput,
14 | format!("failed to convert embed file to str {}", e),
15 | )
16 | })?;
17 | Ok(s.to_string())
18 | }
19 |
--------------------------------------------------------------------------------
/avalanche-ops/src/lib.rs:
--------------------------------------------------------------------------------
1 | pub mod artifacts;
2 | pub mod aws;
3 | pub mod dev_machine_artifacts;
4 |
--------------------------------------------------------------------------------
/avalanched-aws/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "avalanched-aws"
3 | version = "1.0.0" # https://github.com/ava-labs/avalanche-ops/releases
4 | edition = "2021"
5 | rust-version = "1.70"
6 |
7 | [[bin]]
8 | name = "avalanched-aws"
9 | path = "src/main.rs"
10 |
11 | [dependencies]
12 | avalanche-installer = "0.0.77" # https://crates.io/crates/avalanche-installer
13 | avalanche-ops = { path = "../avalanche-ops" }
14 | avalanche-telemetry-cloudwatch-installer = "0.0.107" # https://crates.io/crates/avalanche-telemetry-cloudwatch-installer
15 | avalanche-types = { version = "0.1.4", features = ["avalanchego", "jsonrpc_client", "subnet_evm"] } # https://crates.io/crates/avalanche-types
16 | aws-ip-provisioner-installer = "0.0.96" # https://crates.io/crates/aws-ip-provisioner-installer
17 | aws-manager = { version = "0.30.2", features = ["autoscaling", "cloudwatch", "ec2", "s3"] } # https://github.com/gyuho/aws-manager/tags
18 | aws-sdk-cloudwatch = "0.30.0" # https://github.com/awslabs/aws-sdk-rust/releases
19 | aws-sdk-ec2 = "0.30.0" # https://github.com/awslabs/aws-sdk-rust/releases
20 | aws-sdk-s3 = "0.30.0" # https://github.com/awslabs/aws-sdk-rust/releases
21 | aws-volume-provisioner-installer = "0.0.121" # https://crates.io/crates/aws-volume-provisioner-installer
22 | clap = { version = "4.4.0", features = ["cargo", "derive"] } # https://github.com/clap-rs/clap/releases
23 | command-manager = "0.0.3"
24 | compress-manager = "0.0.10"
25 | env_logger = "0.10.0"
26 | log = "0.4.20"
27 | prometheus-manager = "0.0.30"
28 | random-manager = "0.0.5"
29 | serde = { version = "1.0.186", features = ["derive"] }
30 | serde_json = "1.0.105"
31 | tempfile = "3.8.0"
32 | tokio = { version = "1.32.0", features = ["full"] } # https://github.com/tokio-rs/tokio/releases
33 |
34 | # https://github.com/cross-rs/cross/wiki/Recipes#openssl
35 | openssl = { version = "0.10", features = ["vendored"] }
36 |
--------------------------------------------------------------------------------
/avalanched-aws/src/agent/cloudwatch.rs:
--------------------------------------------------------------------------------
1 | use std::{io, path::Path};
2 |
3 | use avalanche_types::node;
4 | use aws_manager::cloudwatch;
5 |
6 | /// Sets up log collection agent (e.g., cloudwatch agent)
7 | /// using the systemd service.
8 | pub struct ConfigManager {
9 | /// Used for "log_group_name" and metrics "namespace".
10 | pub id: String,
11 |
12 | /// Used in the CloudWatch log stream names.
13 | pub node_kind: node::Kind,
14 |
15 | /// Directory where avalanche outputs chain logs.
16 | pub log_dir: String,
17 |
18 | /// Set "true" to collect instance-level system logs.
19 | /// Useful to check OOMs via "oom-kill" or "Out of memory: Killed process 8266 (...)"
20 | pub instance_system_logs: bool,
21 | /// Required if metrics collection is enabled.
22 | /// Only used when the metrics collection interval is >0.
23 | pub data_volume_path: Option<String>,
24 |
25 | /// CloudWatch agent configuration file path.
26 | pub config_file_path: String,
27 | }
28 |
29 | /// ref. https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Agent-Configuration-File-Details.html
30 | impl ConfigManager {
31 | /// Set "log_files" to track extra log files via CloudWatch.
32 | /// e.g., "/var/log/avalanched-aws.log"
33 | pub fn sync(
34 | &self,
35 | logs_auto_removal: bool,
36 | log_files: Option<Vec<String>>,
37 | metrics_collect_interval: u32,
38 | ) -> io::Result<()> {
39 | log::info!("syncing CloudWatch configuration JSON file with {metrics_collect_interval}");
40 |
41 | let mut log_collect_list = vec![
42 | // e.g., collect all .log files in the "/var/log/avalanchego" tree
43 | cloudwatch::Collect {
44 | log_group_name: self.id.clone(),
45 | log_stream_name: format!("{{instance_id}}-{}-all", self.node_kind.as_str()),
46 |
47 | // use "**.log" for match all logs in the directory
48 | // ref. /var/log/avalanchego/avalanchego.log contains logs from all chains
49 | file_path: format!("{}/avalanchego.log", self.log_dir),
50 |
51 | // If a log continuously writes to a single file, it is not removed.
52 | // TODO: subnet VM logs are disappearing with this...
53 | auto_removal: Some(false),
54 | retention_in_days: Some(5),
55 |
56 | ..cloudwatch::Collect::default()
57 | },
58 | ];
59 |
60 | if let Some(v) = log_files {
61 | for file_path in v {
62 | // e.g., "/var/log/avalanched-aws.log" becomes "avalanched-aws.log"
63 | let fname = Path::new(&file_path)
64 | .file_name()
65 | .unwrap()
66 | .to_os_string()
67 | .into_string()
68 | .unwrap();
69 |
70 | log_collect_list.push(cloudwatch::Collect {
71 | log_group_name: self.id.clone(),
72 | log_stream_name: format!(
73 | "{{instance_id}}-{}-{}",
74 | self.node_kind.as_str(),
75 | fname
76 | ),
77 |
78 | file_path,
79 |
80 | // If a log continuously writes to a single file, it is not removed.
81 | auto_removal: Some(logs_auto_removal),
82 | retention_in_days: Some(5),
83 |
84 | ..cloudwatch::Collect::default()
85 | });
86 | }
87 | }
88 |
89 | if self.instance_system_logs {
90 | // to check OOMs via "oom-kill" or "Out of memory: Killed process 8266 (srEXiWaHuhNyGwP)"
91 | log_collect_list.push(cloudwatch::Collect {
92 | log_group_name: self.id.clone(),
93 | log_stream_name: format!("{{instance_id}}-{}-syslog", self.node_kind.as_str()),
94 | file_path: String::from("/var/log/syslog"),
95 |
96 | // If a log continuously writes to a single file, it is not removed.
97 | auto_removal: Some(logs_auto_removal),
98 | retention_in_days: Some(5),
99 |
100 | ..cloudwatch::Collect::default()
101 | });
102 | // to check device layer logs
103 | log_collect_list.push(cloudwatch::Collect {
104 | log_group_name: self.id.clone(),
105 | log_stream_name: format!("{{instance_id}}-{}-dmesg", self.node_kind.as_str()),
106 | file_path: String::from("/var/log/dmesg"),
107 |
108 | // If a log continuously writes to a single file, it is not removed.
109 | auto_removal: Some(logs_auto_removal),
110 | retention_in_days: Some(5),
111 |
112 | ..cloudwatch::Collect::default()
113 | });
114 | }
115 |
116 | let mut cloudwatch_config = cloudwatch::Config::default();
117 | cloudwatch_config.logs = Some(cloudwatch::Logs {
118 | force_flush_interval: Some(60),
119 | logs_collected: Some(cloudwatch::LogsCollected {
120 | files: Some(cloudwatch::Files {
121 | collect_list: Some(log_collect_list),
122 | }),
123 | }),
124 | });
125 |
126 | // max "metrics_collect_interval" is 2-day
127 | // "Error : Must be less than or equal to 172800" (2-day)
128 | if metrics_collect_interval > 172800 {
129 | log::warn!("invalid metrics_collect_interval {metrics_collect_interval} so disabling instance_system_metrics");
130 | }
131 | if metrics_collect_interval == 0 {
132 | log::warn!("zero metrics_collect_interval, so disabling instance_system_metrics")
133 | }
134 | if metrics_collect_interval > 0 && metrics_collect_interval <= 172800 {
135 | let mut cw_metrics = cloudwatch::Metrics::new(metrics_collect_interval);
136 | cw_metrics.namespace = self.id.clone();
137 | cw_metrics.metrics_collected.disk = Some(cloudwatch::Disk::new_with_resources(
138 | vec![self.data_volume_path.clone().unwrap()],
139 | metrics_collect_interval,
140 | ));
141 | cloudwatch_config.metrics = Some(cw_metrics);
142 | }
143 |
144 | cloudwatch_config.sync(&self.config_file_path)
145 | }
146 | }
147 |
--------------------------------------------------------------------------------
/avalanched-aws/src/alias_chain/mod.rs:
--------------------------------------------------------------------------------
1 | use std::{collections::HashMap, fs::File, io, path::Path};
2 |
3 | use clap::{Arg, Command};
4 | use serde::{Deserialize, Serialize};
5 |
6 | pub const NAME: &str = "alias-chain";
7 |
8 | /// Defines "alias-chain" option.
9 | #[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Clone)]
10 | pub struct Flags {
11 | pub log_level: String,
12 |
13 | pub chain_id: String,
14 | pub chain_name: String,
15 | }
16 |
17 | pub fn command() -> Command {
18 | Command::new(NAME)
19 | .about("Sets chain alias (WARN: ALWAYS OVERWRITES)")
20 | .arg(
21 | Arg::new("LOG_LEVEL")
22 | .long("log-level")
23 | .short('l')
24 | .help("Sets the log level")
25 | .required(false)
26 | .num_args(1)
27 | .value_parser(["debug", "info"])
28 | .default_value("info"),
29 | )
30 | .arg(
31 | Arg::new("CHAIN_ID")
32 | .long("chain-id")
33 | .help("Chain ID")
34 | .required(false)
35 | .num_args(1),
36 | )
37 | .arg(
38 | Arg::new("CHAIN_NAME")
39 | .long("chain-name")
40 | .help("Chain name to use as an alias")
41 | .required(false)
42 | .num_args(1),
43 | )
44 | }
45 |
46 | pub async fn execute(opts: Flags) -> io::Result<()> {
47 | // ref.
48 | env_logger::init_from_env(
49 | env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, opts.log_level),
50 | );
51 |
52 | // Create alias.json file
53 | // TODO: import type from avalanche-rust
54 | pub type Aliases = HashMap<String, Vec<String>>;
55 | let mut aliases = Aliases::new();
56 | aliases.insert(opts.chain_id, Vec::from([opts.chain_name]));
57 |
58 | // Write it to default location
59 | // TODO: import location from avalanche-rust
60 | pub const DEFAULT_CHAIN_ALIASES_PATH: &str = "/data/avalanche-configs/chains/aliases.json";
61 | let path = Path::new(DEFAULT_CHAIN_ALIASES_PATH);
62 |
63 | let file = File::create(path)?;
64 | ::serde_json::to_writer(file, &aliases)?;
65 |
66 | // At this point avalanchego should be restarted to notice the new alias.
67 | // This is done via the ssm_install_subnet_chain SSM document under src/aws/cfn-templates.
68 | Ok(())
69 | }
70 |
--------------------------------------------------------------------------------
/avalanched-aws/src/install_chain/mod.rs:
--------------------------------------------------------------------------------
1 | use std::{
2 | fs,
3 | io::{self, Error, ErrorKind},
4 | path::Path,
5 | };
6 |
7 | use aws_manager::{self, s3};
8 | use clap::{Arg, Command};
9 | use serde::{Deserialize, Serialize};
10 | use tokio::time::Duration;
11 |
12 | pub const NAME: &str = "install-chain";
13 |
14 | /// Defines "install-chain" option.
15 | #[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Clone)]
16 | pub struct Flags {
17 | pub log_level: String,
18 |
19 | pub s3_region: String,
20 | pub s3_bucket: String,
21 |
22 | pub chain_config_s3_key: String,
23 | pub chain_config_local_path: String,
24 | }
25 |
26 | pub fn command() -> Command {
27 | Command::new(NAME)
28 | .about("Update chain config (WARN: ALWAYS OVERWRITES)")
29 | .arg(
30 | Arg::new("LOG_LEVEL")
31 | .long("log-level")
32 | .short('l')
33 | .help("Sets the log level")
34 | .required(false)
35 | .num_args(1)
36 | .value_parser(["debug", "info"])
37 | .default_value("info"),
38 | )
39 | .arg(
40 | Arg::new("S3_REGION")
41 | .long("s3-region")
42 | .help("Sets the AWS S3 region")
43 | .required(true)
44 | .num_args(1),
45 | )
46 | .arg(
47 | Arg::new("S3_BUCKET")
48 | .long("s3-bucket")
49 | .help("Sets the S3 bucket")
50 | .required(true)
51 | .num_args(1),
52 | )
53 | .arg(
54 | Arg::new("CHAIN_CONFIG_S3_KEY")
55 | .long("chain-config-s3-key")
56 | .help("Sets the S3 key for the chain config")
57 | .required(true)
58 | .num_args(1),
59 | )
60 | .arg(
61 | Arg::new("CHAIN_CONFIG_LOCAL_PATH")
62 | .long("chain-config-local-path")
63 | .help("Chain configuration local file path")
64 | .required(true)
65 | .num_args(1),
66 | )
67 | }
68 |
69 | pub async fn execute(opts: Flags) -> io::Result<()> {
70 | // ref.
71 | env_logger::init_from_env(
72 | env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, opts.log_level),
73 | );
74 |
75 | let shared_config = aws_manager::load_config(
76 | Some(opts.s3_region.clone()),
77 | None,
78 | Some(Duration::from_secs(30)),
79 | )
80 | .await;
81 | let s3_manager = s3::Manager::new(&shared_config);
82 |
83 | let path = Path::new(&opts.chain_config_local_path);
84 | if path.exists() {
85 | log::warn!(
86 | "about to overwrite subnet chain config path {}",
87 | opts.chain_config_local_path
88 | );
89 | }
90 | if let Some(parent_dir) = path.parent() {
91 | log::info!(
92 | "creating parent dir '{}' for subnet chain config",
93 | parent_dir.display()
94 | );
95 | fs::create_dir_all(parent_dir)?;
96 | }
97 |
98 | let exists = s3_manager
99 | .download_executable_with_retries(
100 | &opts.s3_bucket,
101 | &opts.chain_config_s3_key,
102 | &opts.chain_config_local_path,
103 | true,
104 | Duration::from_secs(30),
105 | Duration::from_secs(1),
106 | )
107 | .await
108 | .unwrap();
109 | if !exists {
110 | return Err(Error::new(
111 | ErrorKind::Other,
112 | "chain config s3 file not found",
113 | ));
114 | }
115 |
116 | Ok(())
117 | }
118 |
--------------------------------------------------------------------------------
/avalanched-aws/src/install_subnet/mod.rs:
--------------------------------------------------------------------------------
1 | use std::{
2 | fs,
3 | io::{self, Error, ErrorKind},
4 | path::Path,
5 | str::FromStr,
6 | };
7 |
8 | use avalanche_types::{avalanchego::config as avalanchego_config, ids};
9 | use aws_manager::{self, s3};
10 | use clap::{Arg, Command};
11 | use serde::{Deserialize, Serialize};
12 | use tokio::time::Duration;
13 |
14 | pub const NAME: &str = "install-subnet";
15 |
16 | /// Defines "install-subnet" option.
17 | #[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Clone)]
18 | pub struct Flags {
19 | pub log_level: String,
20 |
21 | pub s3_region: String,
22 | pub s3_bucket: String,
23 |
24 | pub subnet_config_s3_key: String,
25 | pub subnet_config_local_path: String,
26 |
27 | pub vm_binary_s3_key: String,
28 | pub vm_binary_local_path: String,
29 |
30 | pub subnet_id_to_track: String,
31 | pub avalanchego_config_path: String,
32 | }
33 |
34 | pub fn command() -> Command {
35 | Command::new(NAME)
36 | .about(
37 | "Download Vm binary, track subnet Id, update subnet config (WARN: ALWAYS OVERWRITES)",
38 | )
39 | .arg(
40 | Arg::new("LOG_LEVEL")
41 | .long("log-level")
42 | .short('l')
43 | .help("Sets the log level")
44 | .required(false)
45 | .num_args(1)
46 | .value_parser(["debug", "info"])
47 | .default_value("info"),
48 | )
49 | .arg(
50 | Arg::new("S3_REGION")
51 | .long("s3-region")
52 | .help("Sets the AWS S3 region")
53 | .required(true)
54 | .num_args(1),
55 | )
56 | .arg(
57 | Arg::new("S3_BUCKET")
58 | .long("s3-bucket")
59 | .help("Sets the S3 bucket")
60 | .required(true)
61 | .num_args(1),
62 | )
63 | .arg(
64 | Arg::new("SUBNET_CONFIG_S3_KEY")
65 | .long("subnet-config-s3-key")
66 | .help("Sets the S3 key for the subnet config (if empty, do not download)")
67 | .required(false)
68 | .num_args(1),
69 | )
70 | .arg(
71 | Arg::new("SUBNET_CONFIG_LOCAL_PATH")
72 | .long("subnet-config-local-path")
73 | .help("Subnet configuration local file path (if empty, do not download)")
74 | .required(false)
75 | .num_args(1),
76 | )
77 | .arg(
78 | Arg::new("VM_BINARY_S3_KEY")
79 | .long("vm-binary-s3-key")
80 | .help("Download VM binary from S3")
81 | .required(true)
82 | .num_args(1),
83 | )
84 | .arg(
85 | Arg::new("VM_BINARY_LOCAL_PATH")
86 | .long("vm-binary-local-path")
87 | .help("VM binary local file path")
88 | .required(true)
89 | .num_args(1),
90 | )
91 | .arg(
92 | Arg::new("SUBNET_ID_TO_TRACK")
93 | .long("subnet-id-to-track")
94 | .help("Subnet Id to track via avalanchego config file")
95 | .required(true)
96 | .num_args(1),
97 | )
98 | .arg(
99 | Arg::new("AVALANCHEGO_CONFIG_PATH")
100 | .long("avalanchego-config-path")
101 | .help("avalanchego config file path")
102 | .required(true)
103 | .num_args(1),
104 | )
105 | }
106 |
107 | pub async fn execute(opts: Flags) -> io::Result<()> {
108 | // ref.
109 | env_logger::init_from_env(
110 | env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, opts.log_level),
111 | );
112 |
113 | let shared_config = aws_manager::load_config(
114 | Some(opts.s3_region.clone()),
115 | None,
116 | Some(Duration::from_secs(30)),
117 | )
118 | .await;
119 | let s3_manager = s3::Manager::new(&shared_config);
120 |
121 | if !opts.subnet_config_s3_key.is_empty() && !opts.subnet_config_local_path.is_empty() {
122 | let path = Path::new(&opts.subnet_config_local_path);
123 | if path.exists() {
124 | log::warn!(
125 | "about to overwrite subnet config path {}",
126 | opts.subnet_config_local_path
127 | );
128 | }
129 | if let Some(parent_dir) = path.parent() {
130 | log::info!(
131 | "creating parent dir '{}' for subnet config",
132 | parent_dir.display()
133 | );
134 | fs::create_dir_all(parent_dir)?;
135 | }
136 |
137 | let exists = s3_manager
138 | .download_executable_with_retries(
139 | &opts.s3_bucket,
140 | &opts.subnet_config_s3_key,
141 | &opts.subnet_config_local_path,
142 | true,
143 | Duration::from_secs(30),
144 | Duration::from_secs(1),
145 | )
146 | .await
147 | .unwrap();
148 | if !exists {
149 | return Err(Error::new(
150 | ErrorKind::Other,
151 | "subnet config s3 file not found",
152 | ));
153 | }
154 | } else {
155 | log::info!("skipping downloading subnet config since empty");
156 | }
157 |
158 | {
159 | let path = Path::new(&opts.vm_binary_local_path);
160 | if path.exists() {
161 | log::warn!(
162 | "about to overwrite VM binary path {}",
163 | opts.vm_binary_local_path
164 | );
165 | }
166 | if let Some(parent_dir) = path.parent() {
167 | log::info!(
168 | "creating parent dir '{}' for vm binary",
169 | parent_dir.display()
170 | );
171 | fs::create_dir_all(parent_dir)?;
172 | }
173 |
174 | let exists = s3_manager
175 | .download_executable_with_retries(
176 | &opts.s3_bucket,
177 | &opts.vm_binary_s3_key,
178 | &opts.vm_binary_local_path,
179 | true,
180 | Duration::from_secs(30),
181 | Duration::from_secs(1),
182 | )
183 | .await
184 | .unwrap();
185 | if !exists {
186 | return Err(Error::new(ErrorKind::Other, "vm binary s3 file not found"));
187 | }
188 | }
189 |
190 | {
191 | log::info!(
192 | "adding a subnet-id '{}' to track-subnets flag in {}",
193 | opts.subnet_id_to_track,
194 | opts.avalanchego_config_path,
195 | );
196 | let converted = ids::Id::from_str(&opts.subnet_id_to_track)?;
197 | log::info!("validated a subnet-id '{}'", converted);
198 |
199 | let mut config = avalanchego_config::Config::load(&opts.avalanchego_config_path)?;
200 | if let Some(existing_config_path) = &config.config_file {
201 | if existing_config_path.ne(&opts.avalanchego_config_path) {
202 | log::warn!(
203 | "overwriting existing config-file {} to {}",
204 | existing_config_path,
205 | opts.avalanchego_config_path
206 | );
207 | config.config_file = Some(opts.avalanchego_config_path.clone());
208 | }
209 | }
210 | config.add_track_subnets(Some(converted.to_string()));
211 |
212 | config.sync(None)?;
213 | }
214 |
215 | Ok(())
216 | }
217 |
--------------------------------------------------------------------------------
/avalanched-aws/src/main.rs:
--------------------------------------------------------------------------------
1 | mod agent;
2 | mod alias_chain;
3 | mod install_artifacts;
4 | mod install_chain;
5 | mod install_subnet;
6 |
7 | use clap::{crate_version, Command};
8 |
9 | pub const APP_NAME: &str = "avalanched-aws";
10 |
11 | #[tokio::main]
12 | async fn main() {
13 | let matches = Command::new(APP_NAME)
14 | .version(crate_version!())
15 | .about("Runs an Avalanche agent (daemon) on AWS")
16 | .subcommands(vec![
17 | agent::command(),
18 | install_artifacts::command(),
19 | install_chain::command(),
20 | install_subnet::command(),
21 | alias_chain::command(),
22 | ])
23 | .get_matches();
24 |
25 | println!("{} version: {}", APP_NAME, crate_version!());
26 |
27 | match matches.subcommand() {
28 | Some((agent::NAME, sub_matches)) => {
29 | let opts = agent::Flags {
30 | log_level: sub_matches
31 | .get_one::("LOG_LEVEL")
32 | .unwrap_or(&String::from("info"))
33 | .clone(),
34 | use_default_config: sub_matches.get_flag("USE_DEFAULT_CONFIG"),
35 | publish_periodic_node_info: sub_matches.get_flag("PUBLISH_PERIODIC_NODE_INFO"),
36 | };
37 | agent::execute(opts).await.unwrap();
38 | }
39 |
40 | Some((install_artifacts::NAME, sub_matches)) => {
41 | let v = sub_matches
42 | .get_one::<String>("AVALANCHEGO_RELEASE_TAG")
43 | .unwrap_or(&String::new())
44 | .clone();
45 | let avalanchego_release_tag = if v.is_empty() { None } else { Some(v.clone()) };
46 |
47 | install_artifacts::execute(
48 | &sub_matches
49 | .get_one::<String>("LOG_LEVEL")
50 | .unwrap_or(&String::from("info"))
51 | .clone(),
52 | sub_matches.get_one::<String>("S3_REGION").unwrap(),
53 | sub_matches
54 | .get_one::<String>("S3_BUCKET")
55 | .unwrap_or(&String::new()),
56 | sub_matches
57 | .get_one::<String>("AVALANCHEGO_S3_KEY")
58 | .unwrap_or(&String::new()),
59 | sub_matches
60 | .get_one::<String>("AVALANCHEGO_LOCAL_PATH")
61 | .unwrap_or(&String::new()),
62 | avalanchego_release_tag,
63 | sub_matches
64 | .get_one::<String>("OS_TYPE")
65 | .unwrap_or(&String::from("ubuntu20.04")),
66 | sub_matches
67 | .get_one::<String>("AWS_VOLUME_PROVISIONER_S3_KEY")
68 | .unwrap_or(&String::new()),
69 | sub_matches
70 | .get_one::<String>("AWS_VOLUME_PROVISIONER_LOCAL_PATH")
71 | .unwrap_or(&String::new()),
72 | sub_matches
73 | .get_one::<String>("AWS_IP_PROVISIONER_S3_KEY")
74 | .unwrap_or(&String::new()),
75 | sub_matches
76 | .get_one::<String>("AWS_IP_PROVISIONER_LOCAL_PATH")
77 | .unwrap_or(&String::new()),
78 | sub_matches
79 | .get_one::<String>("AVALANCHE_TELEMETRY_CLOUDWATCH_S3_KEY")
80 | .unwrap_or(&String::new()),
81 | sub_matches
82 | .get_one::<String>("AVALANCHE_TELEMETRY_CLOUDWATCH_LOCAL_PATH")
83 | .unwrap_or(&String::new()),
84 | )
85 | .await
86 | .unwrap();
87 | }
88 |
89 | Some((install_subnet::NAME, sub_matches)) => {
90 | install_subnet::execute(install_subnet::Flags {
91 | log_level: sub_matches
92 | .get_one::<String>("LOG_LEVEL")
93 | .unwrap_or(&String::from("info"))
94 | .to_string(),
95 | s3_region: sub_matches
96 | .get_one::<String>("S3_REGION")
97 | .unwrap()
98 | .to_string(),
99 | s3_bucket: sub_matches
100 | .get_one::<String>("S3_BUCKET")
101 | .unwrap()
102 | .to_string(),
103 | subnet_config_s3_key: sub_matches
104 | .get_one::<String>("SUBNET_CONFIG_S3_KEY")
105 | .unwrap_or(&String::new())
106 | .to_string(),
107 | subnet_config_local_path: sub_matches
108 | .get_one::<String>("SUBNET_CONFIG_LOCAL_PATH")
109 | .unwrap_or(&String::new())
110 | .to_string(),
111 | vm_binary_s3_key: sub_matches
112 | .get_one::<String>("VM_BINARY_S3_KEY")
113 | .unwrap()
114 | .to_string(),
115 | vm_binary_local_path: sub_matches
116 | .get_one::<String>("VM_BINARY_LOCAL_PATH")
117 | .unwrap()
118 | .to_string(),
119 | subnet_id_to_track: sub_matches
120 | .get_one::<String>("SUBNET_ID_TO_TRACK")
121 | .unwrap()
122 | .to_string(),
123 | avalanchego_config_path: sub_matches
124 | .get_one::<String>("AVALANCHEGO_CONFIG_PATH")
125 | .unwrap()
126 | .to_string(),
127 | })
128 | .await
129 | .unwrap();
130 | }
131 |
132 | Some((install_chain::NAME, sub_matches)) => {
133 | install_chain::execute(install_chain::Flags {
134 | log_level: sub_matches
135 | .get_one::<String>("LOG_LEVEL")
136 | .unwrap_or(&String::from("info"))
137 | .to_string(),
138 | s3_region: sub_matches
139 | .get_one::<String>("S3_REGION")
140 | .unwrap()
141 | .to_string(),
142 | s3_bucket: sub_matches
143 | .get_one::<String>("S3_BUCKET")
144 | .unwrap()
145 | .to_string(),
146 | chain_config_s3_key: sub_matches
147 | .get_one::<String>("CHAIN_CONFIG_S3_KEY")
148 | .unwrap()
149 | .to_string(),
150 | chain_config_local_path: sub_matches
151 | .get_one::<String>("CHAIN_CONFIG_LOCAL_PATH")
152 | .unwrap()
153 | .to_string(),
154 | })
155 | .await
156 | .unwrap();
157 | }
158 |
159 | Some((alias_chain::NAME, sub_matches)) => {
160 | alias_chain::execute(alias_chain::Flags {
161 | log_level: sub_matches
162 | .get_one::<String>("LOG_LEVEL")
163 | .unwrap_or(&String::from("info"))
164 | .to_string(),
165 | chain_name: sub_matches
166 | .get_one::<String>("CHAIN_NAME")
167 | .unwrap()
168 | .to_string(),
169 | chain_id: sub_matches
170 | .get_one::<String>("CHAIN_ID")
171 | .unwrap()
172 | .to_string(),
173 | })
174 | .await
175 | .unwrap();
176 | }
177 |
178 | _ => unreachable!("unknown subcommand"),
179 | }
180 | }
181 |
--------------------------------------------------------------------------------
/avalancheup-aws/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "avalancheup-aws"
3 | version = "1.0.0" # https://github.com/ava-labs/avalanche-ops/releases
4 | edition = "2021"
5 | rust-version = "1.70"
6 |
7 | [[bin]]
8 | name = "avalancheup-aws"
9 | path = "src/main.rs"
10 |
11 | [dependencies]
12 | avalanche-ops = { path = "../avalanche-ops" }
13 | avalanche-types = { version = "0.1.4", features = ["avalanchego", "jsonrpc_client", "wallet", "subnet", "subnet_evm", "kms_aws"] } # https://crates.io/crates/avalanche-types
14 | aws-dev-machine = "0.0.17"
15 | aws-manager = { version = "0.30.2", features = ["cloudformation", "cloudwatch", "ec2", "s3", "ssm", "sts"] } # https://github.com/gyuho/aws-manager/tags
16 | aws-sdk-cloudformation = "0.30.0" # https://github.com/awslabs/aws-sdk-rust/releases
17 | aws-sdk-ec2 = "0.30.0" # https://github.com/awslabs/aws-sdk-rust/releases
18 | aws-sdk-s3 = "0.30.0" # https://github.com/awslabs/aws-sdk-rust/releases
19 | aws-sdk-ssm = "0.30.0" # https://github.com/awslabs/aws-sdk-rust/releases
20 | clap = { version = "4.4.0", features = ["cargo", "derive"] } # https://github.com/clap-rs/clap/releases
21 | compress-manager = "0.0.10"
22 | crossterm = "0.27.0"
23 | dialoguer = "0.10.4"
24 | dir-manager = "0.0.1"
25 | env_logger = "0.10.0"
26 | id-manager = "0.0.3"
27 | log = "0.4.20"
28 | prefix-manager = "0.0.2"
29 | primitive-types = { version = "0.12.1", features = ["impl-serde"], optional = false } # https://crates.io/crates/primitive-types
30 | prometheus-manager = "0.0.30"
31 | random-manager = "0.0.5"
32 | serde = { version = "1.0.186", features = ["derive"] }
33 | serde_json = "1.0.105" # https://github.com/serde-rs/json/releases
34 | serde_yaml = "0.9.25" # https://github.com/dtolnay/serde-yaml/releases
35 | signal-hook = "0.3.17"
36 | ssh-scp-manager = "0.0.4"
37 | tokio = { version = "1.32.0", features = ["full"] } # https://github.com/tokio-rs/tokio/releases
38 |
39 | [dev-dependencies]
40 | tempfile = "3.8.0"
41 |
--------------------------------------------------------------------------------
/avalancheup-aws/example-aws.md:
--------------------------------------------------------------------------------
1 |
2 | ## Example: set up custom network on AWS
3 |
4 | *See ["Custom network with NO initial database state, with subnet-evm"](recipes-aws.md#custom-network-with-no-initial-database-state-with-subnet-evm) for the full commands.*
5 |
6 | Write the configuration file with some default values:
7 |
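The screenshots correspond to the `avalancheup-aws default-spec` step; a minimal sketch with flags taken from [recipes-aws.md](recipes-aws.md) (values are placeholders):

```bash
# writes a spec file with default values for a custom network with one subnet-evm
avalancheup-aws default-spec \
  --network-name custom \
  --keys-to-generate 5 \
  --subnet-evms 1
```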
8 | 
9 |
10 | 
11 |
12 |
13 |
14 | Then apply the configuration:
15 |
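A minimal sketch of the apply step, assuming the `--spec-file-path` flag and the spec file written by `default-spec` (the path below is a placeholder):

```bash
# provisions the S3 bucket, CloudFormation stacks, and EC2 instances described in the spec
avalancheup-aws apply \
  --spec-file-path <path printed by default-spec>
```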
16 | 
17 |
18 | 
19 |
20 |
21 |
22 | Wait for anchor nodes to be ready:
23 |
24 | 
25 |
26 | 
27 |
28 |
29 |
30 | Check your S3 bucket for generated artifacts **(all private keys are encrypted using KMS/envelope encryption)**:
31 |
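To list the generated artifacts from the command line (the bucket name is a placeholder; use the one recorded in your spec file):

```bash
# the bucket is created per deployment; replace with yours
aws s3 ls s3://<your-avalanche-ops-bucket>/ --recursive
```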
32 | 
33 |
34 |
35 |
36 | Check the anchor nodes:
37 |
38 | 
39 |
40 | 
41 |
42 | 
43 |
44 | 
45 |
46 | 
47 |
48 |
49 |
50 | Check the non-anchor nodes created in separate Auto Scaling Groups:
51 |
52 | 
53 |
54 | 
55 |
56 |
57 |
58 | Check how the non-anchor nodes discover the anchor nodes and publish their own node information:
59 |
60 | 
61 |
62 | 
63 |
64 | 
65 |
66 | 
67 |
68 |
69 |
70 | Check that logs and metrics from the nodes are being published:
71 |
72 | 
73 |
74 | 
75 |
76 |
77 |
78 | Now that the network is ready, check the metrics and health URL (or access via public IPv4 address):
79 |
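For example, against any node's public IPv4 address or DNS name (standard avalanchego endpoints; the host is a placeholder):

```bash
# node health
curl http://<node-public-ip>:9650/ext/health

# Prometheus-style metrics
curl http://<node-public-ip>:9650/ext/metrics
```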
80 | 
81 |
82 | 
83 |
84 | 
85 |
86 | 
87 |
88 |
89 |
90 | Now the custom network is ready! Check out the genesis file:
91 |
92 | 
93 |
94 | 
95 |
96 |
97 |
98 | To interact with C-chain via MetaMask, add the DNS RPC endpoint as a custom network as follows:
99 |
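The RPC URL to enter in MetaMask is the node's C-chain endpoint; for example, to confirm it responds (host is a placeholder):

```bash
# the C-chain RPC endpoint each node exposes; use this URL as the MetaMask RPC URL
curl -X POST -H 'content-type: application/json' \
  --data '{"jsonrpc":"2.0","method":"eth_chainId","params":[],"id":1}' \
  http://<your-node-dns-or-ip>:9650/ext/bc/C/rpc
```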
100 | 
101 |
102 | 
103 |
104 | 
105 |
106 | Or use [Core wallet](https://chrome.google.com/webstore/detail/core/agoakfejjabomempkjlepdflaleeobhb):
107 |
108 | 
109 |
110 | 
111 |
112 | 
113 |
114 |
115 |
116 | Import the test keys for pre-funded wallets:
117 |
118 | 
119 |
120 | 
121 |
122 | 
123 |
124 | 
125 |
126 |
127 |
128 | ### Optional: install `subnet-evm` in the custom network
129 |
130 | To set up [`subnet-evm`](https://github.com/ava-labs/subnet-evm), use [`subnet-cli`](https://github.com/ava-labs/subnet-cli) to add two non-anchor nodes as validators:
131 |
132 | 
133 |
134 | 
135 |
136 | To create a custom blockchain for `subnet-evm`:
137 |
138 | 
139 |
140 | 
141 |
142 | Restart the nodes with the tracked subnet ID as instructed **(this will be automated in future `avalanche-ops` releases)**:
143 |
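These are the same steps that the `avalanched-aws install-subnet` subcommand (driven by the `ssm_install_subnet_chain` document) automates on each node; a rough sketch with placeholder values:

```bash
# download the VM binary, add the subnet ID to track-subnets, and update the avalanchego config
/usr/local/bin/avalanched-aws install-subnet \
  --s3-region <region> \
  --s3-bucket <bucket> \
  --vm-binary-s3-key <s3 key> \
  --vm-binary-local-path <plugin path> \
  --subnet-id-to-track <subnet id> \
  --avalanchego-config-path <config path>

# reload the updated config
sudo systemctl restart --no-block avalanchego.service
```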
144 | 
145 |
146 | 
147 |
148 | 
149 |
150 | 
151 |
152 | To add the `subnet-evm` network to MetaMask, use the newly created blockchain ID in the RPC endpoint:
153 |
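The URL follows the standard avalanchego chain RPC path, with the new blockchain ID in place of `C` (host and blockchain ID are placeholders):

```bash
# subnet-evm RPC endpoint; use this URL as the MetaMask RPC URL for the new network
curl -X POST -H 'content-type: application/json' \
  --data '{"jsonrpc":"2.0","method":"eth_chainId","params":[],"id":1}' \
  http://<your-node-dns-or-ip>:9650/ext/bc/<blockchainID>/rpc
```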
154 | 
155 |
156 | Note that the existing test keys are pre-funded (as in C-chain):
157 |
158 | 
159 |
160 | To look at the `subnet-evm` logs:
161 |
162 | 
163 |
164 | 
165 |
166 |
167 |
168 | To shut down the network, run `avalancheup-aws delete` command:
169 |
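A minimal sketch, assuming the same `--spec-file-path` flag as `apply` (check `avalancheup-aws delete --help` for the exact cleanup flags in your release):

```bash
# tears down the CloudFormation stacks created by "apply"
avalancheup-aws delete \
  --spec-file-path <path to the spec file used for apply>
```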
170 | 
171 |
172 | 
173 |
174 | 
175 |
176 |
177 |
--------------------------------------------------------------------------------
/avalancheup-aws/img/avalancheup.drawio.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/avalancheup.drawio.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/01.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/01.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/02.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/02.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/03.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/03.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/04.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/04.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/05.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/05.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/06.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/06.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/07.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/07.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/08.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/08.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/09.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/09.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/10.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/10.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/11.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/11.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/12.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/12.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/13.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/13.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/14.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/14.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/15.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/15.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/16.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/16.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/17.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/17.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/18.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/18.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/19.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/19.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/20.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/20.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/21.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/21.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/22.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/22.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/23.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/23.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/24.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/24.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/25.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/25.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/26.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/26.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/27.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/27.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/28.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/28.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/29.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/29.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/30.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/30.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/31.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/31.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/32.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/32.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/33.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/33.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/34.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/34.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/35.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/35.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/36.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/36.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/37.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/37.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/38.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/38.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/39.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/39.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/40.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/40.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/41.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/41.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/42.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/42.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/43.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/43.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/44.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/44.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/45.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/45.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/46.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/46.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/47.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/47.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/48.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/48.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/core-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/core-1.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/core-2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/core-2.png
--------------------------------------------------------------------------------
/avalancheup-aws/img/example-aws/core-3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/avalancheup-aws/img/example-aws/core-3.png
--------------------------------------------------------------------------------
/avalancheup-aws/recipes-aws.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | To download from release, visit https://github.com/ava-labs/avalanche-ops/releases.
4 |
5 | To compile from source:
6 |
7 | ```bash
8 | # if you don't have rust installed locally
9 | curl -sSf https://sh.rustup.rs | sh -s -- -y \
10 | && . ${HOME}/.cargo/env \
11 | && rustc --version && cargo --version \
12 | && which rustc && which cargo
13 | ```
14 |
15 | ```bash
16 | # to build binaries
17 | ./scripts/build.release.sh
18 | ```
19 |
20 | ```bash
21 | # 1. simple, default spot instance + elastic IP
22 | # all plugins/binaries are downloaded automatically on the hosts
23 | avalancheup-aws default-spec --network-name custom
24 | ```
25 |
26 | ```bash
27 | # 2. simple, default spot instance + elastic IP, subnet-evm
28 | # all plugins/binaries are downloaded automatically on the hosts
29 | avalancheup-aws default-spec --network-name custom --subnet-evms 1
30 | ```
31 |
32 | ```bash
33 | # 3. simple, subnet-evm with custom binaries
34 | # some plugins/binaries are downloaded automatically from S3 to the hosts
35 | avalancheup-aws default-spec \
36 | --upload-artifacts-avalanchego-local-bin ${AVALANCHE_BIN_PATH} \
37 | --upload-artifacts-plugin-local-dir ${AVALANCHE_PLUGIN_DIR_PATH} \
38 | --instance-mode=on-demand \
39 | --ip-mode=elastic \
40 | --network-name custom \
41 | --keys-to-generate 5 \
42 | --subnet-evms 1
43 | ```
44 |
45 | ```bash
46 | # 4. advanced, subnet-evm with custom binaries
47 | # all plugins/binaries are downloaded automatically from S3 to the hosts
48 | AVALANCHED_BIN_PATH=/home/ubuntu/avalanche-ops/target/release/avalanched-aws
49 | AWS_VOLUME_PROVISIONER_BIN_PATH=/tmp/aws-volume-provisioner-new
50 | AWS_IP_PROVISIONER_BIN_PATH=/tmp/aws-ip-provisioner-new
51 | AVALANCHE_TELEMETRY_CLOUDWATCH_BIN_PATH=/tmp/avalanche-telemetry-cloudwatch
52 | AVALANCHE_BIN_PATH=/home/ubuntu/go/src/github.com/ava-labs/avalanchego/build/avalanchego
53 | AVALANCHE_PLUGIN_DIR_PATH=/home/ubuntu/go/src/github.com/ava-labs/avalanchego/build/plugin
54 |
55 | cd /home/ubuntu/avalanche-ops
56 | avalancheup-aws default-spec \
57 | --region ap-northeast-2 \
58 | --upload-artifacts-avalanched-aws-local-bin ${AVALANCHED_BIN_PATH} \
59 | --upload-artifacts-aws-volume-provisioner-local-bin ${AWS_VOLUME_PROVISIONER_BIN_PATH} \
60 | --upload-artifacts-aws-ip-provisioner-local-bin ${AWS_IP_PROVISIONER_BIN_PATH} \
61 | --upload-artifacts-avalanche-telemetry-cloudwatch-local-bin ${AVALANCHE_TELEMETRY_CLOUDWATCH_BIN_PATH} \
62 | --upload-artifacts-avalanchego-local-bin ${AVALANCHE_BIN_PATH} \
63 | --upload-artifacts-plugin-local-dir ${AVALANCHE_PLUGIN_DIR_PATH} \
64 | --instance-mode=on-demand \
65 | --ip-mode=elastic \
66 | --network-name custom \
67 | --keys-to-generate 50 \
68 | --keys-to-generate-type hot \
69 | --subnet-evms 1
70 | ```
71 |
--------------------------------------------------------------------------------
/avalancheup-aws/src/README.md:
--------------------------------------------------------------------------------
1 |
2 | To write some default subnet configuration:
3 |
4 | ```bash
5 | ./target/release/avalancheup-aws subnet-config \
6 | --proposer-min-block-delay 1000000000 \
7 | --file-path /tmp/subnet-config.json
8 |
9 | cat /tmp/subnet-config.json
10 | ```
11 |
12 | To write some default subnet-evm chain configuration:
13 |
14 | ```bash
15 | ./target/release/avalancheup-aws subnet-evm chain-config \
16 | --file-path /tmp/subnet-evm.chain-config.json
17 | ```
18 |
19 | To write some default subnet-evm genesis:
20 |
21 | ```bash
22 | ./target/release/avalancheup-aws subnet-evm genesis \
23 | --seed-eth-addresses 0x75E3DC1926Ca033Ee06B0C378B0079241921e2AA,0x557FDFCAEff5daDF7287344f4E30172e56EC7aec \
24 | --file-path /tmp/subnet-evm.genesis.json
25 | ```
26 |
--------------------------------------------------------------------------------
/avalancheup-aws/src/apply/dev_machine.rs:
--------------------------------------------------------------------------------
1 | //! Module for dev machine specific functionality.
2 | use std::io::{Error, ErrorKind};
3 | use std::path::PathBuf;
4 |
5 | /// Parses the user-provided path to a script for the dev machine to execute.
6 | pub fn validate_path(provided_path: PathBuf) -> Result<PathBuf, Error> {
7 | if !provided_path.exists() {
8 | return Err(Error::new(ErrorKind::NotFound, format!("{} not found", provided_path.display())));
9 | }
10 | if !provided_path.is_file() {
11 | return Err(Error::new(
12 | ErrorKind::InvalidInput,
13 | format!("{} is not a file", provided_path.display()),
14 | ));
15 | }
16 |
17 | Ok(provided_path)
18 | }
19 |
20 | #[cfg(test)] mod test {
21 |
22 | #[test]
23 | fn test_parse_path() {
24 | let dir = tempfile::tempdir().unwrap();
25 | let file_path = dir.path().join("my-script.sh");
26 | let _ = std::fs::File::create(file_path.clone()).unwrap();
27 |
28 | let result = super::validate_path(file_path);
29 | assert!(result.is_ok());
30 |
31 | // pass the directory itself instead of a file (expect an error)
32 | let result = super::validate_path(dir.path().to_path_buf());
33 | assert!(result.is_err());
34 | }
35 | }
36 |
--------------------------------------------------------------------------------
/avalancheup-aws/src/endpoints/mod.rs:
--------------------------------------------------------------------------------
1 | use std::{
2 | collections::{BTreeMap, BTreeSet},
3 | io::{self, stdout},
4 | };
5 |
6 | use avalanche_types::{ids, jsonrpc};
7 | use clap::{Arg, Command};
8 | use crossterm::{
9 | execute,
10 | style::{Color, Print, ResetColor, SetForegroundColor},
11 | };
12 | use serde::{Deserialize, Serialize};
13 |
14 | pub const NAME: &str = "endpoints";
15 |
16 | pub fn command() -> Command {
17 | Command::new(NAME)
18 | .about("Queries RPC endpoints")
19 | .arg(
20 | Arg::new("LOG_LEVEL")
21 | .long("log-level")
22 | .short('l')
23 | .help("Sets the log level")
24 | .required(false)
25 | .num_args(1)
26 | .value_parser(["debug", "info"])
27 | .default_value("info"),
28 | )
29 | .arg(
30 | Arg::new("CHAIN_RPC_URLS")
31 | .long("chain-rpc-urls")
32 | .help("Comma-separated chain RPC URLs")
33 | .required(false)
34 | .num_args(1)
35 | .default_value("http://localhost:9650/ext/C/rpc"),
36 | )
37 | }
38 |
39 | #[derive(Serialize, Deserialize, Debug, Clone)]
40 | pub struct Peer {
41 | pub http_rpc: String,
42 |
43 | #[serde(flatten)]
44 | pub peer: jsonrpc::info::Peer,
45 | }
46 |
47 | pub async fn execute(log_level: &str, chain_rpc_urls: Vec<String>) -> io::Result<()> {
48 | // ref.
49 | env_logger::init_from_env(
50 | env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, log_level),
51 | );
52 |
53 | let mut all_nodes_set = BTreeSet::new();
54 | let mut all_blockchains: BTreeSet<jsonrpc::platformvm::Blockchain> = BTreeSet::new();
55 | for u in chain_rpc_urls.iter() {
56 | let resp = jsonrpc::client::info::get_node_id(u).await.unwrap();
57 | log::info!(
58 | "chain rpc url '{u}' node id: {}",
59 | serde_json::to_string_pretty(&resp).unwrap()
60 | );
61 | all_nodes_set.insert(resp.result.unwrap().node_id);
62 |
63 | let resp = jsonrpc::client::p::get_blockchains(u).await.unwrap();
64 | log::info!(
65 | "blockchains at '{u}': {}",
66 | serde_json::to_string_pretty(&resp).unwrap()
67 | );
68 | if let Some(rs) = &resp.result {
69 | if let Some(bs) = &rs.blockchains {
70 | for b in bs.iter() {
71 | all_blockchains.insert(b.clone());
72 | }
73 | }
74 | }
75 | }
76 | let mut all_node_ids = Vec::new();
77 | for n in all_nodes_set.iter() {
78 | all_node_ids.push(*n);
79 | }
80 |
81 | let mut node_id_to_peer: BTreeMap<ids::node::Id, Peer> = BTreeMap::new();
82 | let mut tracked_subnet_id_to_node_ids: BTreeMap<ids::Id, BTreeSet<ids::node::Id>> =
83 | BTreeMap::new();
84 | for u in chain_rpc_urls.iter() {
85 | let resp = jsonrpc::client::info::peers(u, Some(all_node_ids.clone()))
86 | .await
87 | .unwrap();
88 | log::info!(
89 | "peers at '{u}': {}",
90 | serde_json::to_string_pretty(&resp).unwrap()
91 | );
92 |
93 | if let Some(rs) = &resp.result {
94 | if let Some(ps) = &rs.peers {
95 | for p in ps.iter() {
96 | if !all_nodes_set.contains(&p.node_id) {
97 | continue;
98 | }
99 |
100 | node_id_to_peer.insert(
101 | p.node_id,
102 | Peer {
103 | http_rpc: format!("http://{}:9650", p.ip),
104 |
105 | peer: p.clone(),
106 | },
107 | );
108 |
109 | for tracked_subnet_id in &p.tracked_subnets {
110 | if let Some(v) = tracked_subnet_id_to_node_ids.get_mut(tracked_subnet_id) {
111 | v.insert(p.node_id);
112 | continue;
113 | }
114 |
115 | let mut ss = BTreeSet::new();
116 | ss.insert(p.node_id);
117 | tracked_subnet_id_to_node_ids.insert(*tracked_subnet_id, ss);
118 | }
119 | }
120 | }
121 | }
122 | }
123 |
124 | println!();
125 | execute!(
126 | stdout(),
127 | SetForegroundColor(Color::Green),
128 | Print("\n\n\nALL TRACKED SUBNETS\n"),
129 | ResetColor
130 | )?;
131 | for p in tracked_subnet_id_to_node_ids.iter() {
132 | println!();
133 | println!("subnet id '{}' are tracked by", p.0);
134 | for node_id in p.1.iter() {
135 | println!("{}", node_id);
136 | }
137 | }
138 |
139 | println!();
140 | execute!(
141 | stdout(),
142 | SetForegroundColor(Color::Green),
143 | Print("\n\n\nALL PEERS\n"),
144 | ResetColor
145 | )?;
146 | for p in node_id_to_peer.iter() {
147 | println!();
148 | println!("{}:\n{}", p.0, serde_yaml::to_string(&p.1).unwrap());
149 | }
150 |
151 | println!();
152 | execute!(
153 | stdout(),
154 | SetForegroundColor(Color::Green),
155 | Print("\n\n\nALL BLOCKCHAINS\n"),
156 | ResetColor
157 | )?;
158 | for blkc in all_blockchains.iter() {
159 | println!("{}", serde_yaml::to_string(blkc).unwrap());
160 | }
161 |
162 | Ok(())
163 | }
164 |
--------------------------------------------------------------------------------
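The peers loop in endpoints/mod.rs above groups node IDs per tracked subnet with an explicit get_mut-then-insert branch. Below is a minimal, self-contained sketch of the same grouping using the BTreeMap entry API, with plain string IDs standing in for the avalanche-types ID types; it illustrates the pattern and is not code from the repository.

use std::collections::{BTreeMap, BTreeSet};

fn main() {
    // Hypothetical (subnet_id, node_id) pairs standing in for the values
    // extracted from the peers response in endpoints/mod.rs.
    let pairs = vec![
        ("subnet-a", "node-1"),
        ("subnet-a", "node-2"),
        ("subnet-b", "node-1"),
    ];

    // Group node ids per tracked subnet: the entry API inserts an empty set
    // the first time a subnet id is seen, then adds the node id to it.
    let mut tracked: BTreeMap<&str, BTreeSet<&str>> = BTreeMap::new();
    for (subnet_id, node_id) in pairs {
        tracked.entry(subnet_id).or_default().insert(node_id);
    }

    for (subnet_id, node_ids) in &tracked {
        println!("subnet id '{}' is tracked by {:?}", subnet_id, node_ids);
    }
}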
/avalancheup-aws/src/subnet_config/mod.rs:
--------------------------------------------------------------------------------
1 | use std::io::{self, stdout};
2 |
3 | use avalanche_types::subnet;
4 | use clap::{value_parser, Arg, Command};
5 | use crossterm::{
6 | execute,
7 | style::{Color, Print, ResetColor, SetForegroundColor},
8 | };
9 | use serde::{Deserialize, Serialize};
10 |
11 | pub const NAME: &str = "subnet-config";
12 |
13 | /// Defines "subnet-evm subnet-config" option.
14 | #[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Clone)]
15 | pub struct Flags {
16 | pub log_level: String,
17 | pub proposer_min_block_delay: u64,
18 | pub file_path: String,
19 | }
20 |
21 | pub fn command() -> Command {
22 | Command::new(NAME)
23 | .about("Writes a default subnet configuration")
24 | .arg(
25 | Arg::new("LOG_LEVEL")
26 | .long("log-level")
27 | .short('l')
28 | .help("Sets the log level")
29 | .required(false)
30 | .num_args(1)
31 | .value_parser(["debug", "info"])
32 | .default_value("info"),
33 | )
34 | .arg(
35 | Arg::new("PROPOSER_MIN_BLOCK_DELAY")
36 | .long("proposer-min-block-delay")
37 | .help("Sets to subnet-evm proposer-min-block-delay in nano seconds (in subnet config)")
38 | .required(false)
39 | .num_args(1)
40 | .value_parser(value_parser!(u64))
41 | .default_value("1000000000"), // 1-second
42 | )
43 | .arg(
44 | Arg::new("FILE_PATH")
45 | .long("file-path")
46 | .short('s')
47 | .help("The config file to create")
48 | .required(false)
49 | .num_args(1),
50 | )
51 | }
52 |
53 | pub fn execute(opts: Flags) -> io::Result<()> {
54 | // ref.
55 | env_logger::init_from_env(
56 | env_logger::Env::default()
57 | .filter_or(env_logger::DEFAULT_FILTER_ENV, opts.clone().log_level),
58 | );
59 |
60 | execute!(
61 | stdout(),
62 | SetForegroundColor(Color::Blue),
63 | Print(format!("\nSaving subnet config to '{}'\n", opts.file_path)),
64 | ResetColor
65 | )?;
66 | let mut subnet_config = subnet::config::Config::default();
67 |
68 | log::info!(
69 | "setting proposer_min_block_delay to {}",
70 | opts.proposer_min_block_delay
71 | );
72 | subnet_config.proposer_min_block_delay = opts.proposer_min_block_delay;
73 |
74 | subnet_config.sync(&opts.file_path)?;
75 | let d = subnet_config.encode_json().expect("failed encode_json");
76 | println!("{d}");
77 |
78 | execute!(
79 | stdout(),
80 | SetForegroundColor(Color::Blue),
81 | Print(format!("\nSaved subnet config to '{}'\n", opts.file_path)),
82 | ResetColor
83 | )?;
84 |
85 | Ok(())
86 | }
87 |
--------------------------------------------------------------------------------
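The --proposer-min-block-delay default in subnet_config/mod.rs above is expressed in nanoseconds. A quick sanity check of the unit, assuming nothing beyond the Rust standard library:

use std::time::Duration;

fn main() {
    // The subnet-config default above ("1000000000") is in nanoseconds.
    let delay = Duration::from_nanos(1_000_000_000);
    assert_eq!(delay, Duration::from_secs(1)); // i.e., the documented 1-second default
    println!("proposer-min-block-delay default: {:?}", delay);
}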
/avalancheup-aws/src/subnet_evm/chain_config.rs:
--------------------------------------------------------------------------------
1 | use std::io::{self, stdout};
2 |
3 | use avalanche_types::subnet_evm::chain_config as subnet_evm_chain_config;
4 | use clap::{value_parser, Arg, Command};
5 | use crossterm::{
6 | execute,
7 | style::{Color, Print, ResetColor, SetForegroundColor},
8 | };
9 | use serde::{Deserialize, Serialize};
10 |
11 | pub const NAME: &str = "chain-config";
12 |
13 | /// Defines "subnet-evm chain-config" option.
14 | #[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Clone)]
15 | pub struct Flags {
16 | pub log_level: String,
17 |
18 | pub tx_pool_account_slots: u64,
19 | pub tx_pool_global_slots: u64,
20 | pub tx_pool_account_queue: u64,
21 | pub tx_pool_global_queue: u64,
22 | pub local_txs_enabled: bool,
23 | pub priority_regossip_frequency: i64,
24 | pub priority_regossip_max_txs: i32,
25 | pub priority_regossip_txs_per_address: i32,
26 | pub priority_regossip_addresses: Vec<String>,
27 |
28 | pub file_path: String,
29 | }
30 |
31 | pub fn command() -> Command {
32 | Command::new(NAME)
33 | .about("Writes a default chain configuration for subnet-evm")
34 | .arg(
35 | Arg::new("LOG_LEVEL")
36 | .long("log-level")
37 | .short('l')
38 | .help("Sets the log level")
39 | .required(false)
40 | .num_args(1)
41 | .value_parser(["debug", "info"])
42 | .default_value("info"),
43 | )
44 | .arg(
45 | Arg::new("TX_POOL_ACCOUNT_SLOTS")
46 | .long("tx-pool-account-slots")
47 | .help("Sets non-zero to set tx-pool-account-slots (in chain config)")
48 | .required(false)
49 | .num_args(1)
50 | .value_parser(value_parser!(u64))
51 | .default_value("0"),
52 | )
53 | .arg(
54 | Arg::new("TX_POOL_GLOBAL_SLOTS")
55 | .long("tx-pool-global-slots")
56 | .help("Sets non-zero to set tx-pool-global-slots (in chain config)")
57 | .required(false)
58 | .num_args(1)
59 | .value_parser(value_parser!(u64))
60 | .default_value("0"),
61 | )
62 | .arg(
63 | Arg::new("TX_POOL_ACCOUNT_QUEUE")
64 | .long("tx-pool-account-queue")
65 | .help("Sets non-zero to set tx-pool-account-queue (in chain config)")
66 | .required(false)
67 | .num_args(1)
68 | .value_parser(value_parser!(u64))
69 | .default_value("0"),
70 | )
71 | .arg(
72 | Arg::new("TX_POOL_GLOBAL_QUEUE")
73 | .long("tx-pool-global-queue")
74 | .help("Sets non-zero to set tx-pool-global-queue (in chain config)")
75 | .required(false)
76 | .num_args(1)
77 | .value_parser(value_parser!(u64))
78 | .default_value("0"),
79 | )
80 | .arg(
81 | Arg::new("LOCAL_TXS_ENABLED")
82 | .long("local-txs-enabled")
83 | .help("Sets to enable local txs for subnet-evm")
84 | .required(false)
85 | .num_args(0),
86 | )
87 | .arg(
88 | Arg::new("PRIORITY_REGOSSIP_FREQUENCY")
89 | .long("priority-regossip-frequency")
90 | .help("Sets non-zero to set priority-regossip-frequency (in nano-seconds, in chain config)")
91 | .required(false)
92 | .num_args(1)
93 | .value_parser(value_parser!(i64))
94 | .default_value("0"),
95 | )
96 | .arg(
97 | Arg::new("PRIORITY_REGOSSIP_MAX_TXS")
98 | .long("priority-regossip-max-txs")
99 | .help("Sets non-zero to set priority-regossip-max-txs (in chain config)")
100 | .required(false)
101 | .num_args(1)
102 | .value_parser(value_parser!(i32))
103 | .default_value("0"),
104 | )
105 | .arg(
106 | Arg::new("PRIORITY_REGOSSIP_TXS_PER_ADDRESS")
107 | .long("priority-regossip-txs-per-address")
108 | .help("Sets non-zero to set priority-regossip-txs-per-address (in chain config)")
109 | .required(false)
110 | .num_args(1)
111 | .value_parser(value_parser!(i32))
112 | .default_value("0"),
113 | )
114 | .arg(
115 | Arg::new("PRIORITY_REGOSSIP_ADDRESSES")
116 | .long("priority-regossip-addresses")
117 | .help("Sets the comma-separated priority regossip addresses (in addition to pre-funded test keys, in chain config)")
118 | .required(false)
119 | .num_args(1),
120 | )
121 | .arg(
122 | Arg::new("FILE_PATH")
123 | .long("file-path")
124 | .short('s')
125 | .help("The config file to create")
126 | .required(false)
127 | .num_args(1),
128 | )
129 | }
130 |
131 | pub fn execute(opts: Flags) -> io::Result<()> {
132 | // ref.
133 | env_logger::init_from_env(
134 | env_logger::Env::default()
135 | .filter_or(env_logger::DEFAULT_FILTER_ENV, opts.clone().log_level),
136 | );
137 |
138 | execute!(
139 | stdout(),
140 | SetForegroundColor(Color::Blue),
141 | Print(format!("\nSaving chain config to '{}'\n", opts.file_path)),
142 | ResetColor
143 | )?;
144 | let mut chain_config = subnet_evm_chain_config::Config::default();
145 |
146 | if opts.tx_pool_account_slots > 0 {
147 | chain_config.tx_pool_account_slots = Some(opts.tx_pool_account_slots);
148 | }
149 | if opts.tx_pool_global_slots > 0 {
150 | chain_config.tx_pool_global_slots = Some(opts.tx_pool_global_slots);
151 | }
152 | if opts.tx_pool_account_queue > 0 {
153 | chain_config.tx_pool_account_queue = Some(opts.tx_pool_account_queue);
154 | }
155 | if opts.tx_pool_global_queue > 0 {
156 | chain_config.tx_pool_global_queue = Some(opts.tx_pool_global_queue);
157 | }
158 | if opts.local_txs_enabled {
159 | chain_config.local_txs_enabled = Some(true);
160 | }
161 | if opts.priority_regossip_frequency > 0 {
162 | chain_config.priority_regossip_frequency = Some(opts.priority_regossip_frequency);
163 | }
164 | if opts.priority_regossip_max_txs > 0 {
165 | chain_config.priority_regossip_max_txs = Some(opts.priority_regossip_max_txs);
166 | }
167 | if opts.priority_regossip_txs_per_address > 0 {
168 | chain_config.priority_regossip_txs_per_address =
169 | Some(opts.priority_regossip_txs_per_address);
170 | }
171 | if !opts.priority_regossip_addresses.is_empty() {
172 | chain_config.priority_regossip_addresses = Some(opts.priority_regossip_addresses.clone());
173 | }
174 |
175 | chain_config.sync(&opts.file_path)?;
176 | let d = chain_config.encode_json().expect("failed encode_json");
177 | println!("{d}");
178 |
179 | execute!(
180 | stdout(),
181 | SetForegroundColor(Color::Blue),
182 | Print(format!("\nSaved chain config to '{}'\n", opts.file_path)),
183 | ResetColor
184 | )?;
185 |
186 | Ok(())
187 | }
188 |
--------------------------------------------------------------------------------
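The chain-config flags in chain_config.rs above use zero to mean "leave the field unset so subnet-evm keeps its own default". A tiny sketch of that convention, with a hypothetical non_zero helper that is not part of the crate:

fn non_zero(v: u64) -> Option<u64> {
    // Zero means "flag not given": keep None so subnet-evm falls back to its default.
    if v > 0 {
        Some(v)
    } else {
        None
    }
}

fn main() {
    assert_eq!(non_zero(0), None); // e.g., --tx-pool-account-slots left at its "0" default
    assert_eq!(non_zero(4096), Some(4096)); // e.g., --tx-pool-account-slots 4096 overrides
    println!("ok");
}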
/avalancheup-aws/src/subnet_evm/mod.rs:
--------------------------------------------------------------------------------
1 | pub mod chain_config;
2 | pub mod genesis;
3 |
4 | use clap::Command;
5 |
6 | pub const NAME: &str = "subnet-evm";
7 |
8 | pub fn command() -> Command {
9 | Command::new(NAME)
10 | .about("Writes subnet-evm configurations")
11 | .subcommands(vec![chain_config::command(), genesis::command()])
12 | }
13 |
--------------------------------------------------------------------------------
/blizzard-aws/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "blizzard-aws"
3 | version = "1.0.0" # https://github.com/ava-labs/avalanche-ops/releases
4 | edition = "2021"
5 | rust-version = "1.70"
6 |
7 | [[bin]]
8 | name = "blizzard-aws"
9 | path = "src/main.rs"
10 |
11 | [dependencies]
12 | avalanche-types = { version = "0.1.4", features = ["jsonrpc_client", "wallet", "wallet_evm"] } # https://crates.io/crates/avalanche-types
13 | aws-manager = { version = "0.30.2", features = ["cloudwatch", "ec2", "s3"] } # https://github.com/gyuho/aws-manager/tags
14 | aws-sdk-cloudwatch = "0.30.0" # https://github.com/awslabs/aws-sdk-rust/releases
15 | aws-sdk-ec2 = "0.30.0" # https://github.com/awslabs/aws-sdk-rust/releases
16 | aws-sdk-s3 = "0.30.0" # https://github.com/awslabs/aws-sdk-rust/releases
17 | blizzardup-aws = { path = "../blizzardup-aws" }
18 | clap = { version = "4.4.0", features = ["cargo", "derive"] } # https://github.com/clap-rs/clap/releases
19 | env_logger = "0.10.0"
20 | ethers-signers = { version = "=2.0.7", optional = false }
21 | log = "0.4.20"
22 | primitive-types = { version = "0.12.1", optional = false } # https://crates.io/crates/primitive-types
23 | random-manager = "0.0.5"
24 | tokio = { version = "1.32.0", features = ["full"] } # https://github.com/tokio-rs/tokio/releases
25 |
--------------------------------------------------------------------------------
/blizzard-aws/src/cloudwatch.rs:
--------------------------------------------------------------------------------
1 | use std::{io, path::Path};
2 |
3 | use aws_manager::cloudwatch;
4 |
5 | /// Sets up log collection agent (e.g., cloudwatch agent)
6 | /// using the systemd service.
7 | pub struct ConfigManager {
8 | /// Used for "log_group_name" and metrics "namespace".
9 | pub id: String,
10 |
11 | /// Used for naming CloudWatch log name.
12 | pub node_kind: String,
13 |
14 | /// Set "true" to collect instance-level system logs.
15 | /// Useful to check OOMs via "oom-kill" or "Out of memory: Killed process 8266 (...)"
16 | pub instance_system_logs: bool,
17 |
18 | /// CloudWatch agent configuration file path.
19 | pub config_file_path: String,
20 | }
21 |
22 | /// ref. https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Agent-Configuration-File-Details.html
23 | impl ConfigManager {
24 | /// Set "log_files" to track extra log files via CloudWatch.
25 | /// e.g., "/var/log/blizzard.log"
26 | pub fn sync(&self, logs_auto_removal: bool, log_files: Option<Vec<String>>) -> io::Result<()> {
27 | log::info!("syncing CloudWatch configuration JSON file");
28 |
29 | let mut log_collect_list = vec![];
30 |
31 | if let Some(v) = log_files {
32 | for file_path in v {
33 | // e.g., "/var/log/blizzard.log" becomes "blizzard.log"
34 | let fname = Path::new(&file_path)
35 | .file_name()
36 | .unwrap()
37 | .to_os_string()
38 | .into_string()
39 | .unwrap();
40 |
41 | log_collect_list.push(cloudwatch::Collect {
42 | log_group_name: self.id.clone(),
43 | log_stream_name: format!(
44 | "{{instance_id}}-{}-{}",
45 | self.node_kind.as_str(),
46 | fname
47 | ),
48 |
49 | file_path,
50 |
51 | // If a log continuously writes to a single file, it is not removed.
52 | auto_removal: Some(logs_auto_removal),
53 | retention_in_days: Some(5),
54 |
55 | ..cloudwatch::Collect::default()
56 | });
57 | }
58 | }
59 |
60 | if self.instance_system_logs {
61 | // to check OOMs via "oom-kill" or "Out of memory: Killed process 8266 (srEXiWaHuhNyGwP)"
62 | log_collect_list.push(cloudwatch::Collect {
63 | log_group_name: self.id.clone(),
64 | log_stream_name: format!("{{instance_id}}-{}-syslog", self.node_kind.as_str()),
65 | file_path: String::from("/var/log/syslog"),
66 |
67 | // If a log continuously writes to a single file, it is not removed.
68 | auto_removal: Some(logs_auto_removal),
69 | retention_in_days: Some(5),
70 |
71 | ..cloudwatch::Collect::default()
72 | });
73 | // to check device layer logs
74 | log_collect_list.push(cloudwatch::Collect {
75 | log_group_name: self.id.clone(),
76 | log_stream_name: format!("{{instance_id}}-{}-dmesg", self.node_kind.as_str()),
77 | file_path: String::from("/var/log/dmesg"),
78 |
79 | // If a log continuously writes to a single file, it is not removed.
80 | auto_removal: Some(logs_auto_removal),
81 | retention_in_days: Some(5),
82 |
83 | ..cloudwatch::Collect::default()
84 | });
85 | }
86 |
87 | let mut cloudwatch_config = cloudwatch::Config::default();
88 | cloudwatch_config.logs = Some(cloudwatch::Logs {
89 | force_flush_interval: Some(60),
90 | logs_collected: Some(cloudwatch::LogsCollected {
91 | files: Some(cloudwatch::Files {
92 | collect_list: Some(log_collect_list),
93 | }),
94 | }),
95 | });
96 |
97 | cloudwatch_config.sync(&self.config_file_path)
98 | }
99 | }
100 |
--------------------------------------------------------------------------------
/blizzard-aws/src/command.rs:
--------------------------------------------------------------------------------
1 | use std::{
2 | fs,
3 | io::{self, Error, ErrorKind},
4 | path::Path,
5 | };
6 |
7 | use crate::{cloudwatch as cw, evm, flags, x};
8 | use aws_manager::{self, ec2, s3};
9 | use tokio::time::Duration;
10 |
11 | pub async fn execute(opts: flags::Options) -> io::Result<()> {
12 | println!("starting {} with {:?}", crate::APP_NAME, opts);
13 |
14 | // ref.
15 | env_logger::init_from_env(
16 | env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, opts.log_level),
17 | );
18 |
19 | let meta = fetch_metadata().await?;
20 | let aws_creds = load_aws_credential(&meta.region, opts.profile_name).await?;
21 | let tags = fetch_tags(&aws_creds.ec2_manager, &meta.ec2_instance_id).await?;
22 |
23 | let spec = download_spec(
24 | &aws_creds.s3_manager,
25 | &tags.s3_bucket,
26 | &tags.id,
27 | &tags.blizzardup_spec_path,
28 | )
29 | .await?;
30 |
31 | if !Path::new(&tags.cloudwatch_config_file_path).exists() {
32 | create_cloudwatch_config(&tags.id, true, &tags.cloudwatch_config_file_path)?;
33 | } else {
34 | log::warn!("skipping writing cloudwatch config (already exists)")
35 | }
36 |
37 | let mut handles = vec![];
38 | for load_kind in spec.blizzard_spec.load_kinds.iter() {
39 | log::info!(
40 | "launching {} workers for {}",
41 | spec.blizzard_spec.workers,
42 | load_kind
43 | );
44 |
45 | match blizzardup_aws::blizzard::LoadKind::from(load_kind.as_str()) {
46 | blizzardup_aws::blizzard::LoadKind::XTransfers => {
47 | for worker_idx in 0..spec.blizzard_spec.workers {
48 | handles.push(tokio::spawn(x::make_transfers(worker_idx, spec.clone())))
49 | }
50 | }
51 | blizzardup_aws::blizzard::LoadKind::EvmTransfers => {
52 | for worker_idx in 0..spec.blizzard_spec.workers {
53 | handles.push(tokio::spawn(evm::make_transfers(worker_idx, spec.clone())));
54 | }
55 | }
56 | blizzardup_aws::blizzard::LoadKind::Unknown(u) => {
57 | return Err(Error::new(
58 | ErrorKind::Other,
59 | format!("invalid load kind {}", u),
60 | ));
61 | }
62 | }
63 | }
64 |
65 | log::info!("STEP: blocking on handles via JoinHandle");
66 | for handle in handles {
67 | handle.await.map_err(|e| {
68 | Error::new(
69 | ErrorKind::Other,
70 | format!("failed await on JoinHandle {}", e),
71 | )
72 | })?;
73 | }
74 |
75 | Ok(())
76 | }
77 |
78 | #[derive(Debug, Clone)]
79 | struct Metadata {
80 | region: String,
81 | ec2_instance_id: String,
82 | }
83 |
84 | async fn fetch_metadata() -> io::Result<Metadata> {
85 | log::info!("STEP: fetching EC2 instance metadata...");
86 |
87 | let az = ec2::metadata::fetch_availability_zone()
88 | .await
89 | .map_err(|e| {
90 | Error::new(
91 | ErrorKind::Other,
92 | format!("failed fetch_availability_zone {}", e),
93 | )
94 | })?;
95 | log::info!("fetched availability zone {}", az);
96 |
97 | let reg = ec2::metadata::fetch_region()
98 | .await
99 | .map_err(|e| Error::new(ErrorKind::Other, format!("failed fetch_region {}", e)))?;
100 | log::info!("fetched region {}", reg);
101 |
102 | let ec2_instance_id = ec2::metadata::fetch_instance_id()
103 | .await
104 | .map_err(|e| Error::new(ErrorKind::Other, format!("failed fetch_instance_id {}", e)))?;
105 | log::info!("fetched EC2 instance Id {}", ec2_instance_id);
106 |
107 | let public_ipv4 = ec2::metadata::fetch_public_ipv4()
108 | .await
109 | .map_err(|e| Error::new(ErrorKind::Other, format!("failed fetch_public_ipv4 {}", e)))?;
110 | log::info!("fetched public ipv4 {}", public_ipv4);
111 |
112 | Ok(Metadata {
113 | region: reg,
114 | ec2_instance_id,
115 | })
116 | }
117 |
118 | #[derive(Debug, Clone)]
119 | struct AwsCreds {
120 | ec2_manager: ec2::Manager,
121 | s3_manager: s3::Manager,
122 | }
123 |
124 | async fn load_aws_credential(reg: &str, profile_name: String) -> io::Result<AwsCreds> {
125 | log::info!("STEP: loading up AWS credential for region '{}'...", reg);
126 |
127 | let shared_config = aws_manager::load_config(
128 | Some(reg.to_string()),
129 | Some(profile_name.clone()),
130 | Some(Duration::from_secs(30)),
131 | )
132 | .await;
133 |
134 | let ec2_manager = ec2::Manager::new(&shared_config);
135 | let s3_manager = s3::Manager::new(&shared_config);
136 |
137 | Ok(AwsCreds {
138 | ec2_manager,
139 | s3_manager,
140 | })
141 | }
142 |
143 | #[derive(Debug, Clone)]
144 | struct Tags {
145 | id: String,
146 | instance_mode: String,
147 | node_kind: String,
148 | s3_bucket: String,
149 | cloudwatch_config_file_path: String,
150 | blizzardup_spec_path: String,
151 | }
152 |
153 | async fn fetch_tags(ec2_manager: &ec2::Manager, ec2_instance_id: &str) -> io::Result<Tags> {
154 | log::info!("STEP: fetching tags...");
155 |
156 | let tags = ec2_manager
157 | .fetch_tags(ec2_instance_id)
158 | .await
159 | .map_err(|e| Error::new(ErrorKind::Other, format!("failed fetch_tags {}", e)))?;
160 |
161 | let mut fetched_tags = Tags {
162 | id: String::new(),
163 | instance_mode: String::new(),
164 | node_kind: String::new(),
165 | s3_bucket: String::new(),
166 | cloudwatch_config_file_path: String::new(),
167 | blizzardup_spec_path: String::new(),
168 | };
169 | for c in tags {
170 | let k = c.key().unwrap();
171 | let v = c.value().unwrap();
172 |
173 | log::info!("EC2 tag key='{}', value='{}'", k, v);
174 | match k {
175 | "ID" => {
176 | fetched_tags.id = v.to_string();
177 | }
178 | "INSTANCE_MODE" => {
179 | fetched_tags.instance_mode = v.to_string();
180 | }
181 | "NODE_KIND" => {
182 | fetched_tags.node_kind = v.to_string();
183 | }
184 | "S3_BUCKET_NAME" => {
185 | fetched_tags.s3_bucket = v.to_string();
186 | }
187 | "CLOUDWATCH_CONFIG_FILE_PATH" => {
188 | fetched_tags.cloudwatch_config_file_path = v.to_string();
189 | }
190 | "BLIZZARDUP_SPEC_PATH" => {
191 | fetched_tags.blizzardup_spec_path = v.to_string();
192 | }
193 | _ => {}
194 | }
195 | }
196 |
197 | assert!(!fetched_tags.id.is_empty());
198 | assert!(fetched_tags.node_kind.eq("worker"));
199 | assert!(!fetched_tags.s3_bucket.is_empty());
200 | assert!(!fetched_tags.cloudwatch_config_file_path.is_empty());
201 | assert!(!fetched_tags.blizzardup_spec_path.is_empty());
202 |
203 | Ok(fetched_tags)
204 | }
205 |
206 | async fn download_spec(
207 | s3_manager: &s3::Manager,
208 | s3_bucket: &str,
209 | id: &str,
210 | blizzardup_spec_path: &str,
211 | ) -> io::Result<blizzardup_aws::Spec> {
212 | log::info!("STEP: downloading blizzardup spec file from S3...");
213 |
214 | let tmp_spec_file_path = random_manager::tmp_path(15, Some(".yaml"))?;
215 |
216 | let exists = s3_manager
217 | .get_object_with_retries(
218 | s3_bucket,
219 | &blizzardup_aws::StorageNamespace::ConfigFile(id.to_string()).encode(),
220 | &tmp_spec_file_path,
221 | true,
222 | Duration::from_secs(30),
223 | Duration::from_secs(1),
224 | )
225 | .await
226 | .map_err(|e| Error::new(ErrorKind::Other, format!("failed spawn_get_object {}", e)))?;
227 | if !exists {
228 | return Err(Error::new(
229 | ErrorKind::Other,
230 | "blizzard spec s3 file not found",
231 | ));
232 | }
233 |
234 | let spec = blizzardup_aws::Spec::load(&tmp_spec_file_path)?;
235 | log::info!("loaded blizzardup_aws::Spec");
236 |
237 | fs::copy(&tmp_spec_file_path, blizzardup_spec_path)?;
238 | fs::remove_file(&tmp_spec_file_path)?; // "blizzard" never updates "spec" file, runs in read-only mode
239 |
240 | Ok(spec)
241 | }
242 |
243 | fn create_cloudwatch_config(
244 | id: &str,
245 | log_auto_removal: bool,
246 | cloudwatch_config_file_path: &str,
247 | ) -> io::Result<()> {
248 | log::info!("STEP: creating CloudWatch JSON config file...");
249 |
250 | let cw_config_manager = cw::ConfigManager {
251 | id: id.to_string(),
252 | node_kind: String::from("worker"),
253 | instance_system_logs: true,
254 | config_file_path: cloudwatch_config_file_path.to_string(),
255 | };
256 | cw_config_manager.sync(
257 | log_auto_removal,
258 | Some(vec![
259 | String::from("/var/log/cloud-init-output.log"),
260 | String::from("/var/log/blizzard.log"),
261 | ]),
262 | )
263 | }
264 |
--------------------------------------------------------------------------------
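command.rs above repeats .map_err(|e| Error::new(ErrorKind::Other, format!(...))) at every fallible call. Below is a sketch of a hypothetical helper (not part of the crate) that would factor out that mapping; the stand-in Result replaces an actual call such as ec2::metadata::fetch_region():

use std::io::{Error, ErrorKind};

// Hypothetical helper: wraps any displayable error into the io::Error shape
// that command.rs builds by hand at each call site.
fn other_err<E: std::fmt::Display>(ctx: &str) -> impl FnOnce(E) -> Error {
    let ctx = ctx.to_string();
    move |e| Error::new(ErrorKind::Other, format!("{ctx} {e}"))
}

fn main() -> Result<(), Error> {
    // Stand-in for a fallible call such as ec2::metadata::fetch_region().
    let fetched: Result<String, String> = Ok("us-west-2".to_string());
    let region = fetched.map_err(other_err("failed fetch_region"))?;
    println!("region: {region}");
    Ok(())
}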
/blizzard-aws/src/flags.rs:
--------------------------------------------------------------------------------
1 | /// Defines flag options.
2 | #[derive(Debug)]
3 | pub struct Options {
4 | pub log_level: String,
5 | pub profile_name: String,
6 | }
7 |
--------------------------------------------------------------------------------
/blizzard-aws/src/main.rs:
--------------------------------------------------------------------------------
1 | mod cloudwatch;
2 | mod command;
3 | mod evm;
4 | mod flags;
5 | mod x;
6 |
7 | use clap::{crate_version, Arg, Command};
8 |
9 | pub const APP_NAME: &str = "blizzard-aws";
10 |
11 | #[tokio::main]
12 | async fn main() {
13 | let matches = Command::new(APP_NAME)
14 | .version(crate_version!())
15 | .about("Runs a Blizzard agent (daemon) on AWS")
16 | .arg(
17 | Arg::new("LOG_LEVEL")
18 | .long("log-level")
19 | .short('l')
20 | .help("Sets the log level")
21 | .required(false)
22 | .num_args(1)
23 | .value_parser(["debug", "info"])
24 | .default_value("info"),
25 | )
26 | .arg(
27 | Arg::new("PROFILE_NAME")
28 | .long("profile-name")
29 | .help("Sets the AWS credential profile name for API calls/endpoints")
30 | .required(false)
31 | .default_value("default")
32 | .num_args(1),
33 | )
34 | .get_matches();
35 |
36 | println!("{} version: {}", APP_NAME, crate_version!());
37 | let opts = flags::Options {
38 | log_level: matches
39 | .get_one::("LOG_LEVEL")
40 | .unwrap_or(&String::from("info"))
41 | .clone(),
42 | profile_name: matches.get_one::<String>("PROFILE_NAME").unwrap().clone(),
43 | };
44 | command::execute(opts).await.unwrap();
45 | }
46 |
--------------------------------------------------------------------------------
/blizzardup-aws/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "blizzardup-aws"
3 | version = "1.0.0" # https://github.com/ava-labs/avalanche-ops/releases
4 | edition = "2021"
5 | rust-version = "1.70"
6 |
7 | [[bin]]
8 | name = "blizzardup-aws"
9 | path = "src/main.rs"
10 |
11 | [dependencies]
12 | avalanche-types = { version = "0.1.4", features = ["avalanchego", "jsonrpc_client", "subnet_evm"] } # https://crates.io/crates/avalanche-types
13 | aws-manager = { version = "0.30.2", features = ["cloudformation", "cloudwatch", "ec2", "s3", "sts"] } # https://github.com/gyuho/aws-manager/tags
14 | aws-sdk-cloudformation = "0.30.0" # https://github.com/awslabs/aws-sdk-rust/releases
15 | aws-sdk-ec2 = "0.30.0" # https://github.com/awslabs/aws-sdk-rust/releases
16 | aws-sdk-s3 = "0.30.0" # https://github.com/awslabs/aws-sdk-rust/releases
17 | clap = { version = "4.4.0", features = ["cargo", "derive"] } # https://github.com/clap-rs/clap/releases
18 | compress-manager = "0.0.10"
19 | crossterm = "0.27.0"
20 | dialoguer = "0.10.4"
21 | dir-manager = "0.0.1"
22 | env_logger = "0.10.0"
23 | id-manager = "0.0.3"
24 | lazy_static = "1.4.0"
25 | log = "0.4.20"
26 | primitive-types = "0.12.1" # https://crates.io/crates/primitive-types
27 | prometheus-manager = "0.0.30"
28 | random-manager = "0.0.5"
29 | regex = "1.8.1"
30 | reqwest = "0.11.18"
31 | rust-embed = "8.0.0"
32 | serde = { version = "1.0.186", features = ["derive"] }
33 | serde_yaml = "0.9.25" # https://github.com/dtolnay/serde-yaml/releases
34 | signal-hook = "0.3.17"
35 | tokio = { version = "1.32.0", features = ["full"] } # https://github.com/tokio-rs/tokio/releases
36 |
37 | [dev-dependencies]
38 | tempfile = "3.8.0"
39 |
--------------------------------------------------------------------------------
/blizzardup-aws/README.md:
--------------------------------------------------------------------------------
1 |
2 | DO NOT LOOK! THIS IS AN EXPERIMENT AND WILL BE MOVED TO AVALANCHE-RUST!
3 |
--------------------------------------------------------------------------------
/blizzardup-aws/cfn-templates/ec2_instance_role.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | AWSTemplateFormatVersion: "2010-09-09"
3 | Description: "IAM instance role"
4 |
5 | # takes about 3 minutes
6 |
7 | # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html
8 | Parameters:
9 | Id:
10 | Type: String
11 | Description: Unique identifier, prefix for all resources created below.
12 |
13 | S3BucketName:
14 | Type: String
15 | Description: S3 bucket name to store artifacts.
16 |
17 | Mappings:
18 | ServicePrincipals:
19 | aws-cn:
20 | ec2: ec2.amazonaws.com.cn
21 | aws:
22 | ec2: ec2.amazonaws.com
23 |
24 | Resources:
25 | # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-role.html
26 | InstanceRole:
27 | Type: AWS::IAM::Role
28 | Properties:
29 | RoleName: !Join ["-", [!Ref Id, "instance-role"]]
30 | AssumeRolePolicyDocument:
31 | Version: "2012-10-17"
32 | Statement:
33 | - Effect: Allow
34 | Principal:
35 | Service:
36 | - Fn::FindInMap:
37 | - ServicePrincipals
38 | - Ref: AWS::Partition
39 | - ec2
40 | Action:
41 | - sts:AssumeRole
42 | ManagedPolicyArns:
43 | - arn:aws:iam::aws:policy/AmazonSSMFullAccess
44 | - arn:aws:iam::aws:policy/CloudWatchFullAccess
45 | Path: /
46 | Policies:
47 | - PolicyName: blizzard-instance-role-policy
48 | PolicyDocument:
49 | Version: "2012-10-17"
50 | Statement:
51 | - Effect: Allow
52 | Action:
53 | - ec2:DescribeInstances # to fetch tags
54 | - ec2:DescribeTags # to find network/resource information
55 | # restrict this better
56 | # ref. https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_examples_ec2_ebs-owner.html
57 | Resource: "*"
58 | - Effect: Allow
59 | Action:
60 | - s3:List*
61 | Resource: "*"
62 | - Effect: Allow
63 | Action:
64 | - s3:GetObject # to download artifacts
65 | - s3:PutObject # to upload generated TLS keys
66 | Resource:
67 | - !Join [
68 | "",
69 | [
70 | !Sub "arn:${AWS::Partition}:s3:::",
71 | !Ref S3BucketName,
72 | "/",
73 | !Ref Id,
74 | "/*",
75 | ],
76 | ]
77 | - Effect: Allow
78 | Action:
79 | - cloudwatch:PutMetricData
80 | Resource: "*"
81 | - Effect: Allow
82 | Action:
83 | - logs:CreateLogGroup
84 | - logs:CreateLogStream
85 | - logs:PutLogEvents
86 | - logs:DescribeLogStreams
87 | - logs:PutRetentionPolicy
88 | Resource:
89 | # Ref: http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-cloudwatch-logs
90 | - !Sub "arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:${Id}"
91 | - !Sub "arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:${Id}:log-stream:*"
92 |
93 | # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-instanceprofile.html
94 | InstanceProfile:
95 | Type: AWS::IAM::InstanceProfile
96 | Properties:
97 | InstanceProfileName: !Join ["-", [!Ref Id, "instance-profile"]]
98 | Path: "/"
99 | Roles:
100 | - !Ref InstanceRole
101 |
102 | Outputs:
103 | InstanceRoleArn:
104 | Value: !GetAtt InstanceRole.Arn
105 | Description: Role ARN
106 |
107 | InstanceProfileArn:
108 | Value: !GetAtt InstanceProfile.Arn
109 | Description: Instance profile ARN
110 |
--------------------------------------------------------------------------------
/blizzardup-aws/cfn-templates/vpc.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | AWSTemplateFormatVersion: "2010-09-09"
3 | Description: "VPC"
4 |
5 | # takes about 6 minutes
6 |
7 | # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html
8 | Parameters:
9 | Id:
10 | Type: String
11 | Description: Unique identifier, prefix for all resources created below.
12 |
13 | VpcCidr:
14 | Type: String
15 | Default: 10.0.0.0/16
16 | AllowedPattern: '((\d{1,3})\.){3}\d{1,3}/\d{1,2}'
17 | Description: IP range (CIDR notation) for VPC, must be a valid (RFC 1918) CIDR range (from 10.0.0.0 to 10.0.255.255)
18 |
19 | PublicSubnetCidr1:
20 | Type: String
21 | Default: 10.0.64.0/19
22 | AllowedPattern: '((\d{1,3})\.){3}\d{1,3}/\d{1,2}'
23 | Description: CIDR block for public subnet 1 within the VPC (from 10.0.64.0 to 10.0.95.255)
24 |
25 | PublicSubnetCidr2:
26 | Type: String
27 | Default: 10.0.128.0/19
28 | AllowedPattern: '((\d{1,3})\.){3}\d{1,3}/\d{1,2}'
29 | Description: CIDR block for public subnet 2 within the VPC (from 10.0.128.0 to 10.0.159.255)
30 |
31 | PublicSubnetCidr3:
32 | Type: String
33 | Default: 10.0.192.0/19
34 | AllowedPattern: '((\d{1,3})\.){3}\d{1,3}/\d{1,2}'
35 | Description: CIDR block for public subnet 3 within the VPC (from 10.0.192.0 to 10.0.223.255)
36 |
37 | SshPortIngressIpv4Range:
38 | Type: String
39 | Default: 0.0.0.0/0
40 | AllowedPattern: '((\d{1,3})\.){3}\d{1,3}/\d{1,2}'
41 | Description: IP range for SSH inbound traffic
42 |
43 | Conditions:
44 | Has2Azs:
45 | Fn::Or:
46 | - Fn::Equals:
47 | - { Ref: "AWS::Region" }
48 | - ap-south-1
49 | - Fn::Equals:
50 | - { Ref: "AWS::Region" }
51 | - ap-northeast-2
52 | - Fn::Equals:
53 | - { Ref: "AWS::Region" }
54 | - ca-central-1
55 | - Fn::Equals:
56 | - { Ref: "AWS::Region" }
57 | - cn-north-1
58 | - Fn::Equals:
59 | - { Ref: "AWS::Region" }
60 | - sa-east-1
61 | - Fn::Equals:
62 | - { Ref: "AWS::Region" }
63 | - us-west-1
64 |
65 | HasMoreThan2Azs:
66 | Fn::Not:
67 | - Condition: Has2Azs
68 |
69 | Resources:
70 | InternetGateway:
71 | Type: AWS::EC2::InternetGateway
72 | Properties:
73 | Tags:
74 | - Key: Name
75 | Value: !Join ["-", [!Ref Id, "igw"]]
76 |
77 | VPC:
78 | Type: AWS::EC2::VPC
79 | Properties:
80 | CidrBlock: !Ref VpcCidr
81 | EnableDnsSupport: true
82 | EnableDnsHostnames: true
83 | Tags:
84 | - Key: Name
85 | Value: !Join ["-", [!Ref Id, "vpc"]]
86 |
87 | VPCGatewayAttachment:
88 | Type: AWS::EC2::VPCGatewayAttachment
89 | DependsOn:
90 | - VPC
91 | - InternetGateway
92 | Properties:
93 | InternetGatewayId: !Ref InternetGateway
94 | VpcId: !Ref VPC
95 |
96 | # The instances must be in a subnet with outbound internet access.
97 | # Can be a public subnet with an auto-assigned public ipv4 address.
98 | # Or it can be a private subnet with a NAT Gateway.
99 | PublicSubnet1:
100 | Type: AWS::EC2::Subnet
101 | DependsOn:
102 | - VPC
103 | - VPCGatewayAttachment
104 | Metadata:
105 | Comment: Public Subnet 1
106 | Properties:
107 | AvailabilityZone: !Select [0, !GetAZs ]
108 | CidrBlock: !Ref PublicSubnetCidr1
109 | MapPublicIpOnLaunch: true
110 | VpcId: !Ref VPC
111 | Tags:
112 | - Key: Name
113 | Value: !Join ["-", [!Ref Id, "public-subnet-1"]]
114 | - Key: Network
115 | Value: Public
116 |
117 | PublicSubnet2:
118 | Type: AWS::EC2::Subnet
119 | DependsOn:
120 | - VPC
121 | - VPCGatewayAttachment
122 | Metadata:
123 | Comment: Public Subnet 2
124 | Properties:
125 | AvailabilityZone: !Select [1, !GetAZs ]
126 | CidrBlock: !Ref PublicSubnetCidr2
127 | MapPublicIpOnLaunch: true
128 | VpcId: !Ref VPC
129 | Tags:
130 | - Key: Name
131 | Value: !Join ["-", [!Ref Id, "public-subnet-2"]]
132 | - Key: Network
133 | Value: Public
134 |
135 | PublicSubnet3:
136 | Condition: HasMoreThan2Azs
137 | Type: AWS::EC2::Subnet
138 | DependsOn:
139 | - VPC
140 | - VPCGatewayAttachment
141 | Metadata:
142 | Comment: Public Subnet 3
143 | Properties:
144 | AvailabilityZone: !Select [2, !GetAZs ]
145 | CidrBlock: !Ref PublicSubnetCidr3
146 | MapPublicIpOnLaunch: true
147 | VpcId: !Ref VPC
148 | Tags:
149 | - Key: Name
150 | Value: !Join ["-", [!Ref Id, "public-subnet-3"]]
151 | - Key: Network
152 | Value: Public
153 |
154 | PublicRouteTable:
155 | Type: AWS::EC2::RouteTable
156 | DependsOn:
157 | - VPC
158 | Properties:
159 | VpcId: !Ref VPC
160 | Tags:
161 | - Key: Name
162 | Value: !Join ["-", [!Ref Id, "public-route-table"]]
163 | - Key: Network
164 | Value: Public
165 |
166 | PublicRoute:
167 | Type: AWS::EC2::Route
168 | DependsOn:
169 | - VPC
170 | - VPCGatewayAttachment
171 | Properties:
172 | RouteTableId: !Ref PublicRouteTable
173 | DestinationCidrBlock: 0.0.0.0/0
174 | GatewayId: !Ref InternetGateway
175 |
176 | PublicSubnet1RouteTableAssociation:
177 | Type: AWS::EC2::SubnetRouteTableAssociation
178 | DependsOn:
179 | - VPC
180 | - VPCGatewayAttachment
181 | - PublicSubnet1
182 | Properties:
183 | SubnetId: !Ref PublicSubnet1
184 | RouteTableId: !Ref PublicRouteTable
185 |
186 | PublicSubnet2RouteTableAssociation:
187 | Type: AWS::EC2::SubnetRouteTableAssociation
188 | DependsOn:
189 | - VPC
190 | - VPCGatewayAttachment
191 | - PublicSubnet2
192 | Properties:
193 | SubnetId: !Ref PublicSubnet2
194 | RouteTableId: !Ref PublicRouteTable
195 |
196 | PublicSubnet3RouteTableAssociation:
197 | Condition: HasMoreThan2Azs
198 | Type: AWS::EC2::SubnetRouteTableAssociation
199 | DependsOn:
200 | - VPC
201 | - VPCGatewayAttachment
202 | - PublicSubnet3
203 | Properties:
204 | SubnetId: !Ref PublicSubnet3
205 | RouteTableId: !Ref PublicRouteTable
206 |
207 | # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-security-group.html
208 | SecurityGroup:
209 | Type: AWS::EC2::SecurityGroup
210 | DependsOn:
211 | - VPC
212 | - VPCGatewayAttachment
213 | Properties:
214 | GroupName: !Join ["-", [!Ref Id, "security-group"]]
215 | GroupDescription: Secured communication
216 | VpcId: !Ref VPC
217 |
218 | # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-security-group-ingress.html
219 | SshIngress:
220 | Type: AWS::EC2::SecurityGroupIngress
221 | Properties:
222 | GroupId: !Ref SecurityGroup
223 | IpProtocol: tcp
224 | FromPort: 22
225 | ToPort: 22
226 | CidrIp: !Ref SshPortIngressIpv4Range
227 |
228 | # TODO: can this be more strict
229 | # allow all outbound traffic
230 | Egress:
231 | Type: AWS::EC2::SecurityGroupEgress
232 | Properties:
233 | GroupId: !Ref SecurityGroup
234 | IpProtocol: "-1"
235 | FromPort: "1"
236 | ToPort: "65535"
237 | CidrIp: "0.0.0.0/0"
238 |
239 | Outputs:
240 | VpcId:
241 | Description: VPC ID
242 | Value: !Ref VPC
243 |
244 | SecurityGroupId:
245 | Description: Security group ID
246 | Value: !Ref SecurityGroup
247 |
248 | PublicSubnetIds:
249 | Description: All public subnet IDs in the VPC
250 | Value:
251 | Fn::If:
252 | - HasMoreThan2Azs
253 | - !Join [
254 | ",",
255 | [!Ref PublicSubnet1, !Ref PublicSubnet2, !Ref PublicSubnet3],
256 | ]
257 | - !Join [",", [!Ref PublicSubnet1, !Ref PublicSubnet2]]
258 |
--------------------------------------------------------------------------------
/blizzardup-aws/src/aws.rs:
--------------------------------------------------------------------------------
1 | use aws_manager::sts;
2 | use serde::{Deserialize, Serialize};
3 |
4 | /// Represents the current AWS resource status.
5 | #[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Clone)]
6 | #[serde(rename_all = "snake_case")]
7 | pub struct Resources {
8 | /// AWS STS caller loaded from its local environment.
9 | /// READ ONLY.
10 | #[serde(skip_serializing_if = "Option::is_none")]
11 | pub identity: Option<sts::Identity>,
12 |
13 | /// AWS region to create resources.
14 | /// MUST BE NON-EMPTY.
15 | #[serde(default)]
16 | pub region: String,
17 |
18 | /// Name of the S3 bucket to store (or download from)
19 | /// the configuration and resources.
20 | /// If it does not exist, it is created automatically.
21 | /// If it exists, creation is skipped and the existing bucket is used.
22 | /// MUST BE NON-EMPTY.
23 | #[serde(default)]
24 | pub s3_bucket: String,
25 |
26 | /// EC2 key pair name for SSH access to EC2 instances.
27 | /// READ ONLY -- DO NOT SET.
28 | #[serde(skip_serializing_if = "Option::is_none")]
29 | pub ec2_key_name: Option<String>,
30 | /// Only updated after creation.
31 | /// READ ONLY -- DO NOT SET.
32 | #[serde(skip_serializing_if = "Option::is_none")]
33 | pub ec2_key_path: Option<String>,
34 |
35 | /// CloudFormation stack name for EC2 instance role.
36 | /// READ ONLY -- DO NOT SET.
37 | #[serde(skip_serializing_if = "Option::is_none")]
38 | pub cloudformation_ec2_instance_role: Option<String>,
39 | /// Instance profile ARN from "cloudformation_ec2_instance_role".
40 | /// Only updated after creation.
41 | /// READ ONLY -- DO NOT SET.
42 | #[serde(skip_serializing_if = "Option::is_none")]
43 | pub cloudformation_ec2_instance_profile_arn: Option<String>,
44 |
45 | /// CloudFormation stack name for VPC.
46 | /// READ ONLY -- DO NOT SET.
47 | #[serde(skip_serializing_if = "Option::is_none")]
48 | pub cloudformation_vpc: Option<String>,
49 | /// VPC ID from "cloudformation_vpc".
50 | /// Only updated after creation.
51 | /// READ ONLY -- DO NOT SET.
52 | #[serde(skip_serializing_if = "Option::is_none")]
53 | pub cloudformation_vpc_id: Option<String>,
54 | /// Security group ID from "cloudformation_vpc".
55 | /// Only updated after creation.
56 | /// READ ONLY -- DO NOT SET.
57 | #[serde(skip_serializing_if = "Option::is_none")]
58 | pub cloudformation_vpc_security_group_id: Option<String>,
59 | /// Public subnet IDs from "cloudformation_vpc".
60 | /// Only updated after creation.
61 | /// READ ONLY -- DO NOT SET.
62 | #[serde(skip_serializing_if = "Option::is_none")]
63 | pub cloudformation_vpc_public_subnet_ids: Option<Vec<String>>,
64 |
65 | /// CloudFormation stack name of the Auto Scaling Group (ASG)
66 | /// for the blizzard worker nodes.
67 | /// None until the ASG stack is created.
68 | /// READ ONLY -- DO NOT SET.
69 | #[serde(skip_serializing_if = "Option::is_none")]
70 | pub cloudformation_asg_blizzards: Option<String>,
71 | /// Only updated after creation.
72 | /// READ ONLY -- DO NOT SET.
73 | #[serde(skip_serializing_if = "Option::is_none")]
74 | pub cloudformation_asg_blizzards_logical_id: Option<String>,
75 | }
76 |
77 | impl Default for Resources {
78 | fn default() -> Self {
79 | Self::default()
80 | }
81 | }
82 |
83 | impl Resources {
84 | pub fn default() -> Self {
85 | Self {
86 | identity: None,
87 |
88 | region: String::from("us-west-2"),
89 |
90 | s3_bucket: String::new(),
91 |
92 | ec2_key_name: None,
93 | ec2_key_path: None,
94 |
95 | cloudformation_ec2_instance_role: None,
96 | cloudformation_ec2_instance_profile_arn: None,
97 |
98 | cloudformation_vpc: None,
99 | cloudformation_vpc_id: None,
100 | cloudformation_vpc_security_group_id: None,
101 | cloudformation_vpc_public_subnet_ids: None,
102 |
103 | cloudformation_asg_blizzards: None,
104 | cloudformation_asg_blizzards_logical_id: None,
105 | }
106 | }
107 | }
108 |
--------------------------------------------------------------------------------
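Resources in aws.rs above marks its READ ONLY fields with skip_serializing_if, so they stay out of the spec file until the apply step populates them. A minimal sketch of that behavior with a stand-in struct (not the actual Resources type), assuming only serde and serde_yaml, both already workspace dependencies:

use serde::Serialize;

// Stand-in struct illustrating the skip_serializing_if pattern used by Resources.
#[derive(Serialize)]
struct Sketch {
    region: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    cloudformation_vpc_id: Option<String>,
}

fn main() {
    let before = Sketch {
        region: "us-west-2".to_string(),
        cloudformation_vpc_id: None,
    };
    // Only "region" is emitted; the None field is omitted entirely.
    println!("{}", serde_yaml::to_string(&before).unwrap());

    let after = Sketch {
        region: "us-west-2".to_string(),
        cloudformation_vpc_id: Some("vpc-0123".to_string()),
    };
    // Once populated (after stack creation), the field shows up in the YAML.
    println!("{}", serde_yaml::to_string(&after).unwrap());
}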
/blizzardup-aws/src/blizzard.rs:
--------------------------------------------------------------------------------
1 | use serde::{Deserialize, Serialize};
2 |
3 | /// Defines flag options.
4 | #[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Clone)]
5 | #[serde(rename_all = "snake_case")]
6 | pub struct Spec {
7 | pub log_level: String,
8 |
9 | pub chain_rpc_urls: Vec<String>,
10 | pub load_kinds: Vec<String>,
11 | pub keys_to_generate: usize,
12 |
13 | pub workers: usize,
14 | }
15 |
16 | /// Defines the node type.
17 | #[derive(
18 | std::clone::Clone,
19 | std::cmp::Eq,
20 | std::cmp::Ord,
21 | std::cmp::PartialEq,
22 | std::cmp::PartialOrd,
23 | std::fmt::Debug,
24 | std::hash::Hash,
25 | )]
26 | pub enum LoadKind {
27 | XTransfers,
28 | EvmTransfers,
29 | Unknown(String),
30 | }
31 |
32 | impl std::convert::From<&str> for LoadKind {
33 | fn from(s: &str) -> Self {
34 | match s {
35 | "x-transfers" => LoadKind::XTransfers,
36 | "evm-transfers" => LoadKind::EvmTransfers,
37 |
38 | other => LoadKind::Unknown(other.to_owned()),
39 | }
40 | }
41 | }
42 |
43 | impl std::str::FromStr for LoadKind {
44 | type Err = std::convert::Infallible;
45 |
46 | fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
47 | Ok(LoadKind::from(s))
48 | }
49 | }
50 |
51 | impl LoadKind {
52 | /// Returns the `&str` value of the enum member.
53 | pub fn as_str(&self) -> &str {
54 | match self {
55 | LoadKind::XTransfers => "x-transfers",
56 | LoadKind::EvmTransfers => "evm-transfers",
57 |
58 | LoadKind::Unknown(s) => s.as_ref(),
59 | }
60 | }
61 |
62 | /// Returns all the `&str` values of the enum members.
63 | pub fn values() -> &'static [&'static str] {
64 | &[
65 | "x-transfers", //
66 | "evm-transfers", //
67 | ]
68 | }
69 | }
70 |
71 | impl AsRef<str> for LoadKind {
72 | fn as_ref(&self) -> &str {
73 | self.as_str()
74 | }
75 | }
76 |
--------------------------------------------------------------------------------
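Load kinds arrive as one comma-separated flag and are converted through the From<&str> impl in blizzard.rs above. A small self-contained sketch of that round trip, re-declaring a matching enum locally instead of importing the crate, and mirroring how blizzardup-aws/src/main.rs splits the flag:

#[derive(Debug, PartialEq)]
enum LoadKind {
    XTransfers,
    EvmTransfers,
    Unknown(String),
}

impl From<&str> for LoadKind {
    fn from(s: &str) -> Self {
        match s {
            "x-transfers" => LoadKind::XTransfers,
            "evm-transfers" => LoadKind::EvmTransfers,
            other => LoadKind::Unknown(other.to_owned()),
        }
    }
}

fn main() {
    // Mirrors "--blizzard-load-kinds x-transfers,evm-transfers": split on commas,
    // then convert each item; anything unrecognized becomes Unknown and is later
    // rejected by the worker dispatch in blizzard-aws/src/command.rs.
    let flag = "x-transfers,evm-transfers,bogus";
    let kinds: Vec<LoadKind> = flag.split(',').map(LoadKind::from).collect();
    assert_eq!(kinds[0], LoadKind::XTransfers);
    assert_eq!(kinds[1], LoadKind::EvmTransfers);
    assert_eq!(kinds[2], LoadKind::Unknown("bogus".to_string()));
    println!("{kinds:?}");
}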
/blizzardup-aws/src/default_spec/mod.rs:
--------------------------------------------------------------------------------
1 | use std::io::{self, stdout};
2 |
3 | use clap::{value_parser, Arg, Command};
4 | use crossterm::{
5 | execute,
6 | style::{Color, Print, ResetColor, SetForegroundColor},
7 | };
8 |
9 | pub const NAME: &str = "default-spec";
10 |
11 | pub fn command() -> Command {
12 | Command::new(NAME)
13 | .about("Writes a default configuration")
14 | .arg(
15 | Arg::new("LOG_LEVEL")
16 | .long("log-level")
17 | .short('l')
18 | .help("Sets the log level")
19 | .required(false)
20 | .num_args(1)
21 | .value_parser(["debug", "info"])
22 | .default_value("info"),
23 | )
24 | .arg(
25 | Arg::new("FUNDED_KEYS")
26 | .long("funded-keys")
27 | .help("Sets the number of pre-funded keys to load from avalanche-types TEST_KEYS")
28 | .required(false)
29 | .num_args(1)
30 | .value_parser(value_parser!(usize))
31 | .default_value("50"),
32 | )
33 | .arg(
34 | Arg::new("REGION")
35 | .long("region")
36 | .short('r')
37 | .help("Sets the AWS region for API calls/endpoints")
38 | .required(true)
39 | .num_args(1)
40 | .default_value("us-west-2"),
41 | )
42 | .arg(
43 | Arg::new("INSTANCE_MODE")
44 | .long("instance-mode")
45 | .help("Sets instance mode")
46 | .required(false)
47 | .num_args(1)
48 | .value_parser(["spot", "on-demand"])
49 | .default_value("spot"),
50 | )
51 | .arg(
52 | Arg::new("NODES")
53 | .long("nodes")
54 | .help("Sets the number of blizzards nodes to create")
55 | .required(false)
56 | .num_args(1)
57 | .value_parser(value_parser!(usize))
58 | .default_value("10"),
59 | )
60 | .arg(
61 | Arg::new("UPLOAD_ARTIFACTS_BLIZZARD_BIN")
62 | .long("upload-artifacts-blizzard-bin")
63 | .help("Sets the Blizzard binary path in the local machine to be shared with remote machines (if empty, it downloads the latest from github)")
64 | .required(false)
65 | .num_args(1),
66 | )
67 | .arg(
68 | Arg::new("BLIZZARD_LOG_LEVEL")
69 | .long("blizzard-log-level")
70 | .help("Sets the log level for 'blizzard'")
71 | .required(false)
72 | .num_args(1)
73 | .value_parser(["debug", "info"])
74 | .default_value("info"),
75 | )
76 | .arg(
77 | Arg::new("BLIZZARD_CHAIN_RPC_URLS")
78 | .long("blizzard-chain-rpc-urls")
79 | .help("Comma-separated chain RPC URLs")
80 | .required(false)
81 | .num_args(1)
82 | .default_value("http://localhost:9650/ext/C/rpc"),
83 | )
84 | .arg(
85 | Arg::new("BLIZZARD_LOAD_KINDS")
86 | .long("blizzard-load-kinds")
87 | .help("Comma-separated 'blizzard' load kinds (e.g., x-transfers,evm-transfers)")
88 | .required(false)
89 | .num_args(1)
90 | .default_value("x-transfers,evm-transfers"),
91 | )
92 | .arg(
93 | Arg::new("BLIZZARD_KEYS_TO_GENERATE")
94 | .long("blizzard-keys-to-generate")
95 | .help("Number of keys to generate per each blizzard agent")
96 | .required(false)
97 | .num_args(1)
98 | .value_parser(value_parser!(usize))
99 | .default_value("100"),
100 | )
101 | .arg(
102 | Arg::new("BLIZZARD_WORKERS")
103 | .long("blizzard-workers")
104 | .help("Sets the number of concurrent blizzard workers to launch on the host")
105 | .required(false)
106 | .num_args(1)
107 | .value_parser(value_parser!(usize))
108 | .default_value("30"),
109 | )
110 | .arg(
111 | Arg::new("SPEC_FILE_PATH")
112 | .long("spec-file-path")
113 | .short('s')
114 | .help("The config file to create")
115 | .required(false)
116 | .num_args(1),
117 | )
118 | }
119 |
120 | pub fn execute(opts: blizzardup_aws::DefaultSpecOption) -> io::Result<()> {
121 | // ref.
122 | env_logger::init_from_env(
123 | env_logger::Env::default()
124 | .filter_or(env_logger::DEFAULT_FILTER_ENV, opts.clone().log_level),
125 | );
126 |
127 | let spec = blizzardup_aws::Spec::default_aws(opts.clone());
128 | spec.validate()?;
129 |
130 | let spec_file_path = {
131 | if opts.spec_file_path.is_empty() {
132 | dir_manager::home::named(&spec.id, Some(".yaml"))
133 | } else {
134 | opts.spec_file_path
135 | }
136 | };
137 | spec.sync(&spec_file_path)?;
138 |
139 | execute!(
140 | stdout(),
141 | SetForegroundColor(Color::Blue),
142 | Print(format!("\nSaved spec: '{}'\n", spec_file_path)),
143 | ResetColor
144 | )?;
145 | let spec_contents = spec.encode_yaml().expect("failed spec.encode_yaml");
146 | println!("{}", spec_contents);
147 |
148 | println!();
149 | println!("# run the following to create resources");
150 | execute!(
151 | stdout(),
152 | SetForegroundColor(Color::Magenta),
153 | Print(format!("vi {}\n\n", spec_file_path)),
154 | ResetColor
155 | )?;
156 | let exec_path = std::env::current_exe().expect("unexpected None current_exe");
157 | execute!(
158 | stdout(),
159 | SetForegroundColor(Color::Green),
160 | Print(format!(
161 | "{} apply \\\n--spec-file-path {}\n",
162 | exec_path.display(),
163 | spec_file_path
164 | )),
165 | ResetColor
166 | )?;
167 | println!();
168 | println!("# run the following to delete resources");
169 | execute!(
170 | stdout(),
171 | SetForegroundColor(Color::Green),
172 | Print(format!(
173 | "{} delete \\\n--delete-cloudwatch-log-group \\\n--delete-s3-objects \\\n--spec-file-path {}\n\n",
174 | exec_path.display(),
175 | spec_file_path
176 | )),
177 | ResetColor
178 | )?;
179 |
180 | Ok(())
181 | }
182 |
--------------------------------------------------------------------------------
/blizzardup-aws/src/main.rs:
--------------------------------------------------------------------------------
1 | mod apply;
2 | mod default_spec;
3 | mod delete;
4 | mod query;
5 |
6 | use clap::{crate_version, Command};
7 |
8 | const APP_NAME: &str = "blizzardup-aws";
9 |
10 | /// Should be able to run with idempotency
11 | /// (e.g., multiple restarts should not recreate the same CloudFormation stacks)
12 | #[tokio::main]
13 | async fn main() {
14 | let matches = Command::new(APP_NAME)
15 | .version(crate_version!())
16 | .about("Blizzard control plane on AWS (requires blizzard)")
17 | .subcommands(vec![
18 | default_spec::command(),
19 | apply::command(),
20 | delete::command(),
21 | query::command(),
22 | ])
23 | .get_matches();
24 |
25 | match matches.subcommand() {
26 | Some((default_spec::NAME, sub_matches)) => {
27 | let funded_keys = *sub_matches.get_one::<usize>("FUNDED_KEYS").unwrap_or(&5);
28 | let blizzard_keys_to_generate = *sub_matches
29 | .get_one::<usize>("BLIZZARD_KEYS_TO_GENERATE")
30 | .unwrap_or(&5);
31 |
32 | let nodes = *sub_matches.get_one::<usize>("NODES").unwrap_or(&2);
33 |
34 | let s = sub_matches
35 | .get_one::<String>("BLIZZARD_CHAIN_RPC_URLS")
36 | .unwrap()
37 | .clone();
38 | let ss: Vec<&str> = s.split(',').collect();
39 | let mut blizzard_chain_rpc_urls: Vec<String> = Vec::new();
40 | for rpc in ss.iter() {
41 | let trimmed = rpc.trim().to_string();
42 | if !trimmed.is_empty() {
43 | blizzard_chain_rpc_urls.push(trimmed);
44 | }
45 | }
46 |
47 | let blizzard_load_kinds_str = sub_matches
48 | .get_one::("BLIZZARD_LOAD_KINDS")
49 | .unwrap()
50 | .clone();
51 | let blizzard_load_kinds_str: Vec<&str> = blizzard_load_kinds_str.split(',').collect();
52 | let mut blizzard_load_kinds: Vec = Vec::new();
53 | for lk in blizzard_load_kinds_str.iter() {
54 | blizzard_load_kinds.push(lk.to_string());
55 | }
56 |
57 | let blizzard_workers = *sub_matches
58 | .get_one::("BLIZZARD_WORKERS")
59 | .unwrap_or(&5);
60 |
61 | let opt = blizzardup_aws::DefaultSpecOption {
62 | log_level: sub_matches
63 | .get_one::("LOG_LEVEL")
64 | .unwrap_or(&String::from("info"))
65 | .clone(),
66 |
67 | funded_keys,
68 |
69 | region: sub_matches.get_one::("REGION").unwrap().clone(),
70 | instance_mode: sub_matches
71 | .get_one::("INSTANCE_MODE")
72 | .unwrap()
73 | .clone(),
74 |
75 | nodes,
76 |
77 | upload_artifacts_blizzard_bin: sub_matches
78 | .get_one::("UPLOAD_ARTIFACTS_BLIZZARD_BIN")
79 | .unwrap_or(&String::new())
80 | .to_string(),
81 | blizzard_log_level: sub_matches
82 | .get_one::("BLIZZARD_LOG_LEVEL")
83 | .unwrap_or(&String::from("info"))
84 | .to_string(),
85 | blizzard_chain_rpc_urls,
86 | blizzard_load_kinds,
87 | blizzard_keys_to_generate,
88 | blizzard_workers,
89 |
90 | spec_file_path: sub_matches
91 | .get_one::("SPEC_FILE_PATH")
92 | .unwrap_or(&String::new())
93 | .clone(),
94 | };
95 | default_spec::execute(opt).expect("failed to execute 'default-spec'");
96 | }
97 |
98 | Some((apply::NAME, sub_matches)) => {
99 | apply::execute(
100 | &sub_matches
101 | .get_one::("LOG_LEVEL")
102 | .unwrap_or(&String::from("info"))
103 | .clone(),
104 | &sub_matches
105 | .get_one::("SPEC_FILE_PATH")
106 | .unwrap()
107 | .clone(),
108 | sub_matches.get_flag("SKIP_PROMPT"),
109 | sub_matches
110 | .get_one::("PROFILE_NAME")
111 | .unwrap()
112 | .clone(),
113 | )
114 | .await
115 | .expect("failed to execute 'apply'");
116 | }
117 |
118 | Some((delete::NAME, sub_matches)) => {
119 | delete::execute(
120 | &sub_matches
121 | .get_one::("LOG_LEVEL")
122 | .unwrap_or(&String::from("info"))
123 | .clone(),
124 | &sub_matches
125 | .get_one::("SPEC_FILE_PATH")
126 | .unwrap()
127 | .clone(),
128 | sub_matches.get_flag("DELETE_CLOUDWATCH_LOG_GROUP"),
129 | sub_matches.get_flag("DELETE_S3_OBJECTS"),
130 | sub_matches.get_flag("DELETE_S3_BUCKET"),
131 | sub_matches.get_flag("SKIP_PROMPT"),
132 | sub_matches
133 | .get_one::("PROFILE_NAME")
134 | .unwrap()
135 | .clone(),
136 | )
137 | .await
138 | .expect("failed to execute 'delete'");
139 | }
140 |
141 | Some((query::NAME, sub_matches)) => {
142 | query::execute(
143 | &sub_matches
144 | .get_one::("LOG_LEVEL")
145 | .unwrap_or(&String::from("info"))
146 | .clone(),
147 | &sub_matches
148 | .get_one::("SPEC_FILE_PATH")
149 | .unwrap()
150 | .clone(),
151 | )
152 | .await
153 | .expect("failed to execute 'delete'");
154 | }
155 |
156 | _ => unreachable!("unknown subcommand"),
157 | }
158 | }
159 |
--------------------------------------------------------------------------------
/blizzardup-aws/src/status.rs:
--------------------------------------------------------------------------------
1 | use primitive_types::U256;
2 | use serde::{Deserialize, Serialize};
3 |
4 | #[derive(Debug, Serialize, Deserialize, PartialEq, Clone)]
5 | #[serde(rename_all = "snake_case")]
6 | pub struct Status {
7 | pub network_id: u32,
8 | pub chain_id: U256,
9 | }
10 |
--------------------------------------------------------------------------------
/cdk/avalancheup-aws/.gitignore:
--------------------------------------------------------------------------------
1 | *.js
2 | !jest.config.js
3 | *.d.ts
4 | node_modules
5 |
6 | # CDK asset staging directory
7 | .cdk.staging
8 | cdk.out
9 |
--------------------------------------------------------------------------------
/cdk/avalancheup-aws/.npmignore:
--------------------------------------------------------------------------------
1 | *.ts
2 | !*.d.ts
3 |
4 | # CDK asset staging directory
5 | .cdk.staging
6 | cdk.out
7 |
--------------------------------------------------------------------------------
/cdk/avalancheup-aws/bin/avalancheup-aws.ts:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env node
2 | import 'source-map-support/register';
3 | import * as path from 'path';
4 | import * as cdk from 'aws-cdk-lib';
5 | import * as cfn_include from 'aws-cdk-lib/cloudformation-include';
6 | // import * as cfn_ec2 from 'aws-cdk-lib/aws-ec2';
7 |
8 | export class AvalancheupInstanceRoleStack extends cdk.Stack {
9 | public readonly instanceRoleArn: cdk.CfnOutput;
10 | public readonly instanceProfileArn: cdk.CfnOutput;
11 |
12 | constructor(scope: cdk.App, id: string, props: cdk.StackProps) {
13 | super(scope, id, props);
14 | console.log("CDK_ACCOUNT:", process.env.CDK_ACCOUNT);
15 | console.log("CDK_REGION:", process.env.CDK_REGION);
16 |
17 | // ref. https://docs.aws.amazon.com/cdk/api/v1/docs/cloudformation-include-readme.html#non-resource-template-elements
18 | const tmplAsg = new cfn_include.CfnInclude(this, `included-template-instance-role-${process.env.CLUSTER_ID || ''}`, {
19 | templateFile: path.join('..', '..', 'avalancheup', 'src', 'aws', 'cfn-templates', 'ec2_instance_role.yaml'),
20 | });
21 |
22 | // mutate default parameters
23 | const paramId: cdk.CfnParameter = tmplAsg.getParameter('Id');
24 | paramId.default = process.env.ID;
25 |
26 | const paramKmsKeyArn: cdk.CfnParameter = tmplAsg.getParameter('KmsKeyArn');
27 | paramKmsKeyArn.default = process.env.KMS_KEY_ARN;
28 |
29 | const paramS3BucketName: cdk.CfnParameter = tmplAsg.getParameter('S3BucketName');
30 | paramS3BucketName.default = process.env.S3_BUCKET_NAME;
31 |
32 | this.instanceRoleArn = tmplAsg.getOutput('InstanceRoleArn');
33 | this.instanceProfileArn = tmplAsg.getOutput('InstanceProfileArn');
34 | }
35 | }
36 |
37 | export class AvalancheupInstanceVpcStack extends cdk.Stack {
38 | public readonly vpcId: cdk.CfnOutput;
39 | public readonly securityGroupId: cdk.CfnOutput;
40 | public readonly publicSubnetIds: cdk.CfnOutput;
41 |
42 | constructor(scope: cdk.App, id: string, props: cdk.StackProps) {
43 | super(scope, id, props);
44 |
45 | // ref. https://docs.aws.amazon.com/cdk/api/v1/docs/cloudformation-include-readme.html#non-resource-template-elements
46 | const tmplVpc = new cfn_include.CfnInclude(this, `included-template-vpc-${process.env.CLUSTER_ID || ''}`, {
47 | templateFile: path.join('..', '..', 'avalancheup', 'src', 'aws', 'cfn-templates', 'vpc.yaml'),
48 | });
49 |
50 | // mutate default parameters
51 | const paramId: cdk.CfnParameter = tmplVpc.getParameter('Id');
52 | paramId.default = process.env.ID;
53 |
54 | this.vpcId = tmplVpc.getOutput('VpcId');
55 | this.securityGroupId = tmplVpc.getOutput('SecurityGroupId');
56 | this.publicSubnetIds = tmplVpc.getOutput('PublicSubnetIds');
57 | }
58 | }
59 |
60 | interface AvalancheupAsgProps extends cdk.StackProps {
61 | instanceRoleArn: String;
62 | instanceProfileArn: String;
63 | vpcId: String;
64 | securityGroupId: String;
65 | publicSubnetIds: String;
66 | }
67 |
68 | export class AvalancheupInstanceAsgStack extends cdk.Stack {
69 | public readonly asgLogicalId: cdk.CfnOutput;
70 |
71 | constructor(scope: cdk.App, id: string, props: AvalancheupAsgProps) {
72 | super(scope, id, props);
73 |
74 | // ref. https://docs.aws.amazon.com/cdk/api/v1/docs/cloudformation-include-readme.html#non-resource-template-elements
75 | const tmplAsg = new cfn_include.CfnInclude(this, `included-template-asg-${process.env.CLUSTER_ID || ''}`, {
76 | templateFile: path.join('..', '..', 'avalanche-ops', 'src', 'aws', 'cfn-templates', 'asg_ubuntu.yaml'),
77 | });
78 |
79 | // mutate default parameters
80 | const paramId: cdk.CfnParameter = tmplAsg.getParameter('Id');
81 | paramId.default = process.env.ID;
82 |
83 | const paramKmsKeyArn: cdk.CfnParameter = tmplAsg.getParameter('KmsKeyArn');
84 | paramKmsKeyArn.default = process.env.KMS_KEY_ARN;
85 |
86 | const paramS3BucketName: cdk.CfnParameter = tmplAsg.getParameter('S3BucketName');
87 | paramS3BucketName.default = process.env.S3_BUCKET_NAME;
88 |
89 | const paramEc2KeyPairName: cdk.CfnParameter = tmplAsg.getParameter('Ec2KeyPairName');
90 | paramEc2KeyPairName.default = process.env.EC2_KEY_PAIR_NAME;
91 |
92 | const paramAadTag: cdk.CfnParameter = tmplAsg.getParameter('AadTag');
93 | paramAadTag.default = process.env.AAD_TAG;
94 |
95 | const paramInstanceProfileArn: cdk.CfnParameter = tmplAsg.getParameter('InstanceProfileArn');
96 | paramInstanceProfileArn.default = process.env.INSTANCE_PROFILE_ARN;
97 | // TODO: not working...
98 | // paramInstanceProfileArn.default = props.instanceProfileArn.toString();
99 |
100 | const paramPublicSubnetIds: cdk.CfnParameter = tmplAsg.getParameter('PublicSubnetIds');
101 | paramPublicSubnetIds.default = process.env.PUBLIC_SUBNET_IDS;
102 | // TODO: not working...
103 | // paramPublicSubnetIds.default = props.publicSubnetIds.toString();
104 |
105 | const paramSecurityGroupId: cdk.CfnParameter = tmplAsg.getParameter('SecurityGroupId');
106 | paramSecurityGroupId.default = process.env.SECURITY_GROUP_ID;
107 | // TODO: not working...
108 | // paramSecurityGroupId.default = props.securityGroupId.toString();
109 |
110 | // only support non-anchor nodes for now...
111 | const paramNodeKind: cdk.CfnParameter = tmplAsg.getParameter('NodeKind');
112 | paramNodeKind.default = 'non-anchor';
113 |
114 | const paramAsgName: cdk.CfnParameter = tmplAsg.getParameter('AsgName');
115 | paramAsgName.default = process.env.ID + '-non-anchor-amd64';
116 |
117 | // since we don't use S3 to upload avalanched binary
118 | const paramAvalanchedAwsDownloadSource: cdk.CfnParameter = tmplAsg.getParameter('AvalanchedAwsDownloadSource');
119 | paramAvalanchedAwsDownloadSource.default = 'github';
120 |
121 | // to skip s3 uploads for node discovery
122 | const paramAvalanchedAwsArgs: cdk.CfnParameter = tmplAsg.getParameter('AvalanchedAwsArgs');
123 | paramAvalanchedAwsArgs.default = 'agent --use-default-config';
124 |
125 | // "mainnet" is 1, "fuji" is 5
126 | const paramNetworkId: cdk.CfnParameter = tmplAsg.getParameter('NetworkId');
127 | paramNetworkId.default = process.env.NETWORK_ID;
128 |
129 | const paramNlbVpcId: cdk.CfnParameter = tmplAsg.getParameter('NlbVpcId');
130 | paramNlbVpcId.default = process.env.NLB_VPC_ID;
131 |
132 | // TODO: "InstanceMode=spot" and "OnDemandPercentageAboveBaseCapacity=0" for spot instance...
133 |
134 | this.asgLogicalId = tmplAsg.getOutput('AsgLogicalId');
135 | }
136 | }
137 |
138 | const app = new cdk.App();
139 |
140 | const instanceRoleStack = new AvalancheupInstanceRoleStack(app, 'avalancheup-aws-instance-role-stack',
141 | {
142 | stackName: 'avalancheup-aws-instance-role-stack',
143 | env: {
144 | account: process.env.CDK_ACCOUNT || process.env.CDK_DEFAULT_ACCOUNT,
145 | region: process.env.CDK_REGION || process.env.CDK_DEFAULT_REGION
146 | },
147 | }
148 | );
149 |
150 | const vpcStack = new AvalancheupInstanceVpcStack(app, 'avalancheup-aws-vpc-stack',
151 | {
152 | stackName: 'avalancheup-aws-vpc-stack',
153 | env: {
154 | account: process.env.CDK_ACCOUNT || process.env.CDK_DEFAULT_ACCOUNT,
155 | region: process.env.CDK_REGION || process.env.CDK_DEFAULT_REGION
156 | },
157 | }
158 | );
159 |
160 | const asgStack = new AvalancheupInstanceAsgStack(app, 'avalancheup-aws-asg-stack',
161 | {
162 | stackName: 'avalancheup-aws-asg-stack',
163 | env: {
164 | account: process.env.CDK_ACCOUNT || process.env.CDK_DEFAULT_ACCOUNT,
165 | region: process.env.CDK_REGION || process.env.CDK_DEFAULT_REGION
166 | },
167 | instanceRoleArn: instanceRoleStack.instanceRoleArn.value.toString(),
168 | instanceProfileArn: instanceRoleStack.instanceProfileArn.value.toString(),
169 | vpcId: vpcStack.vpcId.value.toString(),
170 | securityGroupId: vpcStack.securityGroupId.value.toString(),
171 | publicSubnetIds: vpcStack.publicSubnetIds.value.toString(),
172 | }
173 | );
174 | // asgStack.node.addDependency([instanceRoleStack, vpcStack]);
175 |
176 | app.synth();
177 |
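The stacks above take every CloudFormation parameter override from environment variables (`process.env.*`). A minimal deployment sketch follows, assuming the account, region, and resource identifiers below are placeholders to be replaced with real values; the variable names come from the file above, the values are illustrative only:

```bash
# assumed placeholder values; substitute your own account, region, and resource IDs
export CDK_ACCOUNT=123456789012
export CDK_REGION=us-west-2
export CLUSTER_ID=my-cluster
export ID=my-cluster
export KMS_KEY_ARN=arn:aws:kms:us-west-2:123456789012:key/abc-abc-abc
export S3_BUCKET_NAME=my-avalancheup-bucket
export EC2_KEY_PAIR_NAME=my-key-pair
export AAD_TAG=mytag
export NETWORK_ID=5   # "mainnet" is 1, "fuji" is 5

cd cdk/avalancheup-aws
npm install
npx cdk bootstrap "aws://${CDK_ACCOUNT}/${CDK_REGION}"
npx cdk deploy avalancheup-aws-instance-role-stack avalancheup-aws-vpc-stack

# the ASG stack currently reads these from the environment rather than from the
# other stacks' outputs (see the TODOs above), so export them from the deployed
# instance-role and VPC stack outputs before deploying it
export INSTANCE_PROFILE_ARN=arn:aws:iam::123456789012:instance-profile/my-profile
export PUBLIC_SUBNET_IDS=subnet-aaa,subnet-bbb
export SECURITY_GROUP_ID=sg-0123456789abcdef0
export NLB_VPC_ID=vpc-0123456789abcdef0
npx cdk deploy avalancheup-aws-asg-stack
```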
--------------------------------------------------------------------------------
/cdk/avalancheup-aws/cdk-avalanche-ops.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/cdk/avalancheup-aws/cdk-avalanche-ops.png
--------------------------------------------------------------------------------
/cdk/avalancheup-aws/cdk.json:
--------------------------------------------------------------------------------
1 | {
2 | "app": "npx ts-node --prefer-ts-exts bin/avalancheup-aws.ts",
3 | "watch": {
4 | "include": [
5 | "**"
6 | ],
7 | "exclude": [
8 | "README.md",
9 | "cdk*.json",
10 | "**/*.d.ts",
11 | "**/*.js",
12 | "tsconfig.json",
13 | "package*.json",
14 | "yarn.lock",
15 | "node_modules",
16 | "test"
17 | ]
18 | },
19 | "context": {
20 | "@aws-cdk/aws-apigateway:usagePlanKeyOrderInsensitiveId": true,
21 | "@aws-cdk/core:stackRelativeExports": true,
22 | "@aws-cdk/aws-rds:lowercaseDbIdentifier": true,
23 | "@aws-cdk/aws-lambda:recognizeVersionProps": true,
24 | "@aws-cdk/aws-lambda:recognizeLayerVersion": true,
25 | "@aws-cdk/aws-cloudfront:defaultSecurityPolicyTLSv1.2_2021": true,
26 | "@aws-cdk-containers/ecs-service-extensions:enableDefaultLogDriver": true,
27 | "@aws-cdk/aws-ec2:uniqueImdsv2TemplateName": true,
28 | "@aws-cdk/core:checkSecretUsage": true,
29 | "@aws-cdk/aws-iam:minimizePolicies": true,
30 | "@aws-cdk/core:validateSnapshotRemovalPolicy": true,
31 | "@aws-cdk/aws-codepipeline:crossAccountKeyAliasStackSafeResourceName": true,
32 | "@aws-cdk/aws-s3:createDefaultLoggingPolicy": true,
33 | "@aws-cdk/aws-sns-subscriptions:restrictSqsDescryption": true,
34 | "@aws-cdk/core:target-partitions": [
35 | "aws",
36 | "aws-cn"
37 | ]
38 | }
39 | }
40 |
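The `app` entry above is the command the CDK CLI runs to synthesize the app, and the `watch` globs control what triggers re-synthesis. A quick sketch of the standard CDK CLI invocations, run from `cdk/avalancheup-aws/`:

```bash
# synthesize CloudFormation templates into cdk.out/ via the "app" command above
npx cdk synth
# re-synthesize on file changes, honoring the include/exclude globs in "watch"
npx cdk watch
```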
--------------------------------------------------------------------------------
/cdk/avalancheup-aws/img/demo1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/cdk/avalancheup-aws/img/demo1.png
--------------------------------------------------------------------------------
/cdk/avalancheup-aws/img/demo10.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/cdk/avalancheup-aws/img/demo10.png
--------------------------------------------------------------------------------
/cdk/avalancheup-aws/img/demo2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/cdk/avalancheup-aws/img/demo2.png
--------------------------------------------------------------------------------
/cdk/avalancheup-aws/img/demo3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/cdk/avalancheup-aws/img/demo3.png
--------------------------------------------------------------------------------
/cdk/avalancheup-aws/img/demo4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/cdk/avalancheup-aws/img/demo4.png
--------------------------------------------------------------------------------
/cdk/avalancheup-aws/img/demo5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/cdk/avalancheup-aws/img/demo5.png
--------------------------------------------------------------------------------
/cdk/avalancheup-aws/img/demo6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/cdk/avalancheup-aws/img/demo6.png
--------------------------------------------------------------------------------
/cdk/avalancheup-aws/img/demo7.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/cdk/avalancheup-aws/img/demo7.png
--------------------------------------------------------------------------------
/cdk/avalancheup-aws/img/demo8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/cdk/avalancheup-aws/img/demo8.png
--------------------------------------------------------------------------------
/cdk/avalancheup-aws/img/demo9.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/cdk/avalancheup-aws/img/demo9.png
--------------------------------------------------------------------------------
/cdk/avalancheup-aws/jest.config.js:
--------------------------------------------------------------------------------
1 | module.exports = {
2 | testEnvironment: 'node',
3 | roots: ['<rootDir>/test'],
4 | testMatch: ['**/*.test.ts'],
5 | transform: {
6 | '^.+\\.tsx?$': 'ts-jest'
7 | }
8 | };
9 |
--------------------------------------------------------------------------------
/cdk/avalancheup-aws/lib/avalancheup-aws-stack.ts:
--------------------------------------------------------------------------------
1 | import { Stack, StackProps } from 'aws-cdk-lib';
2 | import { Construct } from 'constructs';
3 | // import * as sqs from 'aws-cdk-lib/aws-sqs';
4 |
5 | export class AvalancheupAwsStack extends Stack {
6 | constructor(scope: Construct, id: string, props?: StackProps) {
7 | super(scope, id, props);
8 |
9 | // The code that defines your stack goes here
10 |
11 | // example resource
12 | // const queue = new sqs.Queue(this, 'AvalancheupAwsQueue', {
13 | // visibilityTimeout: cdk.Duration.seconds(300)
14 | // });
15 | }
16 | }
17 |
--------------------------------------------------------------------------------
/cdk/avalancheup-aws/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "avalancheup-aws",
3 | "version": "0.1.0",
4 | "bin": {
5 | "avalancheup-aws": "bin/avalancheup-aws.js"
6 | },
7 | "scripts": {
8 | "build": "tsc",
9 | "watch": "tsc -w",
10 | "test": "jest",
11 | "cdk": "cdk"
12 | },
13 | "devDependencies": {
14 | "@types/jest": "^27.5.2",
15 | "@types/node": "10.17.27",
16 | "@types/prettier": "2.6.0",
17 | "jest": "^27.5.1",
18 | "ts-jest": "^27.1.4",
19 | "aws-cdk": "2.34.2",
20 | "ts-node": "^10.9.1",
21 | "typescript": "~3.9.7"
22 | },
23 | "dependencies": {
24 | "aws-cdk-lib": "2.34.2",
25 | "constructs": "^10.0.0",
26 | "source-map-support": "^0.5.21"
27 | }
28 | }
29 |
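The `scripts` block wires up the usual CDK project tasks; a quick sketch of running them locally, assuming Node.js and npm are installed:

```bash
cd cdk/avalancheup-aws
npm install       # installs aws-cdk-lib, constructs, jest, ts-node, etc.
npm run build     # tsc: compiles *.ts into *.js and *.d.ts
npm test          # jest: runs test/avalancheup-aws.test.ts via ts-jest
npx cdk synth     # or: npm run cdk -- synth
```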
--------------------------------------------------------------------------------
/cdk/avalancheup-aws/test/avalancheup-aws.test.ts:
--------------------------------------------------------------------------------
1 | // import * as cdk from 'aws-cdk-lib';
2 | // import { Template } from 'aws-cdk-lib/assertions';
3 | // import * as AvalancheupAws from '../lib/avalancheup-aws-stack';
4 |
5 | // example test. To run these tests, uncomment this file along with the
6 | // example resource in lib/avalancheup-aws-stack.ts
7 | test('SQS Queue Created', () => {
8 | // const app = new cdk.App();
9 | // // WHEN
10 | // const stack = new AvalancheupAws.AvalancheupAwsStack(app, 'MyTestStack');
11 | // // THEN
12 | // const template = Template.fromStack(stack);
13 |
14 | // template.hasResourceProperties('AWS::SQS::Queue', {
15 | // VisibilityTimeout: 300
16 | // });
17 | });
18 |
--------------------------------------------------------------------------------
/cdk/avalancheup-aws/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "target": "ES2018",
4 | "module": "commonjs",
5 | "lib": [
6 | "es2018"
7 | ],
8 | "declaration": true,
9 | "strict": true,
10 | "noImplicitAny": true,
11 | "strictNullChecks": true,
12 | "noImplicitThis": true,
13 | "alwaysStrict": true,
14 | "noUnusedLocals": false,
15 | "noUnusedParameters": false,
16 | "noImplicitReturns": true,
17 | "noFallthroughCasesInSwitch": false,
18 | "inlineSourceMap": true,
19 | "inlineSources": true,
20 | "experimentalDecorators": true,
21 | "strictPropertyInitialization": false,
22 | "typeRoots": [
23 | "./node_modules/@types"
24 | ]
25 | },
26 | "exclude": [
27 | "node_modules",
28 | "cdk.out"
29 | ]
30 | }
31 |
--------------------------------------------------------------------------------
/devnet-faucet/.gitignore:
--------------------------------------------------------------------------------
1 | /dist
--------------------------------------------------------------------------------
/devnet-faucet/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "devnet-faucet"
3 | version = "1.0.0" # https://github.com/ava-labs/avalanche-ops/releases
4 | edition = "2021"
5 | license = "MIT OR Apache-2.0"
6 |
7 | [[bin]]
8 | name = "devnet-faucet"
9 | path = "src/main.rs"
10 |
11 | [dependencies]
12 | avalanche-types = { version = "0.1.4", features = ["evm", "jsonrpc_client", "wallet", "wallet_evm"] } # https://crates.io/crates/avalanche-types
13 | bytes = "1.4.0"
14 | clap = { version = "4.4.0", features = ["cargo", "derive"] } # https://github.com/clap-rs/clap/releases
15 | env_logger = "0.10.0"
16 | ethers = { version = "=2.0.7" }
17 | ethers-core = { version = "=2.0.7", features = ["eip712"] }
18 | ethers-providers = { version = "=2.0.7" }
19 | ethers-signers = { version = "=2.0.7" }
20 | futures-util = { version = "0.3", default-features = false, features = ["sink"] }
21 | governor = "0.6.0"
22 | log = "0.4.20"
23 | nonzero_ext = "0.3.0"
24 | primitive-types = "0.12.1" # https://crates.io/crates/primitive-types
25 | random-manager = "0.0.5"
26 | serde = { version = "1.0.186", features = ["derive"] } # https://github.com/serde-rs/serde/releases
27 | serde_json = "1.0.105" # https://github.com/serde-rs/json/releases
28 | serde_with = { version = "3.2.0", features = ["hex"] }
29 | serde_yaml = "0.9.25" # https://github.com/dtolnay/serde-yaml/releases
30 | tokio = { version = "1.32.0", features = ["full"] }
31 | tokio-stream = "0.1.1"
32 | warp = "0.3.5"
33 |
34 | [dev-dependencies]
35 | tempfile = "3.8.0"
36 |
--------------------------------------------------------------------------------
/devnet-faucet/README.md:
--------------------------------------------------------------------------------
1 |
2 | # DEVNET faucet UI
3 |
4 | 
5 |
--------------------------------------------------------------------------------
/devnet-faucet/demo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ava-labs/avalanche-ops/9c2b4ed994cced63eb59f3239ffbead45f9dbb57/devnet-faucet/demo.png
--------------------------------------------------------------------------------
/devnet-faucet/src/README.md:
--------------------------------------------------------------------------------
1 |
2 | # Example
3 |
4 | ```bash
5 | cargo build \
6 | --release \
7 | --bin devnet-faucet
8 | ```
9 |
10 | ```bash
11 | cat > /tmp/devnet-faucet.keys <<EOF
--------------------------------------------------------------------------------
/devnet-faucet/src/flags.rs:
--------------------------------------------------------------------------------
1 | use std::net::SocketAddr;
2 |
3 | /// Defines flag options.
4 | #[derive(Debug)]
5 | pub struct Options {
6 | pub log_level: String,
7 | pub http_host: SocketAddr,
8 | pub chain_rpc_urls: Vec<String>,
9 | pub keys_file: String,
10 | }
11 |
--------------------------------------------------------------------------------
/devnet-faucet/src/main.rs:
--------------------------------------------------------------------------------
1 | mod command;
2 | mod flags;
3 |
4 | use std::net::SocketAddr;
5 |
6 | use clap::{crate_version, Arg, Command};
7 |
8 | pub const APP_NAME: &str = "devnet-faucet";
9 |
10 | /// ref.
11 | #[tokio::main]
12 | async fn main() {
13 | let matches = Command::new(APP_NAME)
14 | .version(crate_version!())
15 | .about("Runs a gasless voting demo")
16 | .arg(
17 | Arg::new("LOG_LEVEL")
18 | .long("log-level")
19 | .short('l')
20 | .help("Sets the log level")
21 | .required(false)
22 | .num_args(1)
23 | .value_parser(["debug", "info"])
24 | .default_value("info"),
25 | )
26 | .arg(
27 | Arg::new("HTTP_HOST")
28 | .long("http-host")
29 | .help("Sets the HTTP host/port to serve (0.0.0.0:3031 to open to all)")
30 | .required(false)
31 | .num_args(1)
32 | .default_value("127.0.0.1:3031"),
33 | )
34 | .arg(
35 | Arg::new("CHAIN_RPC_URLS")
36 | .long("chain-rpc-urls")
37 | .help("Comma-separated Chain RPC URLs (e.g., http://[HOST]:[PORT]/ext/C/rpc)")
38 | .required(false) // TODO: make this required
39 | .num_args(1)
40 | .default_value("http://localhost:9650/ext/C/rpc"),
41 | )
42 | .arg(
43 | Arg::new("KEYS_FILE")
44 | .long("keys-file")
45 | .help("Sets the YAML file path that contains the key information (if not exists, retries with interval)")
46 | .required(true)
47 | .num_args(1),
48 | )
49 | .get_matches();
50 |
51 | println!("{} version: {}", APP_NAME, crate_version!());
52 |
53 | let http_host = matches
54 | .get_one::<String>("HTTP_HOST")
55 | .unwrap_or(&String::from("127.0.0.1:3031")) // "0.0.0.0:3031" to open to all IPs
56 | .clone();
57 | let http_host: SocketAddr = http_host.parse().unwrap();
58 |
59 | let s = matches
60 | .get_one::<String>("CHAIN_RPC_URLS")
61 | .unwrap_or(&String::new())
62 | .clone();
63 | let ss: Vec<&str> = s.split(',').collect();
64 | let mut chain_rpc_urls: Vec<String> = Vec::new();
65 | for rpc in ss.iter() {
66 | chain_rpc_urls.push(rpc.to_string());
67 | }
68 |
69 | command::execute(flags::Options {
70 | log_level: matches
71 | .get_one::<String>("LOG_LEVEL")
72 | .unwrap_or(&String::from("info"))
73 | .clone(),
74 | http_host,
75 | chain_rpc_urls,
76 | keys_file: matches
77 | .get_one::<String>("KEYS_FILE")
78 | .unwrap() // "KEYS_FILE" is a required flag
79 | .clone(),
80 | })
81 | .await
82 | .unwrap();
83 | }
84 |
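Putting the flags above together, a run sketch; the keys file path and RPC URLs are placeholders, and `--keys-file` is the only required flag:

```bash
./target/release/devnet-faucet \
--log-level=info \
--http-host=0.0.0.0:3031 \
--chain-rpc-urls=http://localhost:9650/ext/C/rpc,http://localhost:9652/ext/C/rpc \
--keys-file=/tmp/devnet-faucet.keys
# the faucet UI (src/static/index.html) should then be reachable at the --http-host address
```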
--------------------------------------------------------------------------------
/scripts/build.release.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -xue
3 |
4 | if ! [[ "$0" =~ scripts/build.release.sh ]]; then
5 | echo "must be run from repository root"
6 | exit 255
7 | fi
8 |
9 | # "--bin" can be specified multiple times for each directory in "bin/*" or workspaces
10 | cargo build \
11 | --release \
12 | --bin avalanche-kms \
13 | --bin avalanched-aws \
14 | --bin avalancheup-aws \
15 | --bin blizzard-aws \
16 | --bin blizzardup-aws \
17 | --bin staking-key-cert-s3-downloader \
18 | --bin staking-signer-key-s3-downloader \
19 | --bin devnet-faucet
20 |
21 | ./target/release/avalanche-kms --help
22 |
23 | ./target/release/avalanched-aws --help
24 |
25 | ./target/release/avalancheup-aws --help
26 | ./target/release/avalancheup-aws default-spec --help
27 | ./target/release/avalancheup-aws apply --help
28 | ./target/release/avalancheup-aws delete --help
29 |
30 | ./target/release/blizzard-aws --help
31 |
32 | ./target/release/blizzardup-aws --help
33 | ./target/release/blizzardup-aws default-spec --help
34 | ./target/release/blizzardup-aws apply --help
35 | ./target/release/blizzardup-aws delete --help
36 |
37 | ./target/release/staking-key-cert-s3-downloader --help
38 | ./target/release/staking-signer-key-s3-downloader --help
39 |
40 | ./target/release/devnet-faucet --help
41 |
--------------------------------------------------------------------------------
/scripts/tests.lint.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -xue
3 |
4 | if ! [[ "$0" =~ scripts/tests.lint.sh ]]; then
5 | echo "must be run from repository root"
6 | exit 255
7 | fi
8 |
9 | # https://rust-lang.github.io/rustup/installation/index.html
10 | # rustup toolchain install nightly --allow-downgrade --profile minimal --component clippy
11 | #
12 | # https://github.com/rust-lang/rustfmt
13 | # rustup component add rustfmt
14 | # rustup component add rustfmt --toolchain nightly
15 | # rustup component add clippy
16 | # rustup component add clippy --toolchain nightly
17 |
18 | rustup default stable
19 | cargo fmt --all --verbose -- --check
20 |
21 | # TODO: enable nightly fmt
22 | rustup default nightly
23 | cargo +nightly fmt --all -- --config-path .rustfmt.nightly.toml --verbose --check || true
24 |
25 | # TODO: enable this
26 | cargo +nightly clippy --all --all-features -- -D warnings || true
27 |
28 | rustup default stable
29 |
30 | echo "ALL SUCCESS!"
31 |
--------------------------------------------------------------------------------
/scripts/tests.unit.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -xue
3 |
4 | if ! [[ "$0" =~ scripts/tests.unit.sh ]]; then
5 | echo "must be run from repository root"
6 | exit 255
7 | fi
8 |
9 | RUST_LOG=debug cargo test --all --all-features -- --show-output
10 | # RUST_LOG=debug cargo test --all --all-features -- --show-output --ignored
11 |
12 | echo "ALL SUCCESS!"
13 |
--------------------------------------------------------------------------------
/scripts/tests.unused.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -xue
3 |
4 | if ! [[ "$0" =~ scripts/tests.unused.sh ]]; then
5 | echo "must be run from repository root"
6 | exit 255
7 | fi
8 |
9 | # cargo install cargo-udeps --locked
10 | # https://github.com/est31/cargo-udeps
11 | cargo install cargo-udeps --locked
12 |
13 | # TODO: re-enable
14 | cargo +nightly udeps --all-targets
15 |
16 | echo "ALL SUCCESS!"
17 |
--------------------------------------------------------------------------------
/staking-key-cert-s3-downloader/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "staking-key-cert-s3-downloader"
3 | version = "1.0.0" # https://github.com/ava-labs/avalanche-ops/releases
4 | edition = "2021"
5 | rust-version = "1.70"
6 |
7 | [dependencies]
8 | avalanche-types = { version = "0.1.4", features = [] } # https://crates.io/crates/avalanche-types
9 | aws-manager = { version = "0.30.2", features = ["kms", "s3"] } # https://github.com/gyuho/aws-manager/tags
10 | clap = { version = "4.4.0", features = ["cargo", "derive"] } # https://github.com/clap-rs/clap/releases
11 | env_logger = "0.10.0"
12 | log = "0.4.20"
13 | tokio = { version = "1.32.0", features = ["full"] } # https://github.com/tokio-rs/tokio/releases
14 |
--------------------------------------------------------------------------------
/staking-key-cert-s3-downloader/src/command.rs:
--------------------------------------------------------------------------------
1 | use std::{
2 | io::{self, Error, ErrorKind},
3 | path::Path,
4 | };
5 |
6 | use crate::flags;
7 | use avalanche_types::ids::node;
8 | use aws_manager::{
9 | self,
10 | kms::{self, envelope},
11 | s3,
12 | };
13 | use tokio::time::Duration;
14 |
15 | pub async fn execute(opts: flags::Options) -> io::Result<()> {
16 | println!("starting {} with {:?}", crate::APP_NAME, opts);
17 |
18 | // ref.
19 | env_logger::init_from_env(
20 | env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, opts.log_level),
21 | );
22 |
23 | let s3_shared_config = aws_manager::load_config(
24 | Some(opts.s3_region.clone()),
25 | Some(opts.profile_name.clone()),
26 | Some(Duration::from_secs(30)),
27 | )
28 | .await;
29 | let s3_manager = s3::Manager::new(&s3_shared_config);
30 |
31 | let kms_shared_config = aws_manager::load_config(
32 | Some(opts.kms_region.clone()),
33 | Some(opts.profile_name.clone()),
34 | Some(Duration::from_secs(30)),
35 | )
36 | .await;
37 | let kms_manager = kms::Manager::new(&kms_shared_config);
38 |
39 | let envelope_manager = envelope::Manager::new(
40 | &kms_manager,
41 | opts.kms_key_id.clone(),
42 | // must be equal to the AAD tag used for envelope encryption
43 | // e.g., "cfn-templates" tag "AAD_TAG"
44 | opts.aad_tag.clone(),
45 | );
46 |
47 | let tls_key_exists = Path::new(&opts.tls_key_path).exists();
48 | log::info!(
49 | "staking TLS key {} exists? {}",
50 | opts.tls_key_path,
51 | tls_key_exists
52 | );
53 |
54 | let tls_cert_exists = Path::new(&opts.tls_cert_path).exists();
55 | log::info!(
56 | "staking TLS cert {} exists? {}",
57 | opts.tls_cert_path,
58 | tls_cert_exists
59 | );
60 |
61 | if tls_key_exists || tls_cert_exists {
62 | return Err(Error::new(
63 | ErrorKind::Other,
64 | format!(
65 | "TLS key {} or cert {} already exists on disk",
66 | opts.tls_key_path, opts.tls_cert_path
67 | ),
68 | ));
69 | }
70 |
71 | log::info!("downloading key file {}", opts.tls_key_path);
72 | envelope_manager
73 | .get_object_unseal_decompress(
74 | &s3_manager,
75 | &opts.s3_bucket,
76 | &opts.s3_key_tls_key,
77 | &opts.tls_key_path,
78 | false,
79 | )
80 | .await
81 | .map_err(|e| {
82 | Error::new(
83 | ErrorKind::Other,
84 | format!("failed get_object_unseal_decompress tls_key_path: {}", e),
85 | )
86 | })?;
87 |
88 | log::info!("downloading cert file {}", opts.tls_cert_path);
89 | envelope_manager
90 | .get_object_unseal_decompress(
91 | &s3_manager,
92 | &opts.s3_bucket,
93 | &opts.s3_key_tls_cert,
94 | &opts.tls_cert_path,
95 | false,
96 | )
97 | .await
98 | .map_err(|e| {
99 | Error::new(
100 | ErrorKind::Other,
101 | format!("failed get_object_unseal_decompress tls_cert_path: {}", e),
102 | )
103 | })?;
104 |
105 | let node_id = node::Id::from_cert_pem_file(&opts.tls_cert_path)?;
106 |
107 | log::info!(
108 | "downloaded the node Id '{}' cert in '{}' and '{}'",
109 | node_id,
110 | opts.tls_key_path,
111 | opts.tls_cert_path
112 | );
113 |
114 | Ok(())
115 | }
116 |
--------------------------------------------------------------------------------
/staking-key-cert-s3-downloader/src/flags.rs:
--------------------------------------------------------------------------------
1 | /// Defines flag options.
2 | #[derive(Debug)]
3 | pub struct Options {
4 | pub log_level: String,
5 | pub s3_region: String,
6 | pub s3_bucket: String,
7 | pub s3_key_tls_key: String,
8 | pub s3_key_tls_cert: String,
9 | pub kms_region: String,
10 | pub kms_key_id: String,
11 | pub aad_tag: String,
12 | pub tls_key_path: String,
13 | pub tls_cert_path: String,
14 | pub profile_name: String,
15 | }
16 |
--------------------------------------------------------------------------------
/staking-key-cert-s3-downloader/src/main.rs:
--------------------------------------------------------------------------------
1 | mod command;
2 | mod flags;
3 |
4 | use clap::{crate_version, Arg, Command};
5 |
6 | pub const APP_NAME: &str = "staking-key-cert-s3-downloader";
7 |
8 | #[tokio::main]
9 | async fn main() {
10 | let matches = Command::new(APP_NAME)
11 | .version(crate_version!())
12 | .about("Staking certs downloader from S3")
13 | .long_about(
14 | "
15 |
16 | Downloads the avalanched-aws generated certificates from S3.
17 |
18 | staking-key-cert-s3-downloader \
19 | --log-level=info \
20 | --s3-region=us-west-2 --kms-region=us-west-2 \
21 | --s3-bucket=info \
22 | --s3-key-tls-key=pki/NodeABCDE.key.zstd.encrypted \
23 | --s3-key-tls-cert=pki/NodeABCDE.crt.zstd.encrypted \
24 | --kms-key-id=abc-abc-abc \
25 | --aad-tag=mytag \
26 | --tls-key-path=pki/NodeABCDE.key \
27 | --tls-cert-path=pki/NodeABCDE.crt
28 |
29 | ",
30 | )
31 | .arg(
32 | Arg::new("LOG_LEVEL")
33 | .long("log-level")
34 | .short('l')
35 | .help("Sets the log level")
36 | .required(false)
37 | .num_args(1)
38 | .value_parser(["debug", "info"])
39 | .default_value("info"),
40 | )
41 | .arg(
42 | Arg::new("S3_REGION")
43 | .long("s3-region")
44 | .help("Sets the AWS S3 region")
45 | .required(true)
46 | .num_args(1),
47 | )
48 | .arg(
49 | Arg::new("S3_BUCKET")
50 | .long("s3-bucket")
51 | .help("Sets the S3 bucket")
52 | .required(true)
53 | .num_args(1),
54 | )
55 | .arg(
56 | Arg::new("S3_KEY_TLS_KEY")
57 | .long("s3-key-tls-key")
58 | .help("Sets the S3 key for TLS key")
59 | .required(true)
60 | .num_args(1),
61 | )
62 | .arg(
63 | Arg::new("S3_KEY_TLS_CERT")
64 | .long("s3-key-tls-cert")
65 | .help("Sets the S3 key for TLS cert")
66 | .required(true)
67 | .num_args(1),
68 | )
69 | .arg(
70 | Arg::new("KMS_REGION")
71 | .long("kms-region")
72 | .help("Sets the AWS KMS region")
73 | .required(true)
74 | .num_args(1),
75 | )
76 | .arg(
77 | Arg::new("KMS_KEY_ID")
78 | .long("kms-key-id")
79 | .help("Sets the KMS key Id to envelope-decrypt the files from S3")
80 | .required(true)
81 | .num_args(1),
82 | )
83 | .arg(
84 | Arg::new("AAD_TAG")
85 | .long("aad-tag")
86 | .help("Sets the AAD tag for envelope encryption")
87 | .required(true)
88 | .num_args(1),
89 | )
90 | .arg(
91 | Arg::new("TLS_KEY_PATH")
92 | .long("tls-key-path")
93 | .help("Sets the local file path to save TLS key")
94 | .required(true)
95 | .num_args(1),
96 | )
97 | .arg(
98 | Arg::new("TLS_CERT_PATH")
99 | .long("tls-cert-path")
100 | .help("Sets the local file path to save TLS cert")
101 | .required(true)
102 | .num_args(1),
103 | )
104 | .arg(
105 | Arg::new("PROFILE_NAME")
106 | .long("profile-name")
107 | .help("Sets the AWS credential profile name for API calls/endpoints")
108 | .required(false)
109 | .default_value("default")
110 | .num_args(1),
111 | )
112 | .get_matches();
113 |
114 | let opts = flags::Options {
115 | log_level: matches
116 | .get_one::<String>("LOG_LEVEL")
117 | .unwrap_or(&String::from("info"))
118 | .clone(),
119 | s3_region: matches.get_one::<String>("S3_REGION").unwrap().clone(),
120 | s3_bucket: matches.get_one::<String>("S3_BUCKET").unwrap().clone(),
121 | s3_key_tls_key: matches.get_one::<String>("S3_KEY_TLS_KEY").unwrap().clone(),
122 | s3_key_tls_cert: matches
123 | .get_one::<String>("S3_KEY_TLS_CERT")
124 | .unwrap()
125 | .clone(),
126 | kms_region: matches.get_one::<String>("KMS_REGION").unwrap().clone(),
127 | kms_key_id: matches.get_one::<String>("KMS_KEY_ID").unwrap().clone(),
128 | aad_tag: matches.get_one::<String>("AAD_TAG").unwrap().clone(),
129 | tls_key_path: matches.get_one::<String>("TLS_KEY_PATH").unwrap().clone(),
130 | tls_cert_path: matches.get_one::<String>("TLS_CERT_PATH").unwrap().clone(),
131 | profile_name: matches.get_one::<String>("PROFILE_NAME").unwrap().clone(),
132 | };
133 | command::execute(opts).await.unwrap();
134 | }
135 |
--------------------------------------------------------------------------------
/staking-signer-key-s3-downloader/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "staking-signer-key-s3-downloader"
3 | version = "1.0.0" # https://github.com/ava-labs/avalanche-ops/releases
4 | edition = "2021"
5 | rust-version = "1.70"
6 |
7 | [dependencies]
8 | aws-manager = { version = "0.30.2", features = ["kms", "s3"] } # https://github.com/gyuho/aws-manager/tags
9 | clap = { version = "4.4.0", features = ["cargo", "derive"] } # https://github.com/clap-rs/clap/releases
10 | env_logger = "0.10.0"
11 | log = "0.4.20"
12 | tokio = { version = "1.32.0", features = ["full"] } # https://github.com/tokio-rs/tokio/releases
13 |
--------------------------------------------------------------------------------
/staking-signer-key-s3-downloader/src/command.rs:
--------------------------------------------------------------------------------
1 | use std::{
2 | io::{self, Error, ErrorKind},
3 | path::Path,
4 | };
5 |
6 | use crate::flags;
7 | use aws_manager::{
8 | self,
9 | kms::{self, envelope},
10 | s3,
11 | };
12 | use tokio::time::Duration;
13 |
14 | pub async fn execute(opts: flags::Options) -> io::Result<()> {
15 | println!("starting {} with {:?}", crate::APP_NAME, opts);
16 |
17 | // ref.
18 | env_logger::init_from_env(
19 | env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, opts.log_level),
20 | );
21 |
22 | let s3_shared_config = aws_manager::load_config(
23 | Some(opts.s3_region.clone()),
24 | Some(opts.profile_name.clone()),
25 | Some(Duration::from_secs(30)),
26 | )
27 | .await;
28 | let s3_manager = s3::Manager::new(&s3_shared_config);
29 |
30 | let kms_shared_config = aws_manager::load_config(
31 | Some(opts.kms_region.clone()),
32 | Some(opts.profile_name.clone()),
33 | Some(Duration::from_secs(30)),
34 | )
35 | .await;
36 | let kms_manager = kms::Manager::new(&kms_shared_config);
37 |
38 | let envelope_manager = envelope::Manager::new(
39 | &kms_manager,
40 | opts.kms_key_id.clone(),
41 | // must be equal to the AAD tag used for envelope encryption
42 | // e.g., "cfn-templates" tag "AAD_TAG"
43 | opts.aad_tag.clone(),
44 | );
45 |
46 | let key_exists = Path::new(&opts.key_path).exists();
47 | log::info!("staking TLS key {} exists? {}", opts.key_path, key_exists);
48 |
49 | log::info!("downloading key file {}", opts.key_path);
50 | envelope_manager
51 | .get_object_unseal_decompress(
52 | &s3_manager,
53 | &opts.s3_bucket,
54 | &opts.s3_key,
55 | &opts.key_path,
56 | false,
57 | )
58 | .await
59 | .map_err(|e| {
60 | Error::new(
61 | ErrorKind::Other,
62 | format!("failed get_object_unseal_decompress key_path: {}", e),
63 | )
64 | })?;
65 |
66 | Ok(())
67 | }
68 |
--------------------------------------------------------------------------------
/staking-signer-key-s3-downloader/src/flags.rs:
--------------------------------------------------------------------------------
1 | /// Defines flag options.
2 | #[derive(Debug)]
3 | pub struct Options {
4 | pub log_level: String,
5 | pub s3_region: String,
6 | pub s3_bucket: String,
7 | pub s3_key: String,
8 | pub kms_region: String,
9 | pub kms_key_id: String,
10 | pub aad_tag: String,
11 | pub key_path: String,
12 | pub profile_name: String,
13 | }
14 |
--------------------------------------------------------------------------------
/staking-signer-key-s3-downloader/src/main.rs:
--------------------------------------------------------------------------------
1 | mod command;
2 | mod flags;
3 |
4 | use clap::{crate_version, Arg, Command};
5 |
6 | pub const APP_NAME: &str = "staking-signer-key-s3-downloader";
7 |
8 | #[tokio::main]
9 | async fn main() {
10 | let matches = Command::new(APP_NAME)
11 | .version(crate_version!())
12 | .about("Staking certs downloader from S3")
13 | .long_about(
14 | "
15 |
16 | Downloads the avalanched-aws generated staking signer key from S3.
17 |
18 | staking-signer-key-s3-downloader \
19 | --log-level=info \
20 | --s3-region=us-west-2 --kms-region=us-west-2 \
21 | --s3-bucket=info \
22 | --s3-key=pki/NodeABCDE.key.zstd.encrypted \
23 | --kms-key-id=abc-abc-abc \
24 | --aad-tag=mytag \
25 | --key-path=pki/NodeABCDE.key
26 |
27 | ",
28 | )
29 | .arg(
30 | Arg::new("LOG_LEVEL")
31 | .long("log-level")
32 | .short('l')
33 | .help("Sets the log level")
34 | .required(false)
35 | .num_args(1)
36 | .value_parser(["debug", "info"])
37 | .default_value("info"),
38 | )
39 | .arg(
40 | Arg::new("S3_REGION")
41 | .long("s3-region")
42 | .help("Sets the AWS S3 region")
43 | .required(true)
44 | .num_args(1),
45 | )
46 | .arg(
47 | Arg::new("S3_BUCKET")
48 | .long("s3-bucket")
49 | .help("Sets the S3 bucket")
50 | .required(true)
51 | .num_args(1),
52 | )
53 | .arg(
54 | Arg::new("S3_KEY")
55 | .long("s3-key")
56 | .help("Sets the S3 key")
57 | .required(true)
58 | .num_args(1),
59 | )
60 | .arg(
61 | Arg::new("KMS_REGION")
62 | .long("kms-region")
63 | .help("Sets the AWS KMS region")
64 | .required(true)
65 | .num_args(1),
66 | )
67 | .arg(
68 | Arg::new("KMS_KEY_ID")
69 | .long("kms-key-id")
70 | .help("Sets the KMS key Id to envelope-decrypt the files from S3")
71 | .required(true)
72 | .num_args(1),
73 | )
74 | .arg(
75 | Arg::new("AAD_TAG")
76 | .long("aad-tag")
77 | .help("Sets the AAD tag for envelope encryption")
78 | .required(true)
79 | .num_args(1),
80 | )
81 | .arg(
82 | Arg::new("KEY_PATH")
83 | .long("key-path")
84 | .help("Sets the local file path to save key")
85 | .required(true)
86 | .num_args(1),
87 | )
88 | .arg(
89 | Arg::new("PROFILE_NAME")
90 | .long("profile-name")
91 | .help("Sets the AWS credential profile name for API calls/endpoints")
92 | .required(false)
93 | .default_value("default")
94 | .num_args(1),
95 | )
96 | .get_matches();
97 |
98 | let opts = flags::Options {
99 | log_level: matches
100 | .get_one::<String>("LOG_LEVEL")
101 | .unwrap_or(&String::from("info"))
102 | .clone(),
103 | s3_region: matches.get_one::<String>("S3_REGION").unwrap().clone(),
104 | s3_bucket: matches.get_one::<String>("S3_BUCKET").unwrap().clone(),
105 | s3_key: matches.get_one::<String>("S3_KEY").unwrap().clone(),
106 | kms_region: matches.get_one::<String>("KMS_REGION").unwrap().clone(),
107 | kms_key_id: matches.get_one::<String>("KMS_KEY_ID").unwrap().clone(),
108 | aad_tag: matches.get_one::<String>("AAD_TAG").unwrap().clone(),
109 | key_path: matches.get_one::<String>("KEY_PATH").unwrap().clone(),
110 | profile_name: matches.get_one::<String>("PROFILE_NAME").unwrap().clone(),
111 | };
112 | command::execute(opts).await.unwrap();
113 | }
114 |
--------------------------------------------------------------------------------