├── .dockerignore ├── .gitignore ├── .gitmodules ├── Dockerfile ├── LICENSE.md ├── README.md ├── Setup.hs ├── app ├── AwsBootstrap.hs └── AwsSpam.hs ├── build ├── push ├── quorum-aws.cabal ├── src └── QuorumTools │ ├── Aws.hs │ └── Mains │ ├── AwsBootstrap.hs │ └── AwsSpam.hs ├── stack.yaml └── terraform ├── bin ├── .bin │ └── env-wrapper ├── .multi-start-cluster ├── .multi-start-tunnels ├── demo ├── global ├── intl-ireland ├── intl-tokyo ├── intl-virginia └── multi-start ├── global ├── main.tf ├── output.tf ├── terraform.tfvars └── variables.tf ├── main.tf ├── multi-region-vars ├── ireland.tfvars ├── tokyo.tfvars └── virginia.tfvars ├── output.tf ├── scripts ├── install │ ├── attach.sh │ ├── follow.sh │ ├── spam.sh │ ├── start-constellation.sh │ ├── start-quorum.sh │ ├── start-tunnels.sh │ └── start.sh └── provision │ ├── fetch-images.sh │ ├── prepare.sh │ └── start-single-region-cluster.sh ├── secrets ├── ec2-keys │ └── .gitkeep └── terraform.tfvars.example ├── state └── .gitkeep ├── terraform.tfvars └── variables.tf /.dockerignore: -------------------------------------------------------------------------------- 1 | .git 2 | .gitignore 3 | .stack-work 4 | .DS_Store 5 | gdata 6 | out 7 | .ideaHaskellLib 8 | .idea 9 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | terraform/cluster-data/ 2 | terraform/secrets/* 3 | terraform/state/* 4 | terraform/.terraform/plugins/ 5 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "dependencies/constellation"] 2 | path = dependencies/constellation 3 | url = https://github.com/jpmorganchase/constellation.git 4 | [submodule "dependencies/quorum"] 5 | path = dependencies/quorum 6 | url = https://github.com/jpmorganchase/quorum.git 7 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:artful 2 | 3 | RUN apt-get update 4 | 5 | RUN apt-get install -y curl && \ 6 | curl -sSL https://get.haskellstack.org/ | sh 7 | 8 | RUN apt-get install -y libgmp-dev libdb-dev libleveldb-dev libsodium-dev zlib1g-dev libtinfo-dev pkg-config 9 | 10 | ENV SRC /usr/local/src/quorum-aws 11 | WORKDIR $SRC 12 | 13 | # GHC 14 | ADD stack.yaml $SRC/ 15 | RUN stack setup 16 | 17 | # Dependencies 18 | ADD LICENSE.md quorum-aws.cabal $SRC/ 19 | RUN stack build --dependencies-only 20 | 21 | # Project 22 | ADD Setup.hs $SRC/ 23 | COPY app/ $SRC/app/ 24 | COPY src/ $SRC/src/ 25 | RUN stack install --local-bin-path /usr/local/bin 26 | 27 | RUN aws-spam --help 28 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | Copyright 2016 Brian Schroeder, Joel Burget 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # quorum-aws
2 |
3 | ## ⚠️ Project Deprecation Notice ⚠️
4 |
5 | Quorum-AWS has been deprecated, and we are no longer supporting the project.
6 |
7 | It has been replaced by [quorum-terraform](https://github.com/ConsenSys/quorum-terraform), which offers wider compatibility with Quorum products and cloud providers.
8 |
9 | We encourage all users with active projects to migrate to [quorum-terraform](https://github.com/ConsenSys/quorum-terraform).
10 |
11 | If you have any questions or concerns, please reach out to the ConsenSys protocol engineering team on [#Discord](https://chat.consensys.net) or by [email](mailto:quorum@consensys.net).
12 |
13 | ### Description
14 |
15 | This repo contains the tools we use to deploy test Quorum clusters to AWS.
16 |
17 | - We use [Docker](https://www.docker.com/) to build images for quorum, constellation, and this codebase (quorum-aws, which extends [quorum-tools](https://github.com/jpmorganchase/quorum-tools)).
18 | - Docker images are pushed to AWS' [ECR repositories](http://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_Console_Repositories.html).
19 | - We use [Terraform](https://www.terraform.io/) to provision single-region (cross-availability-zone) and multi-region (cross-internet) Quorum clusters using these images.
20 |
21 | With a little bit of time and an AWS account, you should be able to use this project to easily deploy a Quorum cluster to AWS.
22 |
23 | ### Requirements
24 |
25 | - Installed software: [Docker](https://docs.docker.com/engine/installation/), [Terraform](https://www.terraform.io/intro/getting-started/install.html), [stack](https://docs.haskellstack.org/en/stable/README/#how-to-install), [jq](https://stedolan.github.io/jq/download/), and [awscli](https://aws.amazon.com/cli/)
26 | - awscli needs to be configured to talk to AWS (see the [user guide](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-welcome.html) or use `aws configure help`)
27 | - Create `terraform/secrets/terraform.tfvars` containing the AWS credentials from `~/.aws/credentials` (see `terraform/secrets/terraform.tfvars.example` for the expected format)
28 |
29 | ### Building images
30 |
31 | From the root of this project, you can execute the two scripts below to build Docker images for quorum, constellation, and quorum-aws. The latter will be built both locally and in Docker (to be deployed to AWS). We need to build and push these Docker images before we can run a cluster on AWS.
32 |
33 | If we haven't already, we need to pull Quorum and Constellation down into the `dependencies` directory:
34 |
35 | - `git submodule init && git submodule update`
36 |
37 | Then build the Docker images and push them to ECR repositories:
38 |
39 | - `./build && ./push`
40 |
41 | #### Building issues
42 | Error 137 is generally a sign that you should configure Docker with more memory.
43 |
44 |
45 | ### A note on how we are using Terraform
46 |
47 | To manage terraformed infrastructure across different regions and clusters, we don't invoke the `terraform` binary directly; instead, we use (symlinks to) a wrapper script around it that automatically sets variables and state output locations per environment.
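For example, from the `terraform` directory, `bin/demo plan` effectively expands to the raw invocation below (this is the single-part-name case; two-part environment names like `intl-ireland` additionally pass `-var-file multi-region-vars/ireland.tfvars`):

```
# what bin/demo plan runs under the hood (see terraform/bin/.bin/env-wrapper)
terraform plan -var "env=demo" -state "$(pwd)/state/demo.tfstate"
```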
Take a look inside `terraform/bin` to see how this works:
48 |
49 | ```
50 | > ls -al terraform/bin
51 | total 64
52 | drwxr-xr-x  11 bts  staff  374 Oct 11 15:13 .
53 | drwxr-xr-x  13 bts  staff  442 Oct 11 15:35 ..
54 | drwxr-xr-x   3 bts  staff  102 Oct 11 15:58 .bin
55 | -rwxr-xr-x   1 bts  staff  793 Oct 11 14:39 .multi-start-cluster
56 | -rwxr-xr-x   1 bts  staff  812 Oct 11 14:39 .multi-start-tunnels
57 | lrwxr-xr-x   1 bts  staff   16 Oct  2 11:42 demo -> .bin/env-wrapper
58 | lrwxr-xr-x   1 bts  staff   16 Oct  2 11:42 global -> .bin/env-wrapper
59 | lrwxr-xr-x   1 bts  staff   16 Oct  2 11:42 intl-ireland -> .bin/env-wrapper
60 | lrwxr-xr-x   1 bts  staff   16 Oct  2 11:42 intl-tokyo -> .bin/env-wrapper
61 | lrwxr-xr-x   1 bts  staff   16 Oct  2 11:42 intl-virginia -> .bin/env-wrapper
62 | -rwxr-xr-x   1 bts  staff  235 Oct 11 15:13 multi-start
63 | ```
64 |
65 | Here, `demo` is a symlink to the wrapper script that will invoke Terraform in such a way that it knows we are concerned with the "demo" environment. Instead of using the `terraform` binary directly (e.g. `terraform plan`), we issue the same Terraform CLI commands to the wrapper script (e.g. `bin/demo plan`).
66 |
67 | The pre-supplied binary wrappers have the following purposes:
68 | - `global` contains IAM infrastructure that is not particular to any one AWS region, and will be `apply`ed only once.
69 | - `demo` is the default name of a single-region cluster that will be deployed to `us-east-1`.
70 | - `intl-ireland`, `intl-tokyo`, and `intl-virginia` contain the infrastructure for the 3 regions of an international cluster, respectively. This infrastructure lives in separate files because Terraform is hard-coded to support at most one region per `main.tf` file.
71 |
72 | If you want, you can simply make a new symlink (in `terraform/bin`) to `terraform/bin/.bin/env-wrapper` named whatever you like (e.g. `mycluster`), and then you can use that script to launch a new cluster with that name.
73 |
74 | ### One-time: initialize Terraform plugins (for Terraform 0.10+)
75 |
76 | Because we're using the `aws` and `null` Terraform plugins, we need to initialize them:
77 |
78 | - `terraform init`
79 |
80 | ### One-time: deploy some "global" IAM infrastructure
81 |
82 | The following only needs to be done once to deploy some Identity and Access Management (IAM) infrastructure that we re-use across clusters:
83 |
84 | - `cd terraform`
85 | - `bin/global apply`
86 |
87 | If at some point in the future you want to destroy this infrastructure, you can run `bin/global destroy`.
88 |
89 | ### Deploying a single-region cluster
90 |
91 | - `cd terraform`
92 |
93 | For a given Terraform environment, we can use the normal Terraform commands like `plan`, `show`, `apply`, `destroy`, and `output` to work with a single-region cluster (see the example session after this list):
94 |
95 | - `bin/demo plan` shows us what infrastructure will be provisioned if we decide to `apply`
96 | - `bin/demo apply` creates the infrastructure. In a single-region setting, this also automatically starts the Quorum cluster.
97 | - `bin/demo show` reports the current Terraform state for the environment
98 | - `bin/demo output` can print the value of an output variable listed in `output.tf`, e.g. `bin/demo output geth1`. This makes it easy to SSH into a node in the cluster: try `ssh ubuntu@$(bin/demo output geth1)` or `ssh ubuntu@$(bin/demo output geth2)`.
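Putting these together, a complete single-region session (after the one-time `bin/global apply`) looks roughly like this:

```
cd terraform
bin/demo plan                         # preview the infrastructure
bin/demo apply                        # provision it; this also starts the cluster
ssh ubuntu@$(bin/demo output geth1)   # log in to the first node
```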
99 |
100 | Once SSH'd into a node, we can use a few utility scripts that have been installed in the `ubuntu` user's homedir to interact with `geth`:
101 |
102 | - `./spam 10` will send in 10 transactions per second until `^C` stops it
103 | - `./follow` shows the end of the (`tail -f`/followed) `geth` log
104 | - `./attach` attaches to the local `geth` process.
105 | - `exit`
106 |
107 | At this point, if we like, we can destroy the cluster:
108 |
109 | - `bin/demo destroy`
110 |
111 | ### Deploying a multi-region cluster
112 |
113 | At the moment, this is slightly more involved than deployment for a single-region cluster. Symlinks (in `terraform/bin`) are currently set up for one multi-region cluster called "intl" that spans three regions. Because `ireland` is set up in this cluster to be "geth 1", it performs the side effect of generating a `cluster-data` directory that will be used for the other two regions. So, we provision `ireland` first:
114 |
115 | - `bin/intl-ireland apply`
116 |
117 | Then we can provision `tokyo` and `virginia`. You can do these two steps in parallel (e.g. in different terminals) if you'd like:
118 |
119 | - `bin/intl-tokyo apply`
120 | - `bin/intl-virginia apply`
121 |
122 | Once all three regions have been provisioned, we need to start the cluster. In single-region clusters this is done automatically, but in multi-region clusters, it's manual. This will set up SSH tunnels between regions for secure communication between them, then start constellation and quorum on each node. Note that here we specify the name of the cross-region cluster, `intl`:
123 |
124 | - `bin/multi-start intl`
125 |
126 | At this point, we should be able to log in to one of the nodes and see the cluster in action:
127 |
128 | - `ssh ubuntu@$(bin/intl-virginia output eip)` where `eip` stands for Elastic IP, the static IP address other nodes in the cluster can use to connect to this one.
129 | - `./spam 10` sends in 10 transactions per second for a few seconds, then `^C` to stop it
130 | - `./follow` shows the end of the (`tail -f`/followed) `geth` log, or `./attach` attaches to the local node.
131 |
--------------------------------------------------------------------------------
/Setup.hs:
--------------------------------------------------------------------------------
1 | import Distribution.Simple
2 | main = defaultMain
--------------------------------------------------------------------------------
/app/AwsBootstrap.hs:
--------------------------------------------------------------------------------
1 | module Main where
2 |
3 | import QuorumTools.Mains.AwsBootstrap
4 |
5 | main :: IO ()
6 | main = awsBootstrapMain
--------------------------------------------------------------------------------
/app/AwsSpam.hs:
--------------------------------------------------------------------------------
1 | module Main where
2 |
3 | import QuorumTools.Mains.AwsSpam
4 |
5 | main :: IO ()
6 | main = awsSpamMain
--------------------------------------------------------------------------------
/build:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -euo pipefail
4 |
5 | echo "building quorum-aws locally"
6 | stack setup
7 | stack build
8 |
9 | echo "building quorum-aws for AWS"
10 | docker build -t quorum-aws .
11 |
12 | echo "building quorum"
13 | docker build -t quorum ./dependencies/quorum
14 |
15 | echo "building constellation"
16 | docker build -t constellation -f ./dependencies/constellation/build-fedora.dockerfile --build-arg DISTRO_VERSION=26 ./dependencies/constellation
17 |
18 | echo "done building packages"
19 |
--------------------------------------------------------------------------------
/push:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Pushes built images to AWS' ECR (Elastic Container Registry).
4 | #
5 | # Requires aws-cli, jq, and docker.
6 |
7 | #
8 | # NOTE: While running this script, if
9 | #
10 | #   (1) aws-cli says that it doesn't recognize `--no-include-email`
11 | #   or (2) docker requires an email (via `-e`)
12 | #
13 | # then update both aws-cli and docker and try running this again.
14 | #
15 |
16 | set -euo pipefail
17 |
18 | REGION=us-east-1
19 | IMAGES=(quorum constellation quorum-aws)
20 |
21 | # Authenticate docker client with ECR:
22 | eval `aws ecr get-login --region ${REGION} --no-include-email` >/dev/null
23 |
24 | existing_repository() {
25 |   name=$1
26 |
27 |   (aws ecr describe-repositories --repository-names "$name" 2>/dev/null | jq -r '.repositories[0].repositoryUri') || echo ""
28 | }
29 |
30 | create_repository() {
31 |   name=$1
32 |
33 |   aws ecr create-repository --repository-name "${name}" | jq -r '.repository.repositoryUri'
34 | }
35 |
36 | # Tag and push each image:
37 | for image in "${IMAGES[@]}"
38 | do
39 |   echo "pushing $image"
40 |
41 |   echo "  checking for existing ${image} repository"
42 |   repository=$(existing_repository $image)
43 |
44 |   if [[ -z "$repository" ]]
45 |   then
46 |     echo "  existing repository not found. creating new repository"
47 |     repository=$(create_repository $image)
48 |     echo "  created repository ${image}"
49 |   else
50 |     echo "  existing repository is $repository"
51 |   fi
52 |
53 |   echo "  tagging image $image"
54 |   docker tag ${image}:latest ${repository}:latest
55 |   echo "  pushing image $image to ECR"
56 |   docker push ${repository}:latest
57 |   echo "  done"
58 |   echo
59 | done
60 |
--------------------------------------------------------------------------------
/quorum-aws.cabal:
--------------------------------------------------------------------------------
1 | name:                quorum-aws
2 | version:             0.0.1
3 | synopsis:            Utilities to launch and test Quorum clusters on AWS
4 | license:             Apache-2.0
5 | license-file:        LICENSE.md
6 | build-type:          Simple
7 | cabal-version:       >=1.10
8 |
9 | library
10 |   hs-source-dirs:      src
11 |   exposed-modules:
12 |     QuorumTools.Mains.AwsBootstrap
13 |     QuorumTools.Mains.AwsSpam
14 |   other-modules:
15 |     QuorumTools.Aws
16 |   build-depends:
17 |     base >= 4.7 && < 5,
18 |     containers == 0.5.*,
19 |     lens == 4.14.* || == 4.15.*,
20 |     mtl == 2.2.*,
21 |     quorum-tools >= 0.0.1,
22 |     rate-limit == 1.1.*,
23 |     time-units == 1.0.*,
24 |     turtle == 1.3.*
25 |   default-language:    Haskell2010
26 |
27 | executable aws-spam
28 |   hs-source-dirs:      app
29 |   main-is:             AwsSpam.hs
30 |   ghc-options:         -threaded -rtsopts -with-rtsopts=-N
31 |   build-depends:       base, quorum-aws
32 |   default-language:    Haskell2010
33 |
34 | executable aws-bootstrap
35 |   hs-source-dirs:      app
36 |   main-is:             AwsBootstrap.hs
37 |   ghc-options:         -threaded -rtsopts -with-rtsopts=-N
38 |   build-depends:       base, quorum-aws
39 |   default-language:    Haskell2010
40 |
41 | source-repository head
42 |   type:     git
43 |   location: https://github.com/jpmorganchase/quorum-aws.git
44 |
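For reference, the two executables above are what actually run on the cluster nodes. Going by the option parsers in `src/QuorumTools/Mains/` (below) and the invocation in `terraform/scripts/install/spam.sh`, they are called roughly like this; note that `aws-bootstrap` also takes a consensus flag supplied by quorum-tools' `consensusParser`, elided here:

```
# generate keys, datadirs, and genesis.json for a 3-node, 3-subnet region
aws-bootstrap -s 3 -p cluster-data -n 3

# spam the local node with 10 transactions per second (add -g on multi-region nodes)
aws-spam -r 10
```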
--------------------------------------------------------------------------------
/src/QuorumTools/Aws.hs:
--------------------------------------------------------------------------------
1 | {-# LANGUAGE OverloadedStrings #-}
2 |
3 | module QuorumTools.Aws
4 |   ( internalAwsIp
5 |   , dockerHostIp
6 |   , AwsClusterType (..)
7 |   ) where
8 |
9 | import Turtle
10 |
11 | import QuorumTools.Types
12 |
13 | data AwsClusterType
14 |   = SingleRegion
15 |   | MultiRegion
16 |
17 | --
18 | -- TODO: use newtypes
19 | --
20 | internalAwsIp :: Int -> GethId -> Ip
21 | internalAwsIp subnets (GethId gid) =
22 |     Ip $ format ("10.0."%d%"."%d) subnet lastOctet
23 |   where
24 |     idx = gid - 1 -- Zero-indexed geth id
25 |     subnet = 1 + (idx `mod` subnets)
26 |     lastOctet = 101 + (idx `div` subnets)
27 |
28 | dockerHostIp :: Ip
29 | dockerHostIp = Ip "172.17.0.1"
30 |
--------------------------------------------------------------------------------
/src/QuorumTools/Mains/AwsBootstrap.hs:
--------------------------------------------------------------------------------
1 | {-# LANGUAGE OverloadedStrings #-}
2 | {-# LANGUAGE TupleSections #-}
3 |
4 | -- | Bootstraps an AWS cluster
5 | module QuorumTools.Mains.AwsBootstrap where
6 |
7 | import Control.Lens ((.~))
8 | import Control.Monad.Reader (runReaderT)
9 | import Data.Bool (bool)
10 | import Data.Map.Strict (Map)
11 | import Prelude hiding (FilePath)
12 | import Turtle
13 |
14 | import QuorumTools.Aws
15 | import QuorumTools.Cluster
16 | import QuorumTools.Options (consensusParser)
17 | import QuorumTools.Types
18 |
19 | data AwsConfig
20 |   = AwsConfig { numSubnets :: Int
21 |               , rootDir :: FilePath
22 |               , clusterType :: AwsClusterType
23 |               , clusterSize :: Int
24 |               , clusterConsensus :: Consensus
25 |               }
26 |
27 | cliParser :: Parser AwsConfig
28 | cliParser = AwsConfig
29 |   <$> optInt "subnets" 's' "Number of subnets in the region"
30 |   <*> optPath "path" 'p' "Output path"
31 |   <*> fmap (bool SingleRegion MultiRegion)
32 |            (switch "multi-region" 'm' "Whether the cluster is multi-region")
33 |   <*> optInt "cluster-size" 'n' "Total cluster size across all regions"
34 |   <*> consensusParser
35 |
36 | mkBootstrapEnv :: AwsConfig -> Password -> Map GethId AccountKey -> ClusterEnv
37 | mkBootstrapEnv cfg password keys = mkClusterEnv mkIp mkDataDir keys consensus
38 |   & clusterGenesisJson .~ dataRoot </> "genesis.json"
39 |   & clusterPrivacySupport .~ PrivacyEnabled
40 |   & clusterPassword .~ password
41 |
42 |   where
43 |     consensus = clusterConsensus cfg
44 |     dataRoot = rootDir cfg
45 |     subnets = numSubnets cfg
46 |
47 |     mkDataDir (GethId gid) = DataDir $
48 |       dataRoot </> fromText (format ("geth"%d) gid)
49 |
50 |     -- In the multi-region setting, since we are connecting to other nodes over
51 |     -- the open internet, we do so through local SSH tunnels.
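    -- (In the single-region setting, internalAwsIp instead spreads nodes
    -- across subnets: with 3 subnets, geth 1 gets 10.0.1.101, geth 2 gets
    -- 10.0.2.101, geth 3 gets 10.0.3.101, and geth 4 wraps to 10.0.1.102.)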
52 | mkIp = case clusterType cfg of 53 | SingleRegion -> internalAwsIp subnets 54 | MultiRegion -> const dockerHostIp 55 | 56 | awsBootstrapMain :: IO () 57 | awsBootstrapMain = awsBootstrap =<< parseConfig 58 | where 59 | parseConfig = options "Bootstraps an AWS cluster" cliParser 60 | 61 | awsBootstrap :: AwsConfig -> IO () 62 | awsBootstrap config = do 63 | keys <- generateClusterKeys gids password 64 | 65 | sh $ flip runReaderT (mkBootstrapEnv config password keys) $ 66 | wipeAndSetupNodes (Just remoteDataDir) (rootDir config) gids 67 | 68 | where 69 | gids = [1..GethId (clusterSize config)] 70 | remoteDataDir = DataDir "/datadir" 71 | password = CleartextPassword "abcd" 72 | -------------------------------------------------------------------------------- /src/QuorumTools/Mains/AwsSpam.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE OverloadedStrings #-} 2 | 3 | module QuorumTools.Mains.AwsSpam where 4 | 5 | import Control.Monad.Reader (runReaderT) 6 | import Control.RateLimit (RateLimit) 7 | import Data.Bool (bool) 8 | import qualified Data.Map.Strict as Map 9 | import Data.Time.Units (Millisecond) 10 | import Turtle 11 | 12 | import QuorumTools.Aws 13 | import QuorumTools.Client (loadNode, perSecond, spamGeth) 14 | import QuorumTools.Cluster 15 | import QuorumTools.Spam 16 | import QuorumTools.Types 17 | 18 | data SpamConfig = SpamConfig { rateLimit :: RateLimit Millisecond 19 | , clusterType :: AwsClusterType 20 | , contract :: Maybe Text 21 | , privateFor :: Maybe Text 22 | } 23 | 24 | cliParser :: Parser SpamConfig 25 | cliParser = SpamConfig 26 | <$> fmap perSecond (optInteger "rps" 'r' "The number of requests per second") 27 | <*> fmap (bool SingleRegion MultiRegion) 28 | (switch "multi-region" 'g' "Whether the cluster is multi-region") 29 | <*> optional contractP 30 | <*> optional privateForP 31 | 32 | mkSingletonEnv :: MonadIO m => AwsClusterType -> GethId -> m ClusterEnv 33 | mkSingletonEnv cType gid = do 34 | key <- readAccountKey dataDir gid 35 | subnets <- readNumSubnetsFromHomedir 36 | return $ mkClusterEnv (mkIp subnets) 37 | (const dataDir) 38 | (Map.singleton gid key) 39 | dummyConsensus 40 | 41 | where 42 | dataDir = DataDir "/datadir" 43 | 44 | mkIp numSubnets = case cType of 45 | SingleRegion -> internalAwsIp numSubnets 46 | MultiRegion -> const dockerHostIp 47 | 48 | -- HACK: I think we can get away with just putting any consensus for now, 49 | -- but we should try to read this from configuration on the box. On this 50 | -- note, instead of having multiple files in the homedir, we should have 51 | -- one single config file: 52 | dummyConsensus = Raft 53 | 54 | readNumSubnetsFromHomedir :: MonadIO m => m Int 55 | readNumSubnetsFromHomedir = liftIO $ read <$> readFile "/home/ubuntu/num-subnets" 56 | 57 | readGidFromHomedir :: IO GethId 58 | readGidFromHomedir = GethId . 
read <$> readFile "/home/ubuntu/node-id" 59 | 60 | awsSpamMain :: IO () 61 | awsSpamMain = awsSpam =<< parseConfig 62 | where 63 | parseConfig = options "Spams the local node with public transactions" 64 | cliParser 65 | 66 | awsSpam :: SpamConfig -> IO () 67 | awsSpam config = do 68 | gid <- readGidFromHomedir 69 | let benchTx = processContractArgs (contract config) (privateFor config) 70 | cEnv <- mkSingletonEnv (clusterType config) gid 71 | geth <- runReaderT (loadNode gid) cEnv 72 | spamGeth benchTx geth (rateLimit config) 73 | -------------------------------------------------------------------------------- /stack.yaml: -------------------------------------------------------------------------------- 1 | resolver: lts-10.7 2 | 3 | packages: 4 | - '.' 5 | #- '../quorum-tools' 6 | - location: 7 | git: https://github.com/jpmorganchase/quorum-tools.git 8 | commit: 337d92414cac248d10e8ee583eb7adaea5c2d0be 9 | extra-dep: true 10 | 11 | # 12 | # FIXME: duplicated from quorum-tools: 13 | # 14 | - location: 15 | git: https://github.com/jpmorganchase/constellation.git 16 | commit: 14997fbb8d5ae0b2db6ea182fcabad5d69ed7b23 17 | extra-dep: true 18 | 19 | # 20 | # FIXME: duplicated from quorum-tools: 21 | # 22 | extra-deps: 23 | - uri-bytestring-0.2.3.3 24 | - foldl-1.2.5 25 | - turtle-1.3.6 26 | - symmetric-properties-0.1.0.0 27 | - rate-limit-1.1.1 28 | - time-units-1.0.0 29 | - aeson-1.1.2.0 30 | - async-pool-0.9.0.2 31 | - BerkeleyDB-0.8.7 # constellation 32 | - logging-3.0.4 # constellation 33 | - saltine-0.1.0.0 # constellation 34 | - cryptonite-0.24 # constellation 35 | - ansi-terminal-0.6.3.1 36 | - unix-compat-0.4.3.1 37 | 38 | flags: {} 39 | 40 | extra-package-dbs: [] 41 | -------------------------------------------------------------------------------- /terraform/bin/.bin/env-wrapper: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euo pipefail 4 | 5 | die() { 6 | echo >&2 "$@" 7 | exit 1 8 | } 9 | 10 | print_usage() { 11 | die "usage: $0 [plan|apply|show|console|refresh|destroy|output[ output-name]|taint resource-name|untaint resource-name]" 12 | } 13 | 14 | [ "$#" -eq 0 ] && print_usage 15 | 16 | # Set up variables 17 | 18 | bin_name=$(basename $0) 19 | bin_components=(${bin_name//-/ }) # Split binary name on a dash 20 | num_components=${#bin_components[@]} 21 | 22 | if [ ${num_components} -eq 2 ] 23 | then 24 | env_name=${bin_components[0]} 25 | region=${bin_components[1]} 26 | state_file="$(pwd)/state/${env_name}-${region}.tfstate" 27 | var_file_flag="-var-file $(pwd)/multi-region-vars/${region}.tfvars" 28 | else 29 | env_name=${bin_name} 30 | state_file="$(pwd)/state/${env_name}.tfstate" 31 | var_file_flag="" 32 | fi 33 | 34 | ssh_key_name="quorum-${env_name}" 35 | ssh_key_path="secrets/ec2-keys/${ssh_key_name}.pem" # private key path 36 | ssh_pubkey_path="secrets/ec2-keys/${ssh_key_name}.pub" 37 | secret_tf_file="secrets/terraform.tfvars" 38 | tunnel_key_name="tunnel" 39 | tunnel_key_path="secrets/${tunnel_key_name}" # private key path 40 | 41 | # Pre-processing/checks before processing command 42 | 43 | check_for_aws_creds() { 44 | test -e "${secret_tf_file}" 45 | } 46 | 47 | check_ssh_key_exists() { 48 | test -e "${ssh_key_path}" 49 | } 50 | 51 | generate_ssh_key() { 52 | echo "generating ec2 SSH key: ${ssh_key_path}" 53 | # aws ec2 create-key-pair --key-name "${ssh_key_name}" | jq -r ".KeyMaterial" >"${ssh_key_path}" 54 | openssl genrsa -out "${ssh_key_path}" 2048 55 | chmod 400 "${ssh_key_path}" 56 | ssh-keygen -y -f 
"${ssh_key_path}" >"${ssh_pubkey_path}" 57 | } 58 | 59 | ensure_ssh_key_added() { 60 | ssh-add "${ssh_key_path}" 2>/dev/null 61 | } 62 | 63 | check_tunnel_key_exists() { 64 | test -e "${tunnel_key_path}" 65 | } 66 | 67 | generate_tunnel_key() { 68 | echo "generating tunnel SSH key: ${tunnel_key_path}" 69 | ssh-keygen -t ed25519 -N "" -C "quorum ssh tunnels" -f "${tunnel_key_path}" 70 | } 71 | 72 | check_for_aws_creds || die "please create the file secrets/terraform.fvars. see secrets/terraform.tfvars.example for an example." 73 | 74 | if [[ "${env_name}" != "global" ]] 75 | then 76 | check_ssh_key_exists || generate_ssh_key 77 | ensure_ssh_key_added 78 | fi 79 | 80 | check_tunnel_key_exists || generate_tunnel_key 81 | 82 | # Process command 83 | 84 | if [[ "${env_name}" == "global" ]] 85 | then 86 | cd global 87 | fi 88 | 89 | if [[ "$1" == "plan" || "$1" == "apply" || "$1" == "destroy" || "$1" == "console" || "$1" == "refresh" ]] 90 | then 91 | terraform $1 -var "env=${env_name}" ${var_file_flag} -state ${state_file} 92 | elif [[ "$1" == "output" || "$1" == "taint" || "$1" == "untaint" ]] 93 | then 94 | if [[ "$#" -ne 2 ]] 95 | then 96 | print_usage 97 | else 98 | terraform $1 -state ${state_file} $2 99 | fi 100 | elif [[ "$1" == "show" ]] 101 | then 102 | terraform show ${state_file} 103 | else 104 | print_usage 105 | fi 106 | -------------------------------------------------------------------------------- /terraform/bin/.multi-start-cluster: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Starts a multi-region cluster 4 | 5 | set -euo pipefail 6 | 7 | die() { 8 | echo >&2 "ERROR: $@" 9 | exit 1 10 | } 11 | 12 | print_usage() { 13 | die "usage: multi-start-cluster [CLUSTER_NAME]" 14 | } 15 | 16 | [ "$#" -ne 1 ] && print_usage 17 | 18 | env=$1 19 | regions=(ireland tokyo virginia) 20 | 21 | # fetch EIPs 22 | 23 | eips=() 24 | for ((i = 0; i < ${#regions[@]}; ++i)) 25 | do 26 | region=${regions[$i]} 27 | bin="${env}-${region}" 28 | eip=$($bin output eip 2>/dev/null || die "could not get the EIP for $region. have you run '$bin apply' to provision infrastructure in $region?") 29 | eips[$i]="${eip}" 30 | done 31 | 32 | # start tunnels from each region 33 | 34 | for ((i = 0; i < ${#regions[@]}; ++i)) 35 | do 36 | region=${regions[$i]} 37 | eip=${eips[$i]} 38 | 39 | echo "starting geth and constellation in $region" 40 | ssh -oStrictHostKeyChecking=no ubuntu@"${eip}" -- ./start 41 | done 42 | -------------------------------------------------------------------------------- /terraform/bin/.multi-start-tunnels: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Starts tunnels for a multi-region cluster 4 | 5 | set -euo pipefail 6 | 7 | die() { 8 | echo >&2 "ERROR: $@" 9 | exit 1 10 | } 11 | 12 | print_usage() { 13 | die "usage: multi-start-tunnels [CLUSTER_NAME]" 14 | } 15 | 16 | [ "$#" -ne 1 ] && print_usage 17 | 18 | env=$1 19 | regions=(ireland tokyo virginia) 20 | 21 | # fetch EIPs 22 | 23 | eips=() 24 | for ((i = 0; i < ${#regions[@]}; ++i)) 25 | do 26 | region=${regions[$i]} 27 | bin="${env}-${region}" 28 | eip=$($bin output eip 2>/dev/null || die "could not get the EIP for $region. 
have you run '$bin apply' to provision infrastructure in $region?") 29 | eips[$i]="${eip}" 30 | done 31 | 32 | # start tunnels from each region 33 | 34 | for ((i = 0; i < ${#regions[@]}; ++i)) 35 | do 36 | region=${regions[$i]} 37 | eip=${eips[$i]} 38 | 39 | echo "starting tunnels in $region" 40 | ssh -oStrictHostKeyChecking=no ubuntu@"${eip}" -- ./.start-tunnels "${eips[@]}" 41 | done 42 | -------------------------------------------------------------------------------- /terraform/bin/demo: -------------------------------------------------------------------------------- 1 | .bin/env-wrapper -------------------------------------------------------------------------------- /terraform/bin/global: -------------------------------------------------------------------------------- 1 | .bin/env-wrapper -------------------------------------------------------------------------------- /terraform/bin/intl-ireland: -------------------------------------------------------------------------------- 1 | .bin/env-wrapper -------------------------------------------------------------------------------- /terraform/bin/intl-tokyo: -------------------------------------------------------------------------------- 1 | .bin/env-wrapper -------------------------------------------------------------------------------- /terraform/bin/intl-virginia: -------------------------------------------------------------------------------- 1 | .bin/env-wrapper -------------------------------------------------------------------------------- /terraform/bin/multi-start: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euo pipefail 4 | 5 | die() { 6 | echo >&2 "$@" 7 | exit 1 8 | } 9 | 10 | print_usage() { 11 | die "usage: multi-start [MULTI-REGION-ENV-NAME]" 12 | } 13 | 14 | [ "$#" -ne 1 ] && print_usage 15 | 16 | env=$1 17 | 18 | .multi-start-tunnels $env 19 | .multi-start-cluster $env 20 | -------------------------------------------------------------------------------- /terraform/global/main.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | access_key = "${var.access_key}" 3 | secret_key = "${var.secret_key}" 4 | region = "${var.aws_region}" 5 | } 6 | 7 | resource "aws_iam_role" "ecr_accessor" { 8 | name = "${var.project}.${var.env}.ecrAccessor" 9 | assume_role_policy = <node-id", 283 | "echo 'abcd' >password", 284 | "echo '${var.multi_region ? "multi-region" : "single-region"}' >cluster-type", 285 | "echo '${var.total_cluster_size}' >cluster-size", 286 | "echo '${length(var.subnet_azs)}' >num-subnets" 287 | ] 288 | } 289 | 290 | provisioner "remote-exec" { 291 | scripts = [ 292 | "scripts/provision/prepare.sh", 293 | "scripts/provision/fetch-images.sh", 294 | "scripts/provision/start-single-region-cluster.sh" 295 | ] 296 | } 297 | } 298 | 299 | # 300 | # If this is a multi-region cluster, we allocate an EIP for each instance in the region 301 | # 302 | 303 | resource "aws_eip" "static_ip" { 304 | count = "${ var.multi_region ? "${var.num_instances}" : "0"}" 305 | vpc = true 306 | } 307 | 308 | resource "aws_eip_association" "quorum_eip_association" { 309 | count = "${ var.multi_region ? 
"${var.num_instances}" : "0"}" 310 | instance_id = "${element(aws_instance.quorum.*.id, count.index)}" 311 | allocation_id = "${element(aws_eip.static_ip.*.id, count.index)}" 312 | } 313 | -------------------------------------------------------------------------------- /terraform/multi-region-vars/ireland.tfvars: -------------------------------------------------------------------------------- 1 | multi_region = "1" 2 | aws_region = "eu-west-1" 3 | subnet_azs = ["eu-west-1a"] 4 | num_instances = 1 5 | first_geth_id = 1 6 | -------------------------------------------------------------------------------- /terraform/multi-region-vars/tokyo.tfvars: -------------------------------------------------------------------------------- 1 | multi_region = "1" 2 | aws_region = "ap-northeast-1" 3 | subnet_azs = ["ap-northeast-1a"] 4 | num_instances = 1 5 | first_geth_id = 2 6 | -------------------------------------------------------------------------------- /terraform/multi-region-vars/virginia.tfvars: -------------------------------------------------------------------------------- 1 | multi_region = "1" 2 | aws_region = "us-east-1" 3 | subnet_azs = ["us-east-1b"] 4 | num_instances = 1 5 | first_geth_id = 3 6 | -------------------------------------------------------------------------------- /terraform/output.tf: -------------------------------------------------------------------------------- 1 | output "multi-region-cluster" { 2 | value = "${var.multi_region}" 3 | } 4 | output "aws-region" { 5 | value = "${var.aws_region}" 6 | } 7 | output "environment" { 8 | value = "${var.env}" 9 | } 10 | output "quorum-private-ips" { 11 | value = ["${aws_instance.quorum.*.private_ip}"] 12 | } 13 | output "quorum-public-ips" { 14 | value = ["${aws_instance.quorum.*.public_ip}"] 15 | } 16 | output "quorum-azs" { 17 | value = ["${aws_instance.quorum.*.availability_zone}"] 18 | } 19 | 20 | # Output for single-region IPs 21 | #output "geth1" { value = "${ var.multi_region ? "" : aws_instance.quorum.0.public_ip }" } 22 | #output "geth2" { value = "${ var.multi_region ? "" : aws_instance.quorum.1.public_ip }" } 23 | #output "geth3" { value = "${ var.multi_region ? 
"" : aws_instance.quorum.2.public_ip }" } 24 | 25 | # Output for multi-region IP 26 | #output "eip" { 27 | # value = "${element(aws_eip_association.quorum_eip_association.*.public_ip, 0)}" 28 | #} 29 | #output "all_eips" { 30 | # value = ["${aws_eip_association.quorum_eip_association.*.public_ip}"] 31 | #} 32 | -------------------------------------------------------------------------------- /terraform/scripts/install/attach.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euo pipefail 4 | 5 | rlwrap sudo docker run -v /home/ubuntu/datadir:/datadir -it quorum attach /datadir/geth.ipc 6 | -------------------------------------------------------------------------------- /terraform/scripts/install/follow.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euo pipefail 4 | 5 | sudo docker ps | grep geth | grep -v attach | awk '{print $1}' | xargs sudo docker inspect -f '' | jq -r '.[0].LogPath' | xargs sudo tail -f | jq -r '.log | rtrimstr("\n")' 6 | -------------------------------------------------------------------------------- /terraform/scripts/install/spam.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | set -u 5 | 6 | die() { 7 | echo >&2 "$@" 8 | exit 1 9 | } 10 | 11 | print_usage() { 12 | die "usage: spam RPS" 13 | } 14 | 15 | [ "$#" -ne 1 ] && print_usage 16 | 17 | rps=$1 18 | cluster_type=$(cat cluster-type) 19 | 20 | if [[ $cluster_type == "multi-region" ]] 21 | then 22 | multi_region_opt="-g" 23 | else 24 | multi_region_opt="" 25 | fi 26 | 27 | sudo docker run -it -v /home/ubuntu/datadir:/datadir -v /home/ubuntu/node-id:/home/ubuntu/node-id -v /home/ubuntu/num-subnets:/home/ubuntu/num-subnets quorum-aws /bin/sh -c "aws-spam -r ${rps} ${multi_region_opt}" 28 | -------------------------------------------------------------------------------- /terraform/scripts/install/start-constellation.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euo pipefail 4 | 5 | my_gid=$(cat node-id) 6 | my_port=$((9000 + $my_gid)) 7 | 8 | echo "starting constellation ${my_gid}" 9 | 10 | sudo docker run -d \ 11 | -p ${my_port}:${my_port} \ 12 | -v /home/ubuntu/datadir:/datadir \ 13 | constellation \ 14 | /bin/sh -c "constellation-node /datadir/constellation.toml" 15 | -------------------------------------------------------------------------------- /terraform/scripts/install/start-quorum.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euo pipefail 4 | 5 | gid=$(cat node-id) 6 | p2p_port=$((30400 + $gid)) 7 | rpc_port=$((40400 + $gid)) 8 | raft_port=$((50400 + $gid)) 9 | 10 | echo "starting geth ${gid}" 11 | 12 | sudo docker run -d -p $p2p_port:$p2p_port -p $rpc_port:$rpc_port -p $raft_port:$raft_port -v /home/ubuntu/datadir:/datadir -v /home/ubuntu/password:/password -e PRIVATE_CONFIG='/datadir/constellation.toml' quorum --datadir /datadir --port $p2p_port --rpcport $rpc_port --raftport $raft_port --networkid 1418 --verbosity 3 --nodiscover --rpc --rpccorsdomain "'*'" --rpcaddr '0.0.0.0' --raft --unlock 0 --password /password 13 | -------------------------------------------------------------------------------- /terraform/scripts/install/start-tunnels.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | set -u 5 | 6 | 
my_gid=$(cat node-id)
7 | cluster_type=$(cat cluster-type)
8 | cluster_size=$(cat cluster-size)
9 | phrase="tunnel to geth" # phrase we use in output, and also look for via grep
10 |
11 | die() {
12 |   echo >&2 "ERROR: $@"
13 |   exit 1
14 | }
15 |
16 | print_usage() {
17 |   die "usage: start-tunnels [eip0 eip1 ...]"
18 | }
19 |
20 | [ "$#" -ne "${cluster_size}" ] && print_usage
21 |
22 | eips=("$@")
23 |
24 | start_tunnel() {
25 |   gid=$1
26 |   eip=$2
27 |   port=$3
28 |
29 |   echo "starting tunnel to geth ${gid} at ${eip}:${port}"
30 |
31 |   # I couldn't figure out how to get docker to cooperate with starting an SSH tunnel inside of it, so we use nohup and background for now:
32 |   nohup bash -c "until (ssh -M -T -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i .ssh/tunnel -N -L 0.0.0.0:${port}:localhost:${port} ubuntu@${eip}); do echo 're-establishing $phrase ${gid}:${port}'; done" >/dev/null 2>/dev/null </dev/null &
33 | }
34 |
35 | # open tunnels to the constellation (9000+gid) and raft (50400+gid) ports of
36 | # every other node:
37 | for ((i = 0; i < ${#eips[@]}; ++i))
38 | do
39 |   gid=$((i + 1))
40 |
41 |   if [[ "${gid}" -ne "${my_gid}" ]]
42 |   then
43 |     start_tunnel "${gid}" "${eips[$i]}" $((9000 + gid))
44 |     start_tunnel "${gid}" "${eips[$i]}" $((50400 + gid))
45 |   fi
46 | done
--------------------------------------------------------------------------------
/terraform/scripts/install/start.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -euo pipefail
4 |
5 | die() {
6 |   echo >&2 "ERROR: $@"
7 |   exit 1
8 | }
9 |
10 | cluster_type=$(cat cluster-type)
11 |
12 | if [[ $cluster_type == "multi-region" ]]
13 | then
14 |   if [[ $(ps aux | grep [t]unnel | wc -l) -eq 0 ]]
15 |   then
16 |     die "it looks like tunnels have not been started yet for this multi-region cluster. on the external provisioning machine, once all regions have been provisioned, execute multi-start."
17 |   fi
18 | fi
19 |
20 | echo "trying to start constellation and quorum..."
21 |
22 | ./.start-constellation
23 | ./.start-quorum
24 |
--------------------------------------------------------------------------------
/terraform/scripts/provision/fetch-images.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -euo pipefail
4 |
5 | IMAGES_REGION=us-east-1
6 | IMAGES=(quorum constellation quorum-aws)
7 |
8 | eval `sudo aws ecr get-login --region="${IMAGES_REGION}" | sed 's/^docker/sudo docker/'` >/dev/null
9 |
10 | echo "fetching docker images"
11 |
12 | for image in ${IMAGES[@]}
13 | do
14 |   echo "  fetching $image"
15 |   repo=$(sudo aws ecr describe-repositories --region="${IMAGES_REGION}" --repository-names "${image}" | jq -r '.repositories[0].repositoryUri')
16 |   sudo docker pull "${repo}:latest" >/dev/null
17 |   sudo docker tag "${repo}:latest" "${image}:latest"
18 | done
19 |
20 | echo "fetching complete"
21 |
--------------------------------------------------------------------------------
/terraform/scripts/provision/prepare.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 | set -u
5 |
6 | echo "installing docker, the aws cli, rlwrap, and jq"
7 |
8 | sudo apt-get update -y >/dev/null
9 | sudo apt-get install -y apt-transport-https ca-certificates >/dev/null
10 | sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D >/dev/null
11 | sudo sh -c 'echo "deb https://apt.dockerproject.org/repo ubuntu-xenial main" > /etc/apt/sources.list.d/docker.list' >/dev/null
12 | sudo apt-get update -y >/dev/null
13 | sudo apt-get install -y linux-aws linux-headers-aws linux-image-aws >/dev/null
14 | sudo apt-get install -y docker-engine awscli rlwrap jq >/dev/null
15 |
16 | echo "configuring awscli"
17 |
18 | region=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | jq .region -r)
19 | sudo aws configure set region "${region}"
20 |
21 | echo "installation complete"
22 |
23 | # Allow SSH access from other quorum nodes for multi-region setups.
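# (The "tunnel" keypair itself is generated once on the provisioning machine
# by bin/.bin/env-wrapper and placed in this user's ~/.ssh during
# provisioning, before this script runs.)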
24 | cat /home/ubuntu/.ssh/tunnel.pub >> /home/ubuntu/.ssh/authorized_keys
25 | chmod 600 /home/ubuntu/.ssh/tunnel.pub
26 | chmod 600 /home/ubuntu/.ssh/tunnel
27 |
--------------------------------------------------------------------------------
/terraform/scripts/provision/start-single-region-cluster.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -euo pipefail
4 |
5 | cluster_type=$(cat cluster-type)
6 |
7 | if [[ $cluster_type == "multi-region" ]]
8 | then
9 |   echo "not starting this multi-region cluster yet. all regions need to be provisioned, tunnels need to be set up, and then ./start can be run on each node"
10 | else
11 |   echo "starting this single-region cluster"
12 |   ./start
13 | fi
14 |
--------------------------------------------------------------------------------
/terraform/secrets/ec2-keys/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Consensys/quorum-aws/55acc518916e894d7184a7a16fef689fac99308c/terraform/secrets/ec2-keys/.gitkeep
--------------------------------------------------------------------------------
/terraform/secrets/terraform.tfvars.example:
--------------------------------------------------------------------------------
1 | access_key = "... REPLACE ME ..."
2 | secret_key = "... REPLACE ME ..."
--------------------------------------------------------------------------------
/terraform/state/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Consensys/quorum-aws/55acc518916e894d7184a7a16fef689fac99308c/terraform/state/.gitkeep
--------------------------------------------------------------------------------
/terraform/terraform.tfvars:
--------------------------------------------------------------------------------
1 | secrets/terraform.tfvars
--------------------------------------------------------------------------------
/terraform/variables.tf:
--------------------------------------------------------------------------------
1 | variable "env" {
2 |   description = "Name of the environment"
3 |   # this value is set by bin/.bin/env-wrapper
4 | }
5 | variable "access_key" {
6 |   description = "AWS access key"
7 |   #
8 |   # NOTE: this value comes from terraform.tfvars
9 |   #
10 | }
11 | variable "secret_key" {
12 |   description = "AWS secret key"
13 |   #
14 |   # NOTE: this value comes from terraform.tfvars
15 |   #
16 | }
17 | variable "ssh_keypair_prefix" {
18 |   description = "Prefix of the SSH keypair for logging into instances, to be followed by the env (cluster) name"
19 |   default = "quorum-"
20 | }
21 | variable "tunnel_keypair_name" {
22 |   description = "Name of the SSH keypair for tunneling between quorum nodes within AWS"
23 |   default = "tunnel"
24 | }
25 | variable "project" {
26 |   default = "quorum-cluster"
27 | }
28 | variable "instance_types" {
29 |   default = {
30 |     quorum = "m3.large"
31 |     # TODO: other types of nodes, like for metrics or rpc (tx) senders
32 |   }
33 | }
34 | variable "volume_types" {
35 |   default = {
36 |     quorum = "gp2"
37 |   }
38 | }
39 | variable "volume_sizes" {
40 |   default = {
41 |     quorum = "50"
42 |   }
43 | }
44 | variable "local_datadir_root" {
45 |   default = "cluster-data"
46 | }
47 | variable "remote_user" {
48 |   default = "ubuntu"
49 | }
50 | variable "remote_homedir" {
51 |   default = "/home/ubuntu"
52 | }
53 | variable "precreated_global_quorum_iam_instance_profile_id" {
54 |   default = "quorum-cluster.global.ecrAccessor"
# This was the output of provisioning the permanent "global" IAM infrastructure. 55 | } 56 | 57 | # 58 | # Variables that can be overridden by multi-region settings (see: multi-region-vars/*.tfvars): 59 | # 60 | variable "multi_region" { 61 | description = "Whether the cluster spans AWS regions. This is a boolean represented as a string until TF supports first-class booleans." 62 | default = "0" 63 | } 64 | variable "aws_region" { 65 | description = "AWS region" 66 | default = "us-east-1" 67 | } 68 | variable "subnet_azs" { 69 | type = "list" 70 | default = ["us-east-1b", "us-east-1c", "us-east-1d"] 71 | } 72 | variable "total_cluster_size" { 73 | default = 3 # Number of quorum nodes in this cluster across all aws regions. 74 | } 75 | variable "num_instances" { 76 | default = 3 # This is less than total_cluster_size when we are running a multi-region setup. 77 | } 78 | variable "first_geth_id" { 79 | description = "Amount to add to the instance's count.index to calculate gethId. This is primarily for supporting multi-region clusters." 80 | default = 1 81 | } 82 | # 83 | # [End of variables overridden by multi-region settings.] 84 | # 85 | --------------------------------------------------------------------------------
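Tying it together: the variables above are what `bin/.bin/env-wrapper` feeds to Terraform per environment. The full multi-region walkthrough from the README condenses to the following (assuming the images have been pushed and `bin/global apply` has already been run):

```
cd terraform
bin/intl-ireland apply      # run first: generates cluster-data/ for the other regions
bin/intl-tokyo apply        # these two can run in parallel
bin/intl-virginia apply
bin/multi-start intl        # start tunnels, then constellation and geth on each node
ssh ubuntu@$(bin/intl-virginia output eip)
```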