├── e2e-tests ├── .gitignore ├── local-testnet-singlechain-start.sh ├── go_rly_ics_path_config.json ├── local-testnet-singlechain.sh ├── local-testnet-singlechain-restart.sh ├── test_utils.go ├── local-testnet-singlechain-cometbft.sh ├── local-testnet-singlechain-setup.sh ├── main_test.go ├── local-testnet-debug.sh └── local-testnet.sh ├── cometmock ├── utils │ ├── txs.go │ ├── blocks.go │ ├── votes.go │ └── state.go ├── abci_client │ ├── counterparty.go │ └── time_handler.go ├── rpc_server │ ├── rpc_server.go │ ├── websocket.go │ └── routes.go ├── storage │ └── storage.go └── main.go ├── Makefile ├── Dockerfile ├── .github ├── workflows │ ├── e2e-tests.yml │ └── docker-publish.yml └── ISSUE_TEMPLATE │ └── feature_request.md ├── .gitignore ├── .mergify.yml ├── Dockerfile.test ├── go.mod ├── genesis.json ├── README.md └── LICENSE /e2e-tests/.gitignore: -------------------------------------------------------------------------------- 1 | # Scripts that are generated by the testnet setup 2 | start_apps.sh 3 | start_cometmock.sh 4 | cometmock_log -------------------------------------------------------------------------------- /e2e-tests/local-testnet-singlechain-start.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ## Starts the testnet, assuming that the scripts generated by local-testnet-singlechain-setup.sh 4 | ## are already present in the current directory. 
# Fail fast and echo commands, consistent with the sibling testnet scripts.
set -eux

# Extra arguments to forward to CometMock (may be empty).
# Fix: this variable was previously assigned but never used; the raw "$1"
# was passed instead.
COMETMOCK_ARGS=${1:-}

./start_apps.sh
./start_cometmock.sh "$COMETMOCK_ARGS"
| test-docker: 8 | # Build the Docker image 9 | docker build -f Dockerfile.test -t cometmock-test . 10 | 11 | # Start a container and execute the test command inside 12 | docker rm cometmock-test-instance || true 13 | docker run --name cometmock-test-instance --workdir /CometMock cometmock-test go test -p 1 -timeout 600s ./e2e-tests -test.v -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.21-alpine 2 | 3 | ENV PACKAGES curl make git libc-dev bash gcc linux-headers 4 | RUN apk add --no-cache $PACKAGES 5 | 6 | ENV CGO_ENABLED=0 7 | ENV GOOS=linux 8 | ENV GOFLAGS="-buildvcs=false" 9 | 10 | # cache gomodules for cometmock 11 | ADD ./go.mod /go.mod 12 | ADD ./go.sum /go.sum 13 | RUN go mod download 14 | 15 | # Add CometMock and install it 16 | ADD . /CometMock 17 | WORKDIR /CometMock 18 | RUN go build -o /usr/local/bin/cometmock ./cometmock 19 | -------------------------------------------------------------------------------- /e2e-tests/local-testnet-singlechain.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ## This script sets up the local testnet and starts it. 4 | ## To run this, both the application binary and cometmock must be installed. 
5 | set -eux 6 | 7 | parent_path=$( cd "$(dirname "${BASH_SOURCE[0]}")" ; pwd -P ) 8 | pushd "$parent_path" 9 | 10 | BINARY_NAME=$1 11 | 12 | COMETMOCK_ARGS=$2 13 | 14 | # set up the net 15 | ./local-testnet-singlechain-setup.sh $BINARY_NAME "$COMETMOCK_ARGS" 16 | ./local-testnet-singlechain-start.sh -------------------------------------------------------------------------------- /.github/workflows/e2e-tests.yml: -------------------------------------------------------------------------------- 1 | name: Automated Tests 2 | on: 3 | push: 4 | branches: 5 | - main 6 | - v0.38.x 7 | - v0.37.x 8 | - v0.34.x 9 | pull_request: 10 | branches: 11 | - main 12 | - v0.38.x 13 | - v0.37.x 14 | - v0.34.x 15 | jobs: 16 | Automated_Tests: 17 | runs-on: ubuntu-latest 18 | steps: 19 | # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it 20 | - uses: actions/checkout@v4 21 | - name: Make test 22 | run: make test-docker 23 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # If you prefer the allow list template instead of the deny list, see community template: 2 | # https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore 3 | # 4 | # Binaries for programs and plugins 5 | *.exe 6 | *.exe~ 7 | *.dll 8 | *.so 9 | *.dylib 10 | 11 | # Test binary, built with `go test -c` 12 | *.test 13 | !Dockerfile.test 14 | 15 | # Output of the go coverage tool, specifically when used with LiteIDE 16 | *.out 17 | 18 | # Dependency directories (remove the comment below to include it) 19 | # vendor/ 20 | 21 | # Go workspace file 22 | go.work 23 | 24 | .vscode 25 | 26 | **/__debug_bin 27 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 
3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /e2e-tests/local-testnet-singlechain-restart.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # After the testnet was started, this script can restart it. 4 | # It does so by killing the existing testnet, 5 | # overwriting the node home directories with backups made 6 | # right after initializatio, and then starting the testnet again. 
# The binary whose testnet processes should be restarted (e.g. simd).
BINARY_NAME=${1:-}

set -eux

ROOT_DIR=${HOME}/nodes/provider
BACKUP_DIR=${ROOT_DIR}_bkup

if [ -z "$BINARY_NAME" ]; then
    # Fix: the usage line previously omitted the required binary-name argument.
    echo "Usage: $0 <binary_name> [cometmock_args]"
    exit 1
fi

# Kill the testnet (ignore errors if nothing is running).
pkill -f "^${BINARY_NAME}" &> /dev/null || true
pkill -f ^cometmock &> /dev/null || true

# Restore the node home directories from the post-initialization backup.
# Quote the paths so directories containing spaces are handled safely.
rm -rf "${ROOT_DIR}"
cp -r "${BACKUP_DIR}" "${ROOT_DIR}"
its private key) 13 | type AbciCounterpartyClient struct { 14 | Client abciclient.Client 15 | NetworkAddress string 16 | ValidatorAddress string 17 | PrivValidator types.PrivValidator 18 | } 19 | 20 | // NewAbciCounterpartyClient creates a new AbciCounterpartyClient. 21 | func NewAbciCounterpartyClient(client abciclient.Client, networkAddress, validatorAddress string, privValidator types.PrivValidator) *AbciCounterpartyClient { 22 | return &AbciCounterpartyClient{ 23 | Client: client, 24 | NetworkAddress: networkAddress, 25 | ValidatorAddress: validatorAddress, 26 | PrivValidator: privValidator, 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /cometmock/utils/votes.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "time" 5 | 6 | cmttypes "github.com/cometbft/cometbft/proto/tendermint/types" 7 | "github.com/cometbft/cometbft/types" 8 | ) 9 | 10 | // MakeVote creates a signed vote. 11 | // Adapted from https://github.com/cometbft/cometbft/blob/9267594e0a17c01cc4a97b399ada5eaa8a734db5/internal/test/vote.go#L10. 12 | func MakeVote( 13 | val types.PrivValidator, // PrivValidator is the validator that will sign the vote. 
14 | chainID string, 15 | valIndex int32, 16 | height int64, 17 | round int32, 18 | step int, // StepType is the step in the consensus process, see https://github.com/cometbft/cometbft/blob/9267594e0a17c01cc4a97b399ada5eaa8a734db5/proto/tendermint/types/types.pb.go#L68 19 | blockID types.BlockID, 20 | time time.Time, 21 | ) (*types.Vote, error) { 22 | pubKey, err := val.GetPubKey() 23 | if err != nil { 24 | return nil, err 25 | } 26 | 27 | v := &types.Vote{ 28 | ValidatorAddress: pubKey.Address(), 29 | ValidatorIndex: valIndex, 30 | Height: height, 31 | Round: round, 32 | Type: cmttypes.SignedMsgType(step), 33 | BlockID: blockID, 34 | Timestamp: time, 35 | } 36 | 37 | vpb := v.ToProto() 38 | if err := val.SignVote(chainID, vpb); err != nil { 39 | return nil, err 40 | } 41 | 42 | v.Signature = vpb.Signature 43 | return v, nil 44 | } 45 | -------------------------------------------------------------------------------- /Dockerfile.test: -------------------------------------------------------------------------------- 1 | # import simd from ibc-go 2 | FROM ghcr.io/cosmos/simapp:0.50.0-rc.1 AS simapp-builder 3 | 4 | FROM golang:1.21-alpine as cometmock-builder 5 | 6 | ENV PACKAGES curl make git libc-dev bash gcc linux-headers 7 | RUN apk add --no-cache $PACKAGES 8 | 9 | ENV CGO_ENABLED=0 10 | ENV GOOS=linux 11 | ENV GOFLAGS="-buildvcs=false" 12 | 13 | # cache gomodules for cometmock 14 | ADD ./go.mod /go.mod 15 | ADD ./go.sum /go.sum 16 | RUN go mod download 17 | 18 | # Add CometMock and install it 19 | ADD . 
/CometMock 20 | WORKDIR /CometMock 21 | RUN go build -o /usr/local/bin/cometmock ./cometmock 22 | 23 | RUN apk update 24 | RUN apk add --no-cache which iputils procps-ng tmux net-tools htop jq gcompat 25 | 26 | FROM golang:1.21-alpine as test-env 27 | 28 | ENV PACKAGES curl make git libc-dev bash gcc linux-headers 29 | RUN apk add --no-cache $PACKAGES 30 | RUN apk update 31 | RUN apk add --no-cache which iputils procps-ng tmux net-tools htop jq gcompat 32 | 33 | ENV CGO_ENABLED=0 34 | ENV GOOS=linux 35 | ENV GOFLAGS="-buildvcs=false" 36 | 37 | ADD ./go.mod /go.mod 38 | ADD ./go.sum /go.sum 39 | RUN go mod download 40 | 41 | ADD ./e2e-tests /CometMock/e2e-tests 42 | 43 | COPY --from=simapp-builder /usr/bin/simd /usr/local/bin/simd 44 | 45 | WORKDIR /CometMock/e2e-tests 46 | RUN /CometMock/e2e-tests/local-testnet-singlechain-setup.sh simd "" 47 | 48 | COPY --from=cometmock-builder /usr/local/bin/cometmock /usr/local/bin/cometmock -------------------------------------------------------------------------------- /cometmock/rpc_server/rpc_server.go: -------------------------------------------------------------------------------- 1 | package rpc_server 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "fmt" 7 | "io" 8 | "io/ioutil" 9 | "net/http" 10 | 11 | "github.com/cometbft/cometbft/libs/log" 12 | rpcserver "github.com/cometbft/cometbft/rpc/jsonrpc/server" 13 | "github.com/cometbft/cometbft/rpc/jsonrpc/types" 14 | ) 15 | 16 | func StartRPCServer(listenAddr string, logger log.Logger, config *rpcserver.Config) { 17 | mux := http.NewServeMux() 18 | logger.Info("Starting RPC HTTP server on", "address", listenAddr) 19 | rpcLogger := logger.With("module", "rpc-server") 20 | wmLogger := rpcLogger.With("protocol", "websocket") 21 | wm := rpcserver.NewWebsocketManager(Routes, 22 | rpcserver.ReadLimit(config.MaxBodyBytes), 23 | ) 24 | wm.SetLogger(wmLogger) 25 | mux.HandleFunc("/websocket", wm.WebsocketHandler) 26 | rpcserver.RegisterRPCFuncs(mux, Routes, rpcLogger) 27 | 
listener, err := rpcserver.Listen( 28 | listenAddr, 29 | config.MaxOpenConnections, 30 | ) 31 | if err != nil { 32 | panic(err) 33 | } 34 | 35 | var rootHandler http.Handler = mux 36 | if err := rpcserver.Serve( 37 | listener, 38 | ExtraLogHandler(rootHandler, rpcLogger), 39 | rpcLogger, 40 | config, 41 | ); err != nil { 42 | logger.Error("Error serving server", "err", err) 43 | panic(err) 44 | } 45 | } 46 | 47 | func StartRPCServerWithDefaultConfig(listenAddr string, logger log.Logger) { 48 | StartRPCServer(listenAddr, logger, rpcserver.DefaultConfig()) 49 | } 50 | 51 | // RecoverAndLogHandler wraps an HTTP handler, adding error logging. 52 | // If the inner function panics, the outer function recovers, logs, sends an 53 | // HTTP 500 error response. 54 | func ExtraLogHandler(handler http.Handler, logger log.Logger) http.Handler { 55 | return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 56 | if r.Body != nil { 57 | body, err := io.ReadAll(r.Body) 58 | if err != nil { 59 | logger.Error("failed to read request body", "err", err) 60 | } else { 61 | logger.Debug("served RPC HTTP request", 62 | "body", string(body), 63 | ) 64 | } 65 | 66 | r.Body = ioutil.NopCloser(bytes.NewBuffer(body)) 67 | } 68 | 69 | handler.ServeHTTP(w, r) 70 | }) 71 | } 72 | 73 | // WriteRPCResponseHTTP marshals res as JSON (with indent) and writes it to w. 74 | func WriteRPCResponseHTTP(w http.ResponseWriter, res ...types.RPCResponse) error { 75 | return writeRPCResponseHTTP(w, []httpHeader{}, res...) 
76 | } 77 | 78 | type httpHeader struct { 79 | name string 80 | value string 81 | } 82 | 83 | func writeRPCResponseHTTP(w http.ResponseWriter, headers []httpHeader, res ...types.RPCResponse) error { 84 | var v interface{} 85 | if len(res) == 1 { 86 | v = res[0] 87 | } else { 88 | v = res 89 | } 90 | 91 | jsonBytes, err := json.Marshal(v) 92 | if err != nil { 93 | return fmt.Errorf("json marshal: %w", err) 94 | } 95 | w.Header().Set("Content-Type", "application/json") 96 | for _, header := range headers { 97 | w.Header().Set(header.name, header.value) 98 | } 99 | w.WriteHeader(200) 100 | _, err = w.Write(jsonBytes) 101 | return err 102 | } 103 | 104 | // WriteRPCResponseHTTPError marshals res as JSON (with indent) and writes it 105 | // to w. 106 | // 107 | // source: https://www.jsonrpc.org/historical/json-rpc-over-http.html 108 | func WriteRPCResponseHTTPError( 109 | w http.ResponseWriter, 110 | httpCode int, 111 | res types.RPCResponse, 112 | ) error { 113 | if res.Error == nil { 114 | panic("tried to write http error response without RPC error") 115 | } 116 | 117 | jsonBytes, err := json.Marshal(res) 118 | if err != nil { 119 | return fmt.Errorf("json marshal: %w", err) 120 | } 121 | 122 | w.Header().Set("Content-Type", "application/json") 123 | w.WriteHeader(httpCode) 124 | _, err = w.Write(jsonBytes) 125 | return err 126 | } 127 | -------------------------------------------------------------------------------- /.github/workflows/docker-publish.yml: -------------------------------------------------------------------------------- 1 | name: Docker 2 | 3 | # This workflow uses actions that are not certified by GitHub. 4 | # They are provided by a third-party and are governed by 5 | # separate terms of service, privacy policy, and support 6 | # documentation. 7 | 8 | on: 9 | schedule: 10 | - cron: '38 20 * * *' 11 | push: 12 | branches: [ "main", "v0.38.x", "v0.37.x", "v0.34.x" ] 13 | # Publish semver tags as releases. 
14 | tags: [ 'v*.*.*' ] 15 | pull_request: 16 | branches: [ "main", "v0.38.x", "v0.37.x", "v0.34.x" ] 17 | 18 | env: 19 | # Use docker.io for Docker Hub if empty 20 | REGISTRY: ghcr.io 21 | # github.repository as / 22 | IMAGE_NAME: ${{ github.repository }} 23 | 24 | 25 | jobs: 26 | build: 27 | 28 | runs-on: ubuntu-latest 29 | permissions: 30 | contents: read 31 | packages: write 32 | # This is used to complete the identity challenge 33 | # with sigstore/fulcio when running outside of PRs. 34 | id-token: write 35 | 36 | steps: 37 | - name: Checkout repository 38 | uses: actions/checkout@v3 39 | 40 | # Install the cosign tool except on PR 41 | # https://github.com/sigstore/cosign-installer 42 | - name: Install cosign 43 | if: github.event_name != 'pull_request' 44 | uses: sigstore/cosign-installer@6e04d228eb30da1757ee4e1dd75a0ec73a653e06 #v3.1.1 45 | with: 46 | cosign-release: 'v2.1.1' 47 | 48 | # Workaround: https://github.com/docker/build-push-action/issues/461 49 | - name: Setup Docker buildx 50 | uses: docker/setup-buildx-action@79abd3f86f79a9d68a23c75a09a9a85889262adf 51 | 52 | 53 | # Login against a Docker registry except on PR 54 | # https://github.com/docker/login-action 55 | - name: Log into registry ${{ env.REGISTRY }} 56 | if: github.event_name != 'pull_request' 57 | uses: docker/login-action@28218f9b04b4f3f62068d7b6ce6ca5b26e35336c 58 | with: 59 | registry: ${{ env.REGISTRY }} 60 | username: ${{ github.actor }} 61 | password: ${{ secrets.GITHUB_TOKEN }} 62 | 63 | # Extract metadata (tags, labels) for Docker 64 | # https://github.com/docker/metadata-action 65 | - name: Extract Docker metadata 66 | id: meta 67 | uses: docker/metadata-action@98669ae865ea3cffbcbaa878cf57c20bbf1c6c38 68 | with: 69 | images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} 70 | tags: | 71 | type=raw,value=latest,enable=${{github.ref == 'refs/heads/main'}} 72 | type=semver,pattern=v{{major}}.{{minor}} 73 | type=semver,pattern={{version}},value=v${{ inputs.tags }},enable=${{ inputs.tags 
!= '' }} 74 | flavor: | 75 | latest=false 76 | 77 | # Build and push Docker image with Buildx (don't push on PR) 78 | # https://github.com/docker/build-push-action 79 | - name: Build and push Docker image 80 | id: build-and-push 81 | uses: docker/build-push-action@ac9327eae2b366085ac7f6a2d02df8aa8ead720a 82 | with: 83 | context: . 84 | push: ${{ github.event_name != 'pull_request' }} 85 | tags: ${{ steps.meta.outputs.tags }} 86 | labels: ${{ steps.meta.outputs.labels }} 87 | cache-from: type=gha 88 | cache-to: type=gha,mode=max 89 | platforms: linux/amd64,linux/arm64 90 | 91 | 92 | # Sign the resulting Docker image digest except on PRs. 93 | # This will only write to the public Rekor transparency log when the Docker 94 | # repository is public to avoid leaking data. If you would like to publish 95 | # transparency data even for private images, pass --force to cosign below. 96 | # https://github.com/sigstore/cosign 97 | - name: Sign the published Docker image 98 | if: ${{ github.event_name != 'pull_request' }} 99 | env: 100 | # https://docs.github.com/en/actions/security-guides/security-hardening-for-github-actions#using-an-intermediate-environment-variable 101 | TAGS: ${{ steps.meta.outputs.tags }} 102 | DIGEST: ${{ steps.build-and-push.outputs.digest }} 103 | # This step uses the identity token to provision an ephemeral certificate 104 | # against the sigstore community Fulcio instance. 105 | run: echo "${TAGS}" | xargs -I {} cosign sign --yes {}@${DIGEST} 106 | -------------------------------------------------------------------------------- /cometmock/abci_client/time_handler.go: -------------------------------------------------------------------------------- 1 | package abci_client 2 | 3 | import ( 4 | "sync" 5 | "time" 6 | ) 7 | 8 | // A TimeHandler is responsible for 9 | // deciding the timestamps of blocks. 10 | // It will be called by AbciClient.RunBlock 11 | // to decide on a block time. 
12 | // It may decide the time based on any number of factors, 13 | // and the parameters of its methods might expand over time as needed. 14 | // The TimeHandler does not have a way to decide the time of the first block, 15 | // which is expected to be done externally, e.g. from the Genesis. 16 | type TimeHandler interface { 17 | // CONTRACT: TimeHandler.GetBlockTime will be called 18 | // precisely once for each block after the first. 19 | // It returns the timestamp of the next block. 20 | GetBlockTime(lastBlockTimestamp time.Time) time.Time 21 | 22 | // AdvanceTime advances the timestamp of all following blocks by 23 | // the given duration. 24 | // The duration needs to be non-negative. 25 | // It returns the timestamp that the next block would have if it 26 | // was produced now. 27 | AdvanceTime(duration time.Duration) time.Time 28 | } 29 | 30 | // The SystemClockTimeHandler uses the system clock 31 | // to decide the timestamps of blocks. 32 | // It will return the system time + offset for each block. 33 | // The offset is calculated by the initial timestamp 34 | // + the sum of all durations passed to AdvanceTime. 35 | type SystemClockTimeHandler struct { 36 | // The offset to add to the system time. 
37 | curOffset time.Duration 38 | 39 | // A mutex that ensures that there are no concurrent calls 40 | // to AdvanceTime 41 | mutex sync.Mutex 42 | } 43 | 44 | func NewSystemClockTimeHandler(initialTimestamp time.Time) *SystemClockTimeHandler { 45 | return &SystemClockTimeHandler{ 46 | curOffset: time.Since(initialTimestamp), 47 | } 48 | } 49 | 50 | func (s *SystemClockTimeHandler) GetBlockTime(lastBlockTimestamp time.Time) time.Time { 51 | return time.Now().Add(s.curOffset) 52 | } 53 | 54 | func (s *SystemClockTimeHandler) AdvanceTime(duration time.Duration) time.Time { 55 | s.mutex.Lock() 56 | defer s.mutex.Unlock() 57 | 58 | s.curOffset += duration 59 | return time.Now().Add(s.curOffset) 60 | } 61 | 62 | var _ TimeHandler = (*SystemClockTimeHandler)(nil) 63 | 64 | // The FixedBlockTimeHandler uses a fixed duration 65 | // to advance the timestamp of a block compared to the previous block. 66 | // The block timestamps therefore do not at all depend on the system time, 67 | // but on the time of the previous block. 68 | type FixedBlockTimeHandler struct { 69 | // The fixed duration to add to the last block time 70 | // when deciding the next block timestamp. 71 | blockTime time.Duration 72 | 73 | // The offset to add to the last block time. 74 | // This will be cleared after each block, 75 | // but since the block time of the next block depends 76 | // on the last block, 77 | // this will shift the timestamps of all future blocks. 78 | curBlockOffset time.Duration 79 | 80 | // A mutex that ensures that GetBlockTime and AdvanceTime 81 | // are not called concurrently. 82 | // Otherwise, the block offset might be put into a broken state. 83 | mutex sync.Mutex 84 | 85 | // The timestamp of the last block we produced. 86 | // If this is used before the first block is produced, 87 | // it will be the zero time. 
88 | lastBlockTimestamp time.Time 89 | } 90 | 91 | func NewFixedBlockTimeHandler(blockTime time.Duration) *FixedBlockTimeHandler { 92 | return &FixedBlockTimeHandler{ 93 | blockTime: blockTime, 94 | curBlockOffset: 0, 95 | } 96 | } 97 | 98 | func (f *FixedBlockTimeHandler) GetBlockTime(lastBlockTimestamp time.Time) time.Time { 99 | f.mutex.Lock() 100 | defer f.mutex.Unlock() 101 | 102 | res := lastBlockTimestamp.Add(f.blockTime + f.curBlockOffset) 103 | f.curBlockOffset = 0 104 | f.lastBlockTimestamp = res 105 | return res 106 | } 107 | 108 | // FixedBlockTimeHandler.AdvanceTime will only return the correct next block time 109 | // after GetBlockTime has been called once, but it will 110 | // still advance the time correctly before that - only the output will be wrong. 111 | func (f *FixedBlockTimeHandler) AdvanceTime(duration time.Duration) time.Time { 112 | f.mutex.Lock() 113 | defer f.mutex.Unlock() 114 | 115 | f.curBlockOffset += duration 116 | return f.lastBlockTimestamp.Add(f.blockTime + f.curBlockOffset) 117 | } 118 | 119 | var _ TimeHandler = (*FixedBlockTimeHandler)(nil) 120 | -------------------------------------------------------------------------------- /cometmock/utils/state.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | 7 | abci "github.com/cometbft/cometbft/abci/types" 8 | cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" 9 | "github.com/cometbft/cometbft/types" 10 | ) 11 | 12 | // mostly copy-pasted/adjusted from https://github.com/p-offtermatt/cometbft/blob/ph/make-public/state/execution.go#L486 13 | // BuildExtendedCommitInfo populates an ABCI extended commit from the 14 | // corresponding CometBFT extended commit ec, using the stored validator set 15 | // from ec. It requires ec to include the original precommit votes along with 16 | // the vote extensions from the last commit. 
17 | // 18 | // For heights below the initial height, for which we do not have the required 19 | // data, it returns an empty record. 20 | // 21 | // Assumes that the commit signatures are sorted according to validator index. 22 | func BuildExtendedCommitInfo(ec *types.ExtendedCommit, valSet *types.ValidatorSet, initialHeight int64, ap types.ABCIParams) abci.ExtendedCommitInfo { 23 | if ec.Height < initialHeight { 24 | // There are no extended commits for heights below the initial height. 25 | return abci.ExtendedCommitInfo{} 26 | } 27 | 28 | var ( 29 | ecSize = ec.Size() 30 | valSetLen = len(valSet.Validators) 31 | ) 32 | 33 | // Ensure that the size of the validator set in the extended commit matches 34 | // the size of the validator set in the state store. 35 | if ecSize != valSetLen { 36 | panic(fmt.Errorf( 37 | "extended commit size (%d) does not match validator set length (%d) at height %d\n\n%v\n\n%v", 38 | ecSize, valSetLen, ec.Height, ec.ExtendedSignatures, valSet.Validators, 39 | )) 40 | } 41 | 42 | votes := make([]abci.ExtendedVoteInfo, ecSize) 43 | for i, val := range valSet.Validators { 44 | ecs := ec.ExtendedSignatures[i] 45 | 46 | // Absent signatures have empty validator addresses, but otherwise we 47 | // expect the validator addresses to be the same. 48 | if ecs.BlockIDFlag != types.BlockIDFlagAbsent && !bytes.Equal(ecs.ValidatorAddress, val.Address) { 49 | panic(fmt.Errorf("validator address of extended commit signature in position %d (%s) does not match the corresponding validator's at height %d (%s)", 50 | i, ecs.ValidatorAddress, ec.Height, val.Address, 51 | )) 52 | } 53 | 54 | // Check if vote extensions were enabled during the commit's height: ec.Height. 55 | // ec is the commit from the previous height, so if extensions were enabled 56 | // during that height, we ensure they are present and deliver the data to 57 | // the proposer. If they were not enabled during this previous height, we 58 | // will not deliver extension data. 
59 | if err := ecs.EnsureExtension(ap.VoteExtensionsEnabled(ec.Height)); err != nil { 60 | panic(fmt.Errorf("commit at height %d has problems with vote extension data; err %w", ec.Height, err)) 61 | } 62 | 63 | votes[i] = abci.ExtendedVoteInfo{ 64 | Validator: types.TM2PB.Validator(val), 65 | BlockIdFlag: cmtproto.BlockIDFlag(ecs.BlockIDFlag), 66 | VoteExtension: ecs.Extension, 67 | ExtensionSignature: ecs.ExtensionSignature, 68 | } 69 | } 70 | 71 | return abci.ExtendedCommitInfo{ 72 | Round: ec.Round, 73 | Votes: votes, 74 | } 75 | } 76 | 77 | func BuildLastCommitInfo(block *types.Block, lastValSet *types.ValidatorSet, initialHeight int64) abci.CommitInfo { 78 | if block.Height == initialHeight { 79 | // there is no last commit for the initial height. 80 | // return an empty value. 81 | return abci.CommitInfo{} 82 | } 83 | 84 | var ( 85 | commitSize = block.LastCommit.Size() 86 | valSetLen = len(lastValSet.Validators) 87 | ) 88 | 89 | // ensure that the size of the validator set in the last commit matches 90 | // the size of the validator set in the state store. 
91 | if commitSize != valSetLen { 92 | panic(fmt.Sprintf( 93 | "commit size (%d) doesn't match validator set length (%d) at height %d\n\n%v\n\n%v", 94 | commitSize, valSetLen, block.Height, block.LastCommit.Signatures, lastValSet.Validators, 95 | )) 96 | } 97 | 98 | votes := make([]abci.VoteInfo, block.LastCommit.Size()) 99 | for i, val := range lastValSet.Validators { 100 | commitSig := block.LastCommit.Signatures[i] 101 | votes[i] = abci.VoteInfo{ 102 | Validator: types.TM2PB.Validator(val), 103 | BlockIdFlag: cmtproto.BlockIDFlag(commitSig.BlockIDFlag), 104 | } 105 | } 106 | 107 | return abci.CommitInfo{ 108 | Round: block.LastCommit.Round, 109 | Votes: votes, 110 | } 111 | } 112 | -------------------------------------------------------------------------------- /cometmock/rpc_server/websocket.go: -------------------------------------------------------------------------------- 1 | package rpc_server 2 | 3 | // File adapted from https://github.com/cometbft/cometbft/blob/v0.38.x/rpc/core/events.go 4 | 5 | import ( 6 | "context" 7 | "errors" 8 | "fmt" 9 | "time" 10 | 11 | cmtpubsub "github.com/cometbft/cometbft/libs/pubsub" 12 | cmtquery "github.com/cometbft/cometbft/libs/pubsub/query" 13 | ctypes "github.com/cometbft/cometbft/rpc/core/types" 14 | rpctypes "github.com/cometbft/cometbft/rpc/jsonrpc/types" 15 | "github.com/informalsystems/CometMock/cometmock/abci_client" 16 | ) 17 | 18 | const ( 19 | // maxQueryLength is the maximum length of a query string that will be 20 | // accepted. This is just a safety check to avoid outlandish queries. 21 | maxQueryLength = 512 22 | SubscribeTimeout = 10 * time.Second 23 | SubscriptionBufferSize = 100 24 | ) 25 | 26 | // Subscribe for events via WebSocket. 
27 | // More: https://docs.cometbft.com/v0.38.x/rpc/#/Websocket/subscribe 28 | func Subscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, error) { 29 | addr := ctx.RemoteAddr() 30 | 31 | client := abci_client.GlobalClient 32 | 33 | client.Logger.Info("Subscribe to query", "remote", addr, "query", query) 34 | 35 | q, err := cmtquery.New(query) 36 | if err != nil { 37 | return nil, fmt.Errorf("failed to parse query: %w", err) 38 | } 39 | 40 | subCtx, cancel := context.WithTimeout(ctx.Context(), SubscribeTimeout) 41 | defer cancel() 42 | 43 | sub, err := client.EventBus.Subscribe(subCtx, addr, q, SubscriptionBufferSize) 44 | if err != nil { 45 | return nil, err 46 | } 47 | 48 | closeIfSlow := false 49 | 50 | // Capture the current ID, since it can change in the future. 51 | subscriptionID := ctx.JSONReq.ID 52 | go func() { 53 | for { 54 | select { 55 | case msg := <-sub.Out(): 56 | var ( 57 | resultEvent = &ctypes.ResultEvent{Query: query, Data: msg.Data(), Events: msg.Events()} 58 | resp = rpctypes.NewRPCSuccessResponse(subscriptionID, resultEvent) 59 | ) 60 | writeCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second) 61 | defer cancel() 62 | if err := ctx.WSConn.WriteRPCResponse(writeCtx, resp); err != nil { 63 | client.Logger.Info("Can't write response (slow client)", 64 | "to", addr, "subscriptionID", subscriptionID, "err", err) 65 | 66 | if closeIfSlow { 67 | var ( 68 | err = errors.New("subscription was canceled (reason: slow client)") 69 | resp = rpctypes.RPCServerError(subscriptionID, err) 70 | ) 71 | if !ctx.WSConn.TryWriteRPCResponse(resp) { 72 | client.Logger.Info("Can't write response (slow client)", 73 | "to", addr, "subscriptionID", subscriptionID, "err", err) 74 | } 75 | return 76 | } 77 | } 78 | case <-sub.Canceled(): 79 | if sub.Err() != cmtpubsub.ErrUnsubscribed { 80 | var reason string 81 | if sub.Err() == nil { 82 | reason = "CometBFT exited" 83 | } else { 84 | reason = sub.Err().Error() 85 | } 86 | var ( 87 | 
err = fmt.Errorf("subscription was canceled (reason: %s)", reason) 88 | resp = rpctypes.RPCServerError(subscriptionID, err) 89 | ) 90 | if !ctx.WSConn.TryWriteRPCResponse(resp) { 91 | client.Logger.Info("Can't write response (slow client)", 92 | "to", addr, "subscriptionID", subscriptionID, "err", err) 93 | } 94 | } 95 | return 96 | } 97 | } 98 | }() 99 | 100 | return &ctypes.ResultSubscribe{}, nil 101 | } 102 | 103 | // Unsubscribe from events via WebSocket. 104 | // More: https://docs.cometbft.com/v0.38.x/rpc/#/Websocket/unsubscribe 105 | func Unsubscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultUnsubscribe, error) { 106 | addr := ctx.RemoteAddr() 107 | abci_client.GlobalClient.Logger.Info("Unsubscribe from query", "remote", addr, "query", query) 108 | q, err := cmtquery.New(query) 109 | if err != nil { 110 | return nil, fmt.Errorf("failed to parse query: %w", err) 111 | } 112 | err = abci_client.GlobalClient.EventBus.Unsubscribe(context.Background(), addr, q) 113 | if err != nil { 114 | return nil, err 115 | } 116 | return &ctypes.ResultUnsubscribe{}, nil 117 | } 118 | 119 | // UnsubscribeAll from all events via WebSocket. 
120 | // More: https://docs.cometbft.com/v0.38.x/rpc/#/Websocket/unsubscribe_all 121 | func UnsubscribeAll(ctx *rpctypes.Context) (*ctypes.ResultUnsubscribe, error) { 122 | addr := ctx.RemoteAddr() 123 | abci_client.GlobalClient.Logger.Info("Unsubscribe from all", "remote", addr) 124 | err := abci_client.GlobalClient.EventBus.UnsubscribeAll(context.Background(), addr) 125 | if err != nil { 126 | return nil, err 127 | } 128 | return &ctypes.ResultUnsubscribe{}, nil 129 | } 130 | -------------------------------------------------------------------------------- /cometmock/storage/storage.go: -------------------------------------------------------------------------------- 1 | package storage 2 | 3 | import ( 4 | "fmt" 5 | "sync" 6 | 7 | abcitypes "github.com/cometbft/cometbft/abci/types" 8 | cometstate "github.com/cometbft/cometbft/state" 9 | "github.com/cometbft/cometbft/types" 10 | ) 11 | 12 | // Storage is an interface for storing blocks, commits and states by height. 13 | // All methods are thread-safe. 14 | type Storage interface { 15 | // GetBlock returns the block at a given height. 16 | GetBlock(height int64) (*types.Block, error) 17 | 18 | // GetCommit returns the commit at a given height. 19 | GetCommit(height int64) (*types.Commit, error) 20 | 21 | // GetState returns the state at a given height. This is the state after 22 | // applying the block at that height. 23 | GetState(height int64) (*cometstate.State, error) 24 | 25 | // GetResponses returns the ABCI responses from a given height. 26 | GetResponses(height int64) (*abcitypes.ResponseFinalizeBlock, error) 27 | 28 | // LockBeforeStateUpdate locks the storage for state update. 29 | LockBeforeStateUpdate() 30 | 31 | // UnlockAfterStateUpdate unlocks the storage for state update. 32 | UnlockAfterStateUpdate() 33 | 34 | // UpdateStores updates the storage with the given block, commit, state and responses. 35 | // It is assumed that the block, commit, state and responses are all from the same height. 
36 | // If they are not, the storage will be in an inconsistent state. 37 | // If the storage is already updated with the given height, the storage will overwrite the existing data. 38 | // This method is *not* thread-safe. 39 | // Before calling this, the caller should call LockForStateUpdate(). 40 | // After calling this, the caller should call UnlockForStateUpdate(). 41 | UpdateStores( 42 | height int64, 43 | block *types.Block, 44 | commit *types.Commit, 45 | state *cometstate.State, 46 | responses *abcitypes.ResponseFinalizeBlock, 47 | ) error 48 | } 49 | 50 | // MapStorage is a simple in-memory implementation of Storage. 51 | type MapStorage struct { 52 | // a mutex that gets locked while the state is being updated, 53 | // so that a) updates do not interleave and b) reads do not happen while 54 | // the state is being updated, i.e. two stores might give bogus data. 55 | stateUpdateMutex sync.RWMutex 56 | blocks map[int64]*types.Block 57 | commits map[int64]*types.Commit 58 | states map[int64]*cometstate.State 59 | responses map[int64]*abcitypes.ResponseFinalizeBlock 60 | } 61 | 62 | // ensure MapStorage implements Storage 63 | var _ Storage = (*MapStorage)(nil) 64 | 65 | func (m *MapStorage) insertBlock(height int64, block *types.Block) error { 66 | if m.blocks == nil { 67 | m.blocks = make(map[int64]*types.Block) 68 | } 69 | m.blocks[height] = block 70 | return nil 71 | } 72 | 73 | func (m *MapStorage) GetBlock(height int64) (*types.Block, error) { 74 | m.stateUpdateMutex.RLock() 75 | defer m.stateUpdateMutex.RUnlock() 76 | if m.blocks == nil { 77 | m.blocks = make(map[int64]*types.Block) 78 | } 79 | if block, ok := m.blocks[height]; ok { 80 | return block, nil 81 | } 82 | return nil, fmt.Errorf("block for height %v not found", height) 83 | } 84 | 85 | func (m *MapStorage) insertCommit(height int64, commit *types.Commit) error { 86 | if m.commits == nil { 87 | m.commits = make(map[int64]*types.Commit) 88 | } 89 | 90 | m.commits[height] = commit 91 | return 
nil 92 | } 93 | 94 | func (m *MapStorage) GetCommit(height int64) (*types.Commit, error) { 95 | m.stateUpdateMutex.RLock() 96 | defer m.stateUpdateMutex.RUnlock() 97 | if m.commits == nil { 98 | m.commits = make(map[int64]*types.Commit) 99 | } 100 | 101 | if commit, ok := m.commits[height]; ok { 102 | return commit, nil 103 | } 104 | return nil, fmt.Errorf("commit for height %v not found", height) 105 | } 106 | 107 | func (m *MapStorage) insertState(height int64, state *cometstate.State) error { 108 | if m.states == nil { 109 | m.states = make(map[int64]*cometstate.State) 110 | } 111 | 112 | m.states[height] = state 113 | return nil 114 | } 115 | 116 | func (m *MapStorage) GetState(height int64) (*cometstate.State, error) { 117 | m.stateUpdateMutex.RLock() 118 | defer m.stateUpdateMutex.RUnlock() 119 | if m.states == nil { 120 | m.states = make(map[int64]*cometstate.State) 121 | } 122 | 123 | if state, ok := m.states[height]; ok { 124 | return state, nil 125 | } 126 | return nil, fmt.Errorf("state for height %v not found", height) 127 | } 128 | 129 | func (m *MapStorage) insertResponses( 130 | height int64, 131 | responses *abcitypes.ResponseFinalizeBlock, 132 | ) error { 133 | if m.responses == nil { 134 | m.responses = make(map[int64]*abcitypes.ResponseFinalizeBlock) 135 | } 136 | 137 | m.responses[height] = responses 138 | return nil 139 | } 140 | 141 | func (m *MapStorage) GetResponses(height int64) (*abcitypes.ResponseFinalizeBlock, error) { 142 | m.stateUpdateMutex.RLock() 143 | defer m.stateUpdateMutex.RUnlock() 144 | if m.responses == nil { 145 | m.responses = make(map[int64]*abcitypes.ResponseFinalizeBlock) 146 | } 147 | 148 | if responses, ok := m.responses[height]; ok { 149 | return responses, nil 150 | } 151 | return nil, fmt.Errorf("responses for height %v not found", height) 152 | } 153 | 154 | func (m *MapStorage) LockBeforeStateUpdate() { 155 | m.stateUpdateMutex.Lock() 156 | } 157 | 158 | func (m *MapStorage) UnlockAfterStateUpdate() { 159 | 
m.stateUpdateMutex.Unlock() 160 | } 161 | 162 | func (m *MapStorage) UpdateStores(height int64, block *types.Block, commit *types.Commit, state *cometstate.State, responses *abcitypes.ResponseFinalizeBlock) error { 163 | m.insertBlock(height, block) 164 | m.insertCommit(height, commit) 165 | m.insertState(height, state) 166 | m.insertResponses(height, responses) 167 | return nil 168 | } 169 | -------------------------------------------------------------------------------- /e2e-tests/test_utils.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "fmt" 7 | "math/big" 8 | "os/exec" 9 | "strconv" 10 | "strings" 11 | "time" 12 | ) 13 | 14 | // From the output of the AbciInfo command, extract the latest block height. 15 | // The json bytes should look e.g. like this: 16 | // {"jsonrpc":"2.0","id":1,"result":{"response":{"data":"interchain-security-p","last_block_height":"2566","last_block_app_hash":"R4Q3Si7+t7TIidl2oTHcQRDNEz+lP0IDWhU5OI89psg="}}} 17 | func extractHeightFromInfo(jsonBytes []byte) (int, error) { 18 | // Use a generic map to represent the JSON structure 19 | var data map[string]interface{} 20 | 21 | if err := json.Unmarshal(jsonBytes, &data); err != nil { 22 | return -1, fmt.Errorf("failed to unmarshal JSON %s \n error was %v", string(jsonBytes), err) 23 | } 24 | 25 | // Navigate the map and use type assertions to get the last_block_height 26 | result, ok := data["result"].(map[string]interface{}) 27 | if !ok { 28 | return -1, fmt.Errorf("failed to navigate abci_info output structure trying to access result: json was %s", string(jsonBytes)) 29 | } 30 | 31 | response, ok := result["response"].(map[string]interface{}) 32 | if !ok { 33 | return -1, fmt.Errorf("failed to navigate abci_info output structure trying to access response: json was %s", string(jsonBytes)) 34 | } 35 | 36 | lastBlockHeight, ok := response["last_block_height"].(string) 37 | if !ok { 38 
| return -1, fmt.Errorf("failed to navigate abci_info output structure trying to access last_block_height: json was %s", string(jsonBytes)) 39 | } 40 | 41 | return strconv.Atoi(lastBlockHeight) 42 | } 43 | 44 | // Queries simd for the latest block. 45 | func QueryBlock() (string, error) { 46 | // execute the query command 47 | cmd := exec.Command("bash", "-c", "simd q block --type height 0 --output json --node tcp://127.0.0.1:22331 --output json") 48 | out, err := runCommandWithOutput(cmd) 49 | if err != nil { 50 | return "", fmt.Errorf("error running query command: %v", err) 51 | } 52 | 53 | return out, nil 54 | } 55 | 56 | type BlockInfo struct { 57 | Header struct { 58 | Height string `json:"height"` 59 | Time string `json:"time"` 60 | } `json:"header"` 61 | } 62 | 63 | func GetHeightFromBlock(blockString string) (int, error) { 64 | var block BlockInfo 65 | err := json.Unmarshal([]byte(blockString), &block) 66 | if err != nil { 67 | return 0, err 68 | } 69 | 70 | res, err := strconv.Atoi(block.Header.Height) 71 | if err != nil { 72 | return 0, err 73 | } 74 | 75 | return res, nil 76 | } 77 | 78 | func GetTimeFromBlock(blockBytes string) (time.Time, error) { 79 | var block BlockInfo 80 | err := json.Unmarshal([]byte(blockBytes), &block) 81 | if err != nil { 82 | return time.Time{}, err 83 | } 84 | 85 | res, err := time.Parse(time.RFC3339, block.Header.Time) 86 | if err != nil { 87 | return time.Time{}, err 88 | } 89 | 90 | return res, nil 91 | } 92 | 93 | func GetHeightAndTime() (int, time.Time, error) { 94 | blockBytes, err := QueryBlock() 95 | if err != nil { 96 | return 0, time.Time{}, err 97 | } 98 | 99 | height, err := GetHeightFromBlock(blockBytes) 100 | if err != nil { 101 | return 0, time.Time{}, err 102 | } 103 | 104 | timestamp, err := GetTimeFromBlock(blockBytes) 105 | if err != nil { 106 | return 0, time.Time{}, err 107 | } 108 | 109 | return height, timestamp, nil 110 | } 111 | 112 | // Queries the size of the community pool. 
113 | // For this, it will just check the number of tokens of the first denom in the community pool. 114 | func getCommunityPoolSize() (*big.Int, error) { 115 | // execute the query command 116 | cmd := exec.Command("bash", "-c", "simd q distribution community-pool --output json --node tcp://127.0.0.1:22331 | jq -r '.pool[0].amount'") 117 | out, err := runCommandWithOutput(cmd) 118 | if err != nil { 119 | return big.NewInt(-1), fmt.Errorf("error running query command: %v", err) 120 | } 121 | 122 | res := new(big.Int) 123 | 124 | res, ok := res.SetString(strings.TrimSpace(out), 10) 125 | if !ok { 126 | return big.NewInt(-1), fmt.Errorf("error parsing community pool size: %v", err) 127 | } 128 | return res, err 129 | } 130 | 131 | func sendToCommunityPool(amount int, sender string) error { 132 | // execute the tx command 133 | stringCmd := fmt.Sprintf("simd tx distribution fund-community-pool %vstake --chain-id provider --from %v-key --keyring-backend test --node tcp://127.0.0.1:22331 --home ~/nodes/provider/provider-%v -y", amount, sender, sender) 134 | cmd := exec.Command("bash", "-c", stringCmd) 135 | _, err := runCommandWithOutput(cmd) 136 | return err 137 | } 138 | 139 | func runCommandWithOutput(cmd *exec.Cmd) (string, error) { 140 | var stdout, stderr bytes.Buffer 141 | cmd.Stdout = &stdout 142 | cmd.Stderr = &stderr 143 | 144 | err := cmd.Run() 145 | if err != nil { 146 | return "", fmt.Errorf("error running command: %v\nstdout: %s\nstderr: %s", err, stdout.String(), stderr.String()) 147 | } 148 | 149 | return stdout.String(), nil 150 | } 151 | 152 | func AdvanceTime(duration time.Duration) error { 153 | stringCmd := fmt.Sprintf("curl -H 'Content-Type: application/json' -H 'Accept:application/json' --data '{\"jsonrpc\":\"2.0\",\"method\":\"advance_time\",\"params\":{\"duration_in_seconds\": \"%v\"},\"id\":1}' 127.0.0.1:22331", duration.Seconds()) 154 | 155 | cmd := exec.Command("bash", "-c", stringCmd) 156 | _, err := runCommandWithOutput(cmd) 157 | return 
err 158 | } 159 | 160 | func AdvanceBlocks(numBlocks int) error { 161 | stringCmd := fmt.Sprintf("curl -H 'Content-Type: application/json' -H 'Accept:application/json' --data '{\"jsonrpc\":\"2.0\",\"method\":\"advance_blocks\",\"params\":{\"num_blocks\": \"%v\"},\"id\":1}' 127.0.0.1:22331", numBlocks) 162 | 163 | cmd := exec.Command("bash", "-c", stringCmd) 164 | _, err := runCommandWithOutput(cmd) 165 | return err 166 | } 167 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/informalsystems/CometMock 2 | 3 | go 1.21 4 | 5 | toolchain go1.21.2 6 | 7 | require ( 8 | github.com/barkimedes/go-deepcopy v0.0.0-20220514131651-17c30cfc62df 9 | github.com/cometbft/cometbft v0.38.0 10 | github.com/cometbft/cometbft-db v0.7.0 11 | github.com/cosmos/cosmos-sdk v0.50.0-rc.1 12 | github.com/urfave/cli/v2 v2.25.7 13 | ) 14 | 15 | require ( 16 | cosmossdk.io/api v0.7.1 // indirect 17 | cosmossdk.io/collections v0.4.0 // indirect 18 | cosmossdk.io/core v0.11.0 // indirect 19 | cosmossdk.io/depinject v1.0.0-alpha.4 // indirect 20 | cosmossdk.io/errors v1.0.0 // indirect 21 | cosmossdk.io/log v1.2.1 // indirect 22 | cosmossdk.io/math v1.1.3-rc.1 // indirect 23 | cosmossdk.io/store v1.0.0-rc.0 // indirect 24 | cosmossdk.io/x/tx v0.10.0 // indirect 25 | filippo.io/edwards25519 v1.0.0 // indirect 26 | github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect 27 | github.com/99designs/keyring v1.2.1 // indirect 28 | github.com/DataDog/zstd v1.5.5 // indirect 29 | github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816 // indirect 30 | github.com/cenkalti/backoff/v4 v4.1.3 // indirect 31 | github.com/cockroachdb/errors v1.11.1 // indirect 32 | github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect 33 | github.com/cockroachdb/pebble v0.0.0-20230817233644-564b068800e0 // indirect 34 | 
github.com/cockroachdb/redact v1.1.5 // indirect 35 | github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect 36 | github.com/cosmos/btcutil v1.0.5 // indirect 37 | github.com/cosmos/cosmos-db v1.0.0 // indirect 38 | github.com/cosmos/cosmos-proto v1.0.0-beta.3 // indirect 39 | github.com/cosmos/go-bip39 v1.0.0 // indirect 40 | github.com/cosmos/gogogateway v1.2.0 // indirect 41 | github.com/cosmos/iavl v1.0.0-rc.1 // indirect 42 | github.com/cosmos/ics23/go v0.10.0 // indirect 43 | github.com/cosmos/ledger-cosmos-go v0.13.0 // indirect 44 | github.com/danieljoos/wincred v1.1.2 // indirect 45 | github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f // indirect 46 | github.com/dvsekhvalnov/jose2go v1.5.0 // indirect 47 | github.com/emicklei/dot v1.5.0 // indirect 48 | github.com/fatih/color v1.15.0 // indirect 49 | github.com/felixge/httpsnoop v1.0.2 // indirect 50 | github.com/fsnotify/fsnotify v1.6.0 // indirect 51 | github.com/getsentry/sentry-go v0.23.0 // indirect 52 | github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect 53 | github.com/gogo/googleapis v1.4.1 // indirect 54 | github.com/gogo/protobuf v1.3.2 // indirect 55 | github.com/gorilla/handlers v1.5.1 // indirect 56 | github.com/gorilla/mux v1.8.0 // indirect 57 | github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect 58 | github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect 59 | github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect 60 | github.com/hashicorp/go-hclog v1.5.0 // indirect 61 | github.com/hashicorp/go-immutable-radix v1.3.1 // indirect 62 | github.com/hashicorp/go-metrics v0.5.1 // indirect 63 | github.com/hashicorp/go-plugin v1.4.10 // indirect 64 | github.com/hashicorp/golang-lru v1.0.2 // indirect 65 | github.com/hashicorp/hcl v1.0.0 // indirect 66 | github.com/hashicorp/yamux v0.1.1 // indirect 67 | github.com/hdevalence/ed25519consensus v0.1.0 // indirect 68 | github.com/huandu/skiplist v1.2.0 // 
indirect 69 | github.com/iancoleman/strcase v0.3.0 // indirect 70 | github.com/improbable-eng/grpc-web v0.15.0 // indirect 71 | github.com/inconshreveable/mousetrap v1.1.0 // indirect 72 | github.com/kr/text v0.2.0 // indirect 73 | github.com/linxGnu/grocksdb v1.8.0 // indirect 74 | github.com/magiconair/properties v1.8.7 // indirect 75 | github.com/mattn/go-colorable v0.1.13 // indirect 76 | github.com/mattn/go-isatty v0.0.19 // indirect 77 | github.com/mitchellh/go-testing-interface v1.14.1 // indirect 78 | github.com/mitchellh/mapstructure v1.5.0 // indirect 79 | github.com/mtibben/percent v0.2.1 // indirect 80 | github.com/oklog/run v1.1.0 // indirect 81 | github.com/pelletier/go-toml/v2 v2.0.8 // indirect 82 | github.com/rogpeppe/go-internal v1.11.0 // indirect 83 | github.com/rs/cors v1.10.1 // indirect 84 | github.com/rs/zerolog v1.30.0 // indirect 85 | github.com/spf13/afero v1.9.5 // indirect 86 | github.com/spf13/cast v1.5.1 // indirect 87 | github.com/spf13/cobra v1.7.0 // indirect 88 | github.com/spf13/jwalterweatherman v1.1.0 // indirect 89 | github.com/spf13/pflag v1.0.5 // indirect 90 | github.com/spf13/viper v1.16.0 // indirect 91 | github.com/subosito/gotenv v1.4.2 // indirect 92 | github.com/tendermint/go-amino v0.16.0 // indirect 93 | github.com/tidwall/btree v1.6.0 // indirect 94 | github.com/zondax/hid v0.9.1 // indirect 95 | github.com/zondax/ledger-go v0.14.1 // indirect 96 | golang.org/x/term v0.13.0 // indirect 97 | google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 // indirect 98 | google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e // indirect 99 | gopkg.in/ini.v1 v1.67.0 // indirect 100 | gopkg.in/yaml.v2 v2.4.0 // indirect 101 | gotest.tools/v3 v3.5.0 // indirect 102 | nhooyr.io/websocket v1.8.6 // indirect 103 | pgregory.net/rapid v1.1.0 // indirect 104 | sigs.k8s.io/yaml v1.3.0 // indirect 105 | ) 106 | 107 | require ( 108 | github.com/beorn7/perks v1.0.1 // indirect 109 | 
github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect 110 | github.com/cespare/xxhash v1.1.0 // indirect 111 | github.com/cespare/xxhash/v2 v2.2.0 // indirect 112 | github.com/cosmos/gogoproto v1.4.11 // indirect 113 | github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect 114 | github.com/davecgh/go-spew v1.1.1 // indirect 115 | github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect 116 | github.com/dgraph-io/badger/v2 v2.2007.4 // indirect 117 | github.com/dgraph-io/ristretto v0.1.1 // indirect 118 | github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect 119 | github.com/dustin/go-humanize v1.0.1 // indirect 120 | github.com/go-kit/kit v0.13.0 // indirect 121 | github.com/go-kit/log v0.2.1 // indirect 122 | github.com/go-logfmt/logfmt v0.6.0 // indirect 123 | github.com/golang/glog v1.1.0 // indirect 124 | github.com/golang/protobuf v1.5.3 // indirect 125 | github.com/golang/snappy v0.0.4 // indirect 126 | github.com/google/btree v1.1.2 // indirect 127 | github.com/google/go-cmp v0.5.9 // indirect 128 | github.com/google/orderedcode v0.0.1 // indirect 129 | github.com/gorilla/websocket v1.5.0 // indirect 130 | github.com/jmhodges/levigo v1.0.0 // indirect 131 | github.com/klauspost/compress v1.16.7 // indirect 132 | github.com/kr/pretty v0.3.1 // indirect 133 | github.com/libp2p/go-buffer-pool v0.1.0 // indirect 134 | github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect 135 | github.com/oasisprotocol/curve25519-voi v0.0.0-20230110094441-db37f07504ce // indirect 136 | github.com/petermattis/goid v0.0.0-20230518223814-80aa455d8761 // indirect 137 | github.com/pkg/errors v0.9.1 // indirect 138 | github.com/pmezard/go-difflib v1.0.0 // indirect 139 | github.com/prometheus/client_golang v1.17.0 // indirect 140 | github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 // indirect 141 | github.com/prometheus/common v0.44.0 // indirect 142 | github.com/prometheus/procfs v0.11.1 // indirect 143 | 
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect 144 | github.com/russross/blackfriday/v2 v2.1.0 // indirect 145 | github.com/sasha-s/go-deadlock v0.3.1 // indirect 146 | github.com/stretchr/testify v1.8.4 // indirect 147 | github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect 148 | github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c // indirect 149 | github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect 150 | go.etcd.io/bbolt v1.3.7 // indirect 151 | golang.org/x/crypto v0.14.0 // indirect 152 | golang.org/x/exp v0.0.0-20231005195138-3e424a577f31 // indirect 153 | golang.org/x/net v0.15.0 // indirect 154 | golang.org/x/sys v0.13.0 // indirect 155 | golang.org/x/text v0.13.0 // indirect 156 | google.golang.org/genproto/googleapis/rpc v0.0.0-20230913181813-007df8e322eb // indirect 157 | google.golang.org/grpc v1.58.2 // indirect 158 | google.golang.org/protobuf v1.31.0 // indirect 159 | gopkg.in/yaml.v3 v3.0.1 // indirect 160 | ) 161 | -------------------------------------------------------------------------------- /genesis.json: -------------------------------------------------------------------------------- 1 | { 2 | "genesis_time": "2022-09-27T14:43:24.802423659Z", 3 | "chain_id": "tendermock", 4 | "initial_height": "1", 5 | "consensus_params": { 6 | "block": { 7 | "max_bytes": "22020096", 8 | "max_gas": "-1" 9 | }, 10 | "evidence": { 11 | "max_age_num_blocks": "100000", 12 | "max_age_duration": "172800", 13 | "max_bytes": "1048576" 14 | }, 15 | "validator": { 16 | "pub_key_types": [ 17 | "ed25519" 18 | ] 19 | }, 20 | "version": {} 21 | }, 22 | "app_hash": "", 23 | "app_state": { 24 | "auth": { 25 | "params": { 26 | "max_memo_characters": "256", 27 | "tx_sig_limit": "7", 28 | "tx_size_cost_per_byte": "10", 29 | "sig_verify_cost_ed25519": "590", 30 | "sig_verify_cost_secp256k1": "1000" 31 | }, 32 | "accounts": [ 33 | { 34 | "@type": "/cosmos.auth.v1beta1.BaseAccount", 35 | "address": 
"cosmos134r9s82qv8fprz3y7fw5lv40yuvsh285vxev02", 36 | "pub_key": null, 37 | "account_number": "0", 38 | "sequence": "0" 39 | }, 40 | { 41 | "@type": "/cosmos.auth.v1beta1.BaseAccount", 42 | "address": "cosmos153rpdnp3jcq4kpac8njlyf4gmf724hm6repu72", 43 | "pub_key": null, 44 | "account_number": "0", 45 | "sequence": "0" 46 | }, 47 | { 48 | "@type": "/cosmos.auth.v1beta1.BaseAccount", 49 | "address": "cosmos1x63y2p7wzsyf9ln0at56vdpe3x66jaf9qzh86t", 50 | "pub_key": null, 51 | "account_number": "0", 52 | "sequence": "0" 53 | } 54 | ] 55 | }, 56 | "authz": { 57 | "authorization": [] 58 | }, 59 | "bank": { 60 | "params": { 61 | "send_enabled": [], 62 | "default_send_enabled": true 63 | }, 64 | "balances": [ 65 | { 66 | "address": "cosmos1x63y2p7wzsyf9ln0at56vdpe3x66jaf9qzh86t", 67 | "coins": [ 68 | { 69 | "denom": "stake", 70 | "amount": "5000000000" 71 | } 72 | ] 73 | }, 74 | { 75 | "address": "cosmos134r9s82qv8fprz3y7fw5lv40yuvsh285vxev02", 76 | "coins": [ 77 | { 78 | "denom": "stake", 79 | "amount": "5000000000" 80 | } 81 | ] 82 | }, 83 | { 84 | "address": "cosmos153rpdnp3jcq4kpac8njlyf4gmf724hm6repu72", 85 | "coins": [ 86 | { 87 | "denom": "stake", 88 | "amount": "5000000000" 89 | } 90 | ] 91 | } 92 | ], 93 | "supply": [ 94 | { 95 | "denom": "stake", 96 | "amount": "15000000000" 97 | } 98 | ], 99 | "denom_metadata": [] 100 | }, 101 | "capability": { 102 | "index": "1", 103 | "owners": [] 104 | }, 105 | "crisis": { 106 | "constant_fee": { 107 | "denom": "stake", 108 | "amount": "1000" 109 | } 110 | }, 111 | "distribution": { 112 | "params": { 113 | "community_tax": "0.020000000000000000", 114 | "base_proposer_reward": "0.010000000000000000", 115 | "bonus_proposer_reward": "0.040000000000000000", 116 | "withdraw_addr_enabled": true 117 | }, 118 | "fee_pool": { 119 | "community_pool": [] 120 | }, 121 | "delegator_withdraw_infos": [], 122 | "previous_proposer": "", 123 | "outstanding_rewards": [], 124 | "validator_accumulated_commissions": [], 125 | 
"validator_historical_rewards": [], 126 | "validator_current_rewards": [], 127 | "delegator_starting_infos": [], 128 | "validator_slash_events": [] 129 | }, 130 | "evidence": { 131 | "evidence": [] 132 | }, 133 | "feegrant": { 134 | "allowances": [] 135 | }, 136 | "genutil": { 137 | "gen_txs": [ 138 | { 139 | "body": { 140 | "messages": [ 141 | { 142 | "@type": "/cosmos.staking.v1beta1.MsgCreateValidator", 143 | "description": { 144 | "moniker": "node", 145 | "identity": "", 146 | "website": "", 147 | "security_contact": "", 148 | "details": "" 149 | }, 150 | "commission": { 151 | "rate": "0.100000000000000000", 152 | "max_rate": "0.200000000000000000", 153 | "max_change_rate": "0.010000000000000000" 154 | }, 155 | "min_self_delegation": "1", 156 | "delegator_address": "cosmos1x63y2p7wzsyf9ln0at56vdpe3x66jaf9qzh86t", 157 | "validator_address": "cosmosvaloper1x63y2p7wzsyf9ln0at56vdpe3x66jaf99krjkc", 158 | "pubkey": { 159 | "@type": "/cosmos.crypto.ed25519.PubKey", 160 | "key": "pZR7fq8nmVbyJaUhV9jvlzHOG01vJQjCHi8Pb5k8m/8=" 161 | }, 162 | "value": { 163 | "denom": "stake", 164 | "amount": "5000000000" 165 | } 166 | } 167 | ], 168 | "memo": "919555b1567d03bc0ff2a6a885cc9a3e8098db49@172.17.0.2:26656", 169 | "timeout_height": "0", 170 | "extension_options": [], 171 | "non_critical_extension_options": [] 172 | }, 173 | "auth_info": { 174 | "signer_infos": [ 175 | { 176 | "public_key": { 177 | "@type": "/cosmos.crypto.secp256k1.PubKey", 178 | "key": "AsBiSVZ2Ht/+o6qgBrjlRbkfRw3sJDkb7ew1GedJCbsM" 179 | }, 180 | "mode_info": { 181 | "single": { 182 | "mode": "SIGN_MODE_DIRECT" 183 | } 184 | }, 185 | "sequence": "0" 186 | } 187 | ], 188 | "fee": { 189 | "amount": [], 190 | "gas_limit": "200000", 191 | "payer": "", 192 | "granter": "" 193 | } 194 | }, 195 | "signatures": [ 196 | "LT99RZPm6rpPsJ0ERVQkCnkI85pk57QYUkoPoTAYnX8QghrUFRIAIqFHx/SgxiRpq6XBl3hZTusNRgyqxweyNA==" 197 | ] 198 | } 199 | ] 200 | }, 201 | "gov": { 202 | "starting_proposal_id": "1", 203 | "deposits": [], 204 
| "votes": [], 205 | "proposals": [], 206 | "deposit_params": { 207 | "min_deposit": [ 208 | { 209 | "denom": "stake", 210 | "amount": "10000000" 211 | } 212 | ], 213 | "max_deposit_period": "172800s" 214 | }, 215 | "voting_params": { 216 | "voting_period": "172800s" 217 | }, 218 | "tally_params": { 219 | "quorum": "0.334000000000000000", 220 | "threshold": "0.500000000000000000", 221 | "veto_threshold": "0.334000000000000000" 222 | } 223 | }, 224 | "mint": { 225 | "minter": { 226 | "inflation": "0.130000000000000000", 227 | "annual_provisions": "0.000000000000000000" 228 | }, 229 | "params": { 230 | "mint_denom": "stake", 231 | "inflation_rate_change": "0.130000000000000000", 232 | "inflation_max": "0.200000000000000000", 233 | "inflation_min": "0.070000000000000000", 234 | "goal_bonded": "0.670000000000000000", 235 | "blocks_per_year": "6311520" 236 | } 237 | }, 238 | "params": null, 239 | "slashing": { 240 | "params": { 241 | "signed_blocks_window": "100", 242 | "min_signed_per_window": "0.500000000000000000", 243 | "downtime_jail_duration": "600s", 244 | "slash_fraction_double_sign": "0.050000000000000000", 245 | "slash_fraction_downtime": "0.010000000000000000" 246 | }, 247 | "signing_infos": [], 248 | "missed_blocks": [] 249 | }, 250 | "staking": { 251 | "params": { 252 | "unbonding_time": "1814400s", 253 | "max_validators": 100, 254 | "max_entries": 7, 255 | "historical_entries": 10000, 256 | "bond_denom": "stake" 257 | }, 258 | "last_total_power": "0", 259 | "last_validator_powers": [], 260 | "validators": [], 261 | "delegations": [], 262 | "unbonding_delegations": [], 263 | "redelegations": [], 264 | "exported": false 265 | }, 266 | "upgrade": {}, 267 | "vesting": {} 268 | } 269 | } -------------------------------------------------------------------------------- /e2e-tests/local-testnet-singlechain-cometbft.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eux 3 | 4 | BINARY_NAME=$1 5 | 6 | # User 
balance of stake tokens 7 | USER_COINS="100000000000stake" 8 | # Amount of stake tokens staked 9 | STAKE="100000000stake" 10 | # Node IP address 11 | NODE_IP="127.0.0.1" 12 | 13 | # Home directory 14 | HOME_DIR=$HOME 15 | 16 | # Validator moniker 17 | MONIKERS=("coordinator" "alice" "bob") 18 | LEAD_VALIDATOR_MONIKER="coordinator" 19 | 20 | PROV_NODES_ROOT_DIR=${HOME_DIR}/nodes/provider 21 | CONS_NODES_ROOT_DIR=${HOME_DIR}/nodes/consumer 22 | 23 | # Base port. Ports assigned after these ports sequentially by nodes. 24 | RPC_LADDR_BASEPORT=29170 25 | P2P_LADDR_BASEPORT=29180 26 | GRPC_LADDR_BASEPORT=29190 27 | NODE_ADDRESS_BASEPORT=29200 28 | PPROF_LADDR_BASEPORT=29210 29 | CLIENT_BASEPORT=29220 30 | 31 | # keeps a comma separated list of node addresses for provider and consumer 32 | PROVIDER_NODE_LISTEN_ADDR_STR="" 33 | CONSUMER_NODE_LISTEN_ADDR_STR="" 34 | 35 | # Strings that keep the homes of provider nodes and homes of consumer nodes 36 | PROV_NODES_HOME_STR="" 37 | CONS_NODES_HOME_STR="" 38 | 39 | PROVIDER_COMETMOCK_ADDR=tcp://$NODE_IP:22331 40 | CONSUMER_COMETMOCK_ADDR=tcp://$NODE_IP:22332 41 | 42 | # Clean start 43 | pkill -f ^$BINARY_NAME &> /dev/null || true 44 | pkill -f ^cometmock &> /dev/null || true 45 | sleep 1 46 | rm -rf ${PROV_NODES_ROOT_DIR} 47 | rm -rf ${CONS_NODES_ROOT_DIR} 48 | 49 | # Let lead validator create genesis file 50 | LEAD_VALIDATOR_PROV_DIR=${PROV_NODES_ROOT_DIR}/provider-${LEAD_VALIDATOR_MONIKER} 51 | LEAD_VALIDATOR_CONS_DIR=${CONS_NODES_ROOT_DIR}/consumer-${LEAD_VALIDATOR_MONIKER} 52 | LEAD_PROV_KEY=${LEAD_VALIDATOR_MONIKER}-key 53 | LEAD_PROV_LISTEN_ADDR=tcp://${NODE_IP}:${RPC_LADDR_BASEPORT} 54 | 55 | for index in "${!MONIKERS[@]}" 56 | do 57 | MONIKER=${MONIKERS[$index]} 58 | # validator key 59 | PROV_KEY=${MONIKER}-key 60 | 61 | # home directory of this validator on provider 62 | PROV_NODE_DIR=${PROV_NODES_ROOT_DIR}/provider-${MONIKER} 63 | 64 | # home directory of this validator on consumer 65 | 
CONS_NODE_DIR=${CONS_NODES_ROOT_DIR}/consumer-${MONIKER} 66 | 67 | # Build genesis file and node directory structure 68 | $BINARY_NAME init $MONIKER --chain-id provider --home ${PROV_NODE_DIR} 69 | jq ".app_state.gov.params.voting_period = \"100000s\" | .app_state.staking.params.unbonding_time = \"86400s\" | .app_state.slashing.params.signed_blocks_window=\"1000\" " \ 70 | ${PROV_NODE_DIR}/config/genesis.json > \ 71 | ${PROV_NODE_DIR}/edited_genesis.json && mv ${PROV_NODE_DIR}/edited_genesis.json ${PROV_NODE_DIR}/config/genesis.json 72 | 73 | 74 | sleep 1 75 | 76 | # Create account keypair 77 | $BINARY_NAME keys add $PROV_KEY --home ${PROV_NODE_DIR} --keyring-backend test --output json > ${PROV_NODE_DIR}/${PROV_KEY}.json 2>&1 78 | sleep 1 79 | 80 | # copy genesis in, unless this validator is the lead validator 81 | if [ $MONIKER != $LEAD_VALIDATOR_MONIKER ]; then 82 | cp ${LEAD_VALIDATOR_PROV_DIR}/config/genesis.json ${PROV_NODE_DIR}/config/genesis.json 83 | fi 84 | 85 | # Add stake to user 86 | PROV_ACCOUNT_ADDR=$(jq -r '.address' ${PROV_NODE_DIR}/${PROV_KEY}.json) 87 | $BINARY_NAME genesis add-genesis-account $PROV_ACCOUNT_ADDR $USER_COINS --home ${PROV_NODE_DIR} --keyring-backend test 88 | sleep 1 89 | 90 | # copy genesis out, unless this validator is the lead validator 91 | if [ $MONIKER != $LEAD_VALIDATOR_MONIKER ]; then 92 | cp ${PROV_NODE_DIR}/config/genesis.json ${LEAD_VALIDATOR_PROV_DIR}/config/genesis.json 93 | fi 94 | 95 | PPROF_LADDR=${NODE_IP}:$(($PPROF_LADDR_BASEPORT + $index)) 96 | P2P_LADDR_PORT=$(($P2P_LADDR_BASEPORT + $index)) 97 | 98 | # adjust configs of this node 99 | sed -i -r 's/timeout_commit = "5s"/timeout_commit = "3s"/g' ${PROV_NODE_DIR}/config/config.toml 100 | sed -i -r 's/timeout_propose = "3s"/timeout_propose = "1s"/g' ${PROV_NODE_DIR}/config/config.toml 101 | 102 | # make address book non-strict. 
necessary for this setup 103 | sed -i -r 's/addr_book_strict = true/addr_book_strict = false/g' ${PROV_NODE_DIR}/config/config.toml 104 | 105 | # avoid port double binding 106 | sed -i -r "s/pprof_laddr = \"localhost:6060\"/pprof_laddr = \"${PPROF_LADDR}\"/g" ${PROV_NODE_DIR}/config/config.toml 107 | 108 | # allow duplicate IP addresses (all nodes are on the same machine) 109 | sed -i -r 's/allow_duplicate_ip = false/allow_duplicate_ip = true/g' ${PROV_NODE_DIR}/config/config.toml 110 | done 111 | 112 | for MONIKER in "${MONIKERS[@]}" 113 | do 114 | # validator key 115 | PROV_KEY=${MONIKER}-key 116 | 117 | # home directory of this validator on provider 118 | PROV_NODE_DIR=${PROV_NODES_ROOT_DIR}/provider-${MONIKER} 119 | 120 | # copy genesis in, unless this validator is the lead validator 121 | if [ $MONIKER != $LEAD_VALIDATOR_MONIKER ]; then 122 | cp ${LEAD_VALIDATOR_PROV_DIR}/config/genesis.json* ${PROV_NODE_DIR}/config/genesis.json 123 | fi 124 | 125 | # Stake 1/1000 user's coins 126 | $BINARY_NAME genesis gentx $PROV_KEY $STAKE --chain-id provider --home ${PROV_NODE_DIR} --keyring-backend test --moniker $MONIKER 127 | sleep 1 128 | 129 | # Copy gentxs to the lead validator for possible future collection. 
130 | # Obviously we don't need to copy the first validator's gentx to itself 131 | if [ $MONIKER != $LEAD_VALIDATOR_MONIKER ]; then 132 | cp ${PROV_NODE_DIR}/config/gentx/* ${LEAD_VALIDATOR_PROV_DIR}/config/gentx/ 133 | fi 134 | done 135 | 136 | # Collect genesis transactions with lead validator 137 | $BINARY_NAME genesis collect-gentxs --home ${LEAD_VALIDATOR_PROV_DIR} --gentx-dir ${LEAD_VALIDATOR_PROV_DIR}/config/gentx/ 138 | 139 | sleep 1 140 | 141 | 142 | for index in "${!MONIKERS[@]}" 143 | do 144 | MONIKER=${MONIKERS[$index]} 145 | 146 | PERSISTENT_PEERS="" 147 | 148 | for peer_index in "${!MONIKERS[@]}" 149 | do 150 | if [ $index == $peer_index ]; then 151 | continue 152 | fi 153 | PEER_MONIKER=${MONIKERS[$peer_index]} 154 | 155 | PEER_PROV_NODE_DIR=${PROV_NODES_ROOT_DIR}/provider-${PEER_MONIKER} 156 | 157 | PEER_NODE_ID=$($BINARY_NAME tendermint show-node-id --home ${PEER_PROV_NODE_DIR}) 158 | 159 | PEER_P2P_LADDR_PORT=$(($P2P_LADDR_BASEPORT + $peer_index)) 160 | PERSISTENT_PEERS="$PERSISTENT_PEERS,$PEER_NODE_ID@${NODE_IP}:${PEER_P2P_LADDR_PORT}" 161 | done 162 | 163 | # remove trailing comma from persistent peers 164 | PERSISTENT_PEERS=${PERSISTENT_PEERS:1} 165 | 166 | # validator key 167 | PROV_KEY=${MONIKER}-key 168 | 169 | # home directory of this validator on provider 170 | PROV_NODE_DIR=${PROV_NODES_ROOT_DIR}/provider-${MONIKER} 171 | 172 | # home directory of this validator on consumer 173 | CONS_NODE_DIR=${PROV_NODES_ROOT_DIR}/consumer-${MONIKER} 174 | 175 | # copy genesis in, unless this validator is already the lead validator and thus it already has its genesis 176 | if [ $MONIKER != $LEAD_VALIDATOR_MONIKER ]; then 177 | cp ${LEAD_VALIDATOR_PROV_DIR}/config/genesis.json ${PROV_NODE_DIR}/config/genesis.json 178 | fi 179 | 180 | # enable vote extensions by setting .consesnsus.params.abci.vote_extensions_enable_height to 1, but 1 currently crashes, see https://github.com/cosmos/cosmos-sdk/issues/18029#issuecomment-1754598598 181 | jq 
".consensus.params.abci.vote_extensions_enable_height = \"2\"" ${PROV_NODE_DIR}/config/genesis.json > ${PROV_NODE_DIR}/edited_genesis.json && mv ${PROV_NODE_DIR}/edited_genesis.json ${PROV_NODE_DIR}/config/genesis.json 182 | 183 | RPC_LADDR_PORT=$(($RPC_LADDR_BASEPORT + $index)) 184 | P2P_LADDR_PORT=$(($P2P_LADDR_BASEPORT + $index)) 185 | GRPC_LADDR_PORT=$(($GRPC_LADDR_BASEPORT + $index)) 186 | NODE_ADDRESS_PORT=$(($NODE_ADDRESS_BASEPORT + $index)) 187 | 188 | PROVIDER_NODE_LISTEN_ADDR_STR="${NODE_IP}:${NODE_ADDRESS_PORT},$PROVIDER_NODE_LISTEN_ADDR_STR" 189 | PROV_NODES_HOME_STR="${PROV_NODE_DIR},$PROV_NODES_HOME_STR" 190 | 191 | # Start gaia 192 | $BINARY_NAME start \ 193 | --home ${PROV_NODE_DIR} \ 194 | --p2p.persistent_peers ${PERSISTENT_PEERS} \ 195 | --rpc.laddr tcp://${NODE_IP}:${RPC_LADDR_PORT} \ 196 | --grpc.address ${NODE_IP}:${GRPC_LADDR_PORT} \ 197 | --address tcp://${NODE_IP}:${NODE_ADDRESS_PORT} \ 198 | --p2p.laddr tcp://${NODE_IP}:${P2P_LADDR_PORT} \ 199 | --grpc-web.enable=false &> ${PROV_NODE_DIR}/logs & 200 | 201 | sleep 5 202 | done 203 | 204 | PROVIDER_NODE_LISTEN_ADDR_STR=${PROVIDER_NODE_LISTEN_ADDR_STR::${#PROVIDER_NODE_LISTEN_ADDR_STR}-1} 205 | PROV_NODES_HOME_STR=${PROV_NODES_HOME_STR::${#PROV_NODES_HOME_STR}-1} 206 | 207 | echo "Testnet applications are set up!" 
208 | 209 | sleep 5 -------------------------------------------------------------------------------- /cometmock/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "os" 7 | "strings" 8 | "time" 9 | 10 | comet_abciclient "github.com/cometbft/cometbft/abci/client" 11 | cometlog "github.com/cometbft/cometbft/libs/log" 12 | "github.com/cometbft/cometbft/privval" 13 | "github.com/cometbft/cometbft/state" 14 | "github.com/cometbft/cometbft/types" 15 | genutiltypes "github.com/cosmos/cosmos-sdk/x/genutil/types" 16 | "github.com/informalsystems/CometMock/cometmock/abci_client" 17 | "github.com/informalsystems/CometMock/cometmock/rpc_server" 18 | "github.com/informalsystems/CometMock/cometmock/storage" 19 | "github.com/urfave/cli/v2" 20 | ) 21 | 22 | const version = "v0.38.x" 23 | 24 | // GetMockPVsFromNodeHomes returns a list of MockPVs, created with the priv_validator_key's from the specified node homes 25 | // We use MockPV because they do not do sanity checks that would e.g. 
prevent double signing 26 | func GetMockPVsFromNodeHomes(nodeHomes []string) []types.PrivValidator { 27 | mockPVs := make([]types.PrivValidator, 0) 28 | 29 | for _, nodeHome := range nodeHomes { 30 | privValidatorKeyFile := nodeHome + "/config/priv_validator_key.json" 31 | privValidatorStateFile := nodeHome + "/data/priv_validator_state.json" 32 | validator := privval.LoadFilePV(privValidatorKeyFile, privValidatorStateFile) 33 | 34 | mockPV := types.NewMockPVWithParams(validator.Key.PrivKey, false, false) 35 | mockPVs = append(mockPVs, mockPV) 36 | } 37 | 38 | return mockPVs 39 | } 40 | 41 | func main() { 42 | logger := cometlog.NewTMLogger(cometlog.NewSyncWriter(os.Stdout)) 43 | 44 | argumentString := "[--block-time=value] [--auto-tx=] [--block-production-interval=] [--starting-timestamp=] [--starting-timestamp-from-genesis=] " 45 | 46 | app := &cli.App{ 47 | Name: "cometmock", 48 | HideHelpCommand: true, 49 | Commands: []*cli.Command{ 50 | { 51 | Name: "version", 52 | Usage: "Print the version of cometmock", 53 | Action: func(c *cli.Context) error { 54 | fmt.Printf("%s\n", version) 55 | return nil 56 | }, 57 | }, 58 | }, 59 | Flags: []cli.Flag{ 60 | &cli.Int64Flag{ 61 | Name: "block-time", 62 | Usage: ` 63 | The number of milliseconds by which the block timestamp should advance from one block to the next. 64 | If this is <0, block timestamps will advance with the system time between the block productions. 65 | Even then, it is still possible to shift the block time from the system time, e.g. by setting an initial timestamp 66 | or by using the 'advance_time' endpoint.`, 67 | Value: -1, 68 | }, 69 | &cli.BoolFlag{ 70 | Name: "auto-tx", 71 | Usage: ` 72 | If this is true, transactions are included immediately 73 | after they are received via broadcast_tx, i.e. a new block 74 | is created when a BroadcastTx endpoint is hit. 
75 | If this is false, transactions are still included 76 | upon creation of new blocks, but CometMock will not specifically produce 77 | a new block when a transaction is broadcast.`, 78 | Value: true, 79 | }, 80 | &cli.Int64Flag{ 81 | Name: "block-production-interval", 82 | Usage: ` 83 | Time to sleep between blocks in milliseconds. 84 | To disable block production, set to 0. 85 | This will not necessarily mean block production is this fast 86 | - it is just the sleep time between blocks. 87 | Setting this to a value < 0 disables automatic block production. 88 | In this case, blocks are only produced when instructed explicitly either by 89 | advancing blocks or broadcasting transactions.`, 90 | Value: 1000, 91 | }, 92 | &cli.Int64Flag{ 93 | Name: "starting-timestamp", 94 | Usage: ` 95 | The timestamp to use for the first block, given in milliseconds since the unix epoch. 96 | If this is < 0, the current system time is used. 97 | If this is >= 0, the system time is ignored and this timestamp is used for the first block instead.`, 98 | Value: -1, 99 | }, 100 | &cli.BoolFlag{ 101 | Name: "starting-timestamp-from-genesis", 102 | Usage: ` 103 | If this is true, it overrides the starting-timestamp, and instead 104 | bases the time for the first block on the genesis time, incremented by the block time 105 | or the system time between creating the genesis request and producing the first block.`, 106 | Value: false, 107 | }, 108 | }, 109 | ArgsUsage: argumentString, 110 | Action: func(c *cli.Context) error { 111 | if c.NArg() < 5 { 112 | return cli.Exit("Not enough arguments.\nUsage: "+argumentString, 1) 113 | } 114 | 115 | appAddresses := strings.Split(c.Args().Get(0), ",") 116 | genesisFile := c.Args().Get(1) 117 | cometMockListenAddress := c.Args().Get(2) 118 | nodeHomesString := c.Args().Get(3) 119 | connectionMode := c.Args().Get(4) 120 | 121 | if connectionMode != "socket" && connectionMode != "grpc" { 122 | return cli.Exit(fmt.Sprintf("Invalid connection mode: %s. 
Connection mode must be either 'socket' or 'grpc'.\nUsage: %s", connectionMode, argumentString), 1) 123 | } 124 | 125 | blockProductionInterval := c.Int("block-production-interval") 126 | fmt.Printf("Block production interval: %d\n", blockProductionInterval) 127 | 128 | // read node homes from args 129 | nodeHomes := strings.Split(nodeHomesString, ",") 130 | 131 | // get priv validators from node Homes 132 | privVals := GetMockPVsFromNodeHomes(nodeHomes) 133 | 134 | appGenesis, err := genutiltypes.AppGenesisFromFile(genesisFile) 135 | if err != nil { 136 | logger.Error(err.Error()) 137 | } 138 | 139 | genesisDoc, err := appGenesis.ToGenesisDoc() 140 | if err != nil { 141 | logger.Error(err.Error()) 142 | panic(err) 143 | } 144 | 145 | curState, err := state.MakeGenesisState(genesisDoc) 146 | if err != nil { 147 | logger.Error(err.Error()) 148 | panic(err) 149 | } 150 | 151 | // read starting timestamp from args 152 | // if starting timestamp should be taken from genesis, 153 | // read it from there 154 | var startingTime time.Time 155 | if c.Bool("starting-timestamp-from-genesis") { 156 | startingTime = genesisDoc.GenesisTime 157 | } else { 158 | if c.Int64("starting-timestamp") < 0 { 159 | startingTime = time.Now() 160 | } else { 161 | dur := time.Duration(c.Int64("starting-timestamp")) * time.Millisecond 162 | startingTime = time.Unix(0, 0).Add(dur) 163 | } 164 | } 165 | fmt.Printf("Starting time: %s\n", startingTime.Format(time.RFC3339)) 166 | 167 | // read block time from args 168 | blockTime := time.Duration(c.Int64("block-time")) * time.Millisecond 169 | fmt.Printf("Block time: %d\n", blockTime.Milliseconds()) 170 | 171 | clientMap := make(map[string]abci_client.AbciCounterpartyClient) 172 | 173 | for i, appAddress := range appAddresses { 174 | logger.Info("Connecting to client at %v", appAddress) 175 | 176 | var client comet_abciclient.Client 177 | if connectionMode == "grpc" { 178 | client = comet_abciclient.NewGRPCClient(appAddress, true) 179 | } else { 
180 | client = comet_abciclient.NewSocketClient(appAddress, true) 181 | } 182 | client.SetLogger(logger) 183 | client.Start() 184 | 185 | privVal := privVals[i] 186 | 187 | pubkey, err := privVal.GetPubKey() 188 | if err != nil { 189 | logger.Error(err.Error()) 190 | panic(err) 191 | } 192 | validatorAddress := pubkey.Address() 193 | counterpartyClient := abci_client.NewAbciCounterpartyClient(client, appAddress, validatorAddress.String(), privVal) 194 | 195 | clientMap[validatorAddress.String()] = *counterpartyClient 196 | } 197 | 198 | var timeHandler abci_client.TimeHandler 199 | if blockTime < 0 { 200 | timeHandler = abci_client.NewSystemClockTimeHandler(startingTime) 201 | } else { 202 | timeHandler = abci_client.NewFixedBlockTimeHandler(blockTime) 203 | } 204 | 205 | abci_client.GlobalClient = abci_client.NewAbciClient( 206 | clientMap, 207 | logger, 208 | curState, 209 | &types.Block{}, 210 | &types.ExtendedCommit{}, 211 | &storage.MapStorage{}, 212 | timeHandler, 213 | true, 214 | ) 215 | 216 | abci_client.GlobalClient.AutoIncludeTx = c.Bool("auto-tx") 217 | fmt.Printf("Auto include tx: %t\n", abci_client.GlobalClient.AutoIncludeTx) 218 | 219 | // initialize chain 220 | err = abci_client.GlobalClient.SendInitChain(curState, genesisDoc) 221 | if err != nil { 222 | logger.Error(err.Error()) 223 | panic(err) 224 | } 225 | 226 | var firstBlockTime time.Time 227 | if blockTime < 0 { 228 | firstBlockTime = startingTime 229 | } else { 230 | firstBlockTime = startingTime.Add(blockTime) 231 | } 232 | 233 | // run an empty block 234 | err = abci_client.GlobalClient.RunBlockWithTime(firstBlockTime) 235 | if err != nil { 236 | logger.Error(err.Error()) 237 | panic(err) 238 | } 239 | 240 | go rpc_server.StartRPCServerWithDefaultConfig(cometMockListenAddress, logger) 241 | 242 | if blockProductionInterval > 0 { 243 | // produce blocks according to blockTime 244 | for { 245 | err := abci_client.GlobalClient.RunBlock() 246 | if err != nil { 247 | logger.Error(err.Error()) 
248 | panic(err) 249 | } 250 | time.Sleep(time.Millisecond * time.Duration(blockProductionInterval)) 251 | } 252 | } else { 253 | // wait forever 254 | time.Sleep(time.Hour * 24 * 365 * 100) // 100 years 255 | } 256 | return nil 257 | }, 258 | } 259 | 260 | err := app.Run(os.Args) 261 | if err != nil { 262 | log.Fatal(err) 263 | } 264 | } 265 | -------------------------------------------------------------------------------- /e2e-tests/local-testnet-singlechain-setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ## This script sets up the environment to run the single chain local testnet. 3 | ## Importantly, it does not actually start nodes (or cometmock) - instead, 4 | ## it will produce two scripts, start_apps.sh and start_cometmock.sh. 5 | ## After this script is done setting up, simply run these two scripts to run the 6 | ## testnet. 7 | ## The reason for this is that we want to be able to make the testnet setup 8 | ## differentiated from the actual run to allow for better caching in Docker. 9 | 10 | set -eux 11 | 12 | BINARY_NAME=$1 13 | 14 | # User balance of stake tokens 15 | USER_COINS="100000000000stake" 16 | # Amount of stake tokens staked 17 | STAKE="100000000stake" 18 | # Node IP address 19 | NODE_IP="127.0.0.1" 20 | 21 | # Home directory 22 | HOME_DIR=$HOME 23 | 24 | rm -rf ./start_apps.sh 25 | rm -rf ./start_cometmock.sh 26 | 27 | # Validator moniker 28 | MONIKERS=("coordinator" "alice" "bob") 29 | LEAD_VALIDATOR_MONIKER="coordinator" 30 | 31 | PROV_NODES_ROOT_DIR=${HOME_DIR}/nodes/provider 32 | CONS_NODES_ROOT_DIR=${HOME_DIR}/nodes/consumer 33 | 34 | # Base port. Ports assigned after these ports sequentially by nodes. 
35 | RPC_LADDR_BASEPORT=29170 36 | P2P_LADDR_BASEPORT=29180 37 | GRPC_LADDR_BASEPORT=29190 38 | NODE_ADDRESS_BASEPORT=29200 39 | PPROF_LADDR_BASEPORT=29210 40 | CLIENT_BASEPORT=29220 41 | 42 | # keeps a comma separated list of node addresses for provider and consumer 43 | PROVIDER_NODE_LISTEN_ADDR_STR="" 44 | CONSUMER_NODE_LISTEN_ADDR_STR="" 45 | 46 | # Strings that keep the homes of provider nodes and homes of consumer nodes 47 | PROV_NODES_HOME_STR="" 48 | CONS_NODES_HOME_STR="" 49 | 50 | PROVIDER_COMETMOCK_ADDR=tcp://$NODE_IP:22331 51 | CONSUMER_COMETMOCK_ADDR=tcp://$NODE_IP:22332 52 | 53 | # Clean start 54 | pkill -f ^$BINARY_NAME &> /dev/null || true 55 | pkill -f ^cometmock &> /dev/null || true 56 | sleep 1 57 | rm -rf ${PROV_NODES_ROOT_DIR} 58 | rm -rf ${CONS_NODES_ROOT_DIR} 59 | 60 | # Let lead validator create genesis file 61 | LEAD_VALIDATOR_PROV_DIR=${PROV_NODES_ROOT_DIR}/provider-${LEAD_VALIDATOR_MONIKER} 62 | LEAD_VALIDATOR_CONS_DIR=${CONS_NODES_ROOT_DIR}/consumer-${LEAD_VALIDATOR_MONIKER} 63 | LEAD_PROV_KEY=${LEAD_VALIDATOR_MONIKER}-key 64 | LEAD_PROV_LISTEN_ADDR=tcp://${NODE_IP}:${RPC_LADDR_BASEPORT} 65 | 66 | for index in "${!MONIKERS[@]}" 67 | do 68 | MONIKER=${MONIKERS[$index]} 69 | # validator key 70 | PROV_KEY=${MONIKER}-key 71 | 72 | # home directory of this validator on provider 73 | PROV_NODE_DIR=${PROV_NODES_ROOT_DIR}/provider-${MONIKER} 74 | 75 | # home directory of this validator on consumer 76 | CONS_NODE_DIR=${CONS_NODES_ROOT_DIR}/consumer-${MONIKER} 77 | 78 | # Build genesis file and node directory structure 79 | $BINARY_NAME init $MONIKER --chain-id provider --home ${PROV_NODE_DIR} 80 | jq ".app_state.gov.params.voting_period = \"100000s\" | .app_state.staking.params.unbonding_time = \"86400s\" | .app_state.slashing.params.signed_blocks_window=\"1000\" " \ 81 | ${PROV_NODE_DIR}/config/genesis.json > \ 82 | ${PROV_NODE_DIR}/edited_genesis.json && mv ${PROV_NODE_DIR}/edited_genesis.json ${PROV_NODE_DIR}/config/genesis.json 83 | 84 | 85 | 
sleep 1 86 | 87 | # Create account keypair 88 | $BINARY_NAME keys add $PROV_KEY --home ${PROV_NODE_DIR} --keyring-backend test --output json > ${PROV_NODE_DIR}/${PROV_KEY}.json 2>&1 89 | sleep 1 90 | 91 | # copy genesis in, unless this validator is the lead validator 92 | if [ $MONIKER != $LEAD_VALIDATOR_MONIKER ]; then 93 | cp ${LEAD_VALIDATOR_PROV_DIR}/config/genesis.json ${PROV_NODE_DIR}/config/genesis.json 94 | fi 95 | 96 | # Add stake to user 97 | PROV_ACCOUNT_ADDR=$(jq -r '.address' ${PROV_NODE_DIR}/${PROV_KEY}.json) 98 | $BINARY_NAME genesis add-genesis-account $PROV_ACCOUNT_ADDR $USER_COINS --home ${PROV_NODE_DIR} --keyring-backend test 99 | sleep 1 100 | 101 | # copy genesis out, unless this validator is the lead validator 102 | if [ $MONIKER != $LEAD_VALIDATOR_MONIKER ]; then 103 | cp ${PROV_NODE_DIR}/config/genesis.json ${LEAD_VALIDATOR_PROV_DIR}/config/genesis.json 104 | fi 105 | 106 | PPROF_LADDR=${NODE_IP}:$(($PPROF_LADDR_BASEPORT + $index)) 107 | P2P_LADDR_PORT=$(($P2P_LADDR_BASEPORT + $index)) 108 | 109 | # adjust configs of this node 110 | sed -i -r 's/timeout_commit = "5s"/timeout_commit = "3s"/g' ${PROV_NODE_DIR}/config/config.toml 111 | sed -i -r 's/timeout_propose = "3s"/timeout_propose = "1s"/g' ${PROV_NODE_DIR}/config/config.toml 112 | 113 | # make address book non-strict. 
necessary for this setup 114 | sed -i -r 's/addr_book_strict = true/addr_book_strict = false/g' ${PROV_NODE_DIR}/config/config.toml 115 | 116 | # avoid port double binding 117 | sed -i -r "s/pprof_laddr = \"localhost:6060\"/pprof_laddr = \"${PPROF_LADDR}\"/g" ${PROV_NODE_DIR}/config/config.toml 118 | 119 | # allow duplicate IP addresses (all nodes are on the same machine) 120 | sed -i -r 's/allow_duplicate_ip = false/allow_duplicate_ip = true/g' ${PROV_NODE_DIR}/config/config.toml 121 | done 122 | 123 | for MONIKER in "${MONIKERS[@]}" 124 | do 125 | # validator key 126 | PROV_KEY=${MONIKER}-key 127 | 128 | # home directory of this validator on provider 129 | PROV_NODE_DIR=${PROV_NODES_ROOT_DIR}/provider-${MONIKER} 130 | 131 | # copy genesis in, unless this validator is the lead validator 132 | if [ $MONIKER != $LEAD_VALIDATOR_MONIKER ]; then 133 | cp ${LEAD_VALIDATOR_PROV_DIR}/config/genesis.json* ${PROV_NODE_DIR}/config/genesis.json 134 | fi 135 | 136 | # Stake 1/1000 user's coins 137 | $BINARY_NAME genesis gentx $PROV_KEY $STAKE --chain-id provider --home ${PROV_NODE_DIR} --keyring-backend test --moniker $MONIKER 138 | sleep 1 139 | 140 | # Copy gentxs to the lead validator for possible future collection. 
141 | # Obviously we don't need to copy the first validator's gentx to itself 142 | if [ $MONIKER != $LEAD_VALIDATOR_MONIKER ]; then 143 | cp ${PROV_NODE_DIR}/config/gentx/* ${LEAD_VALIDATOR_PROV_DIR}/config/gentx/ 144 | fi 145 | done 146 | 147 | # Collect genesis transactions with lead validator 148 | $BINARY_NAME genesis collect-gentxs --home ${LEAD_VALIDATOR_PROV_DIR} --gentx-dir ${LEAD_VALIDATOR_PROV_DIR}/config/gentx/ 149 | 150 | sleep 1 151 | 152 | START_COMMANDS="" 153 | for index in "${!MONIKERS[@]}" 154 | do 155 | MONIKER=${MONIKERS[$index]} 156 | 157 | PERSISTENT_PEERS="" 158 | 159 | for peer_index in "${!MONIKERS[@]}" 160 | do 161 | if [ $index == $peer_index ]; then 162 | continue 163 | fi 164 | PEER_MONIKER=${MONIKERS[$peer_index]} 165 | 166 | PEER_PROV_NODE_DIR=${PROV_NODES_ROOT_DIR}/provider-${PEER_MONIKER} 167 | 168 | PEER_NODE_ID=$($BINARY_NAME tendermint show-node-id --home ${PEER_PROV_NODE_DIR}) 169 | 170 | PEER_P2P_LADDR_PORT=$(($P2P_LADDR_BASEPORT + $peer_index)) 171 | PERSISTENT_PEERS="$PERSISTENT_PEERS,$PEER_NODE_ID@${NODE_IP}:${PEER_P2P_LADDR_PORT}" 172 | done 173 | 174 | # remove trailing comma from persistent peers 175 | PERSISTENT_PEERS=${PERSISTENT_PEERS:1} 176 | 177 | # validator key 178 | PROV_KEY=${MONIKER}-key 179 | 180 | # home directory of this validator on provider 181 | PROV_NODE_DIR=${PROV_NODES_ROOT_DIR}/provider-${MONIKER} 182 | 183 | # home directory of this validator on consumer 184 | CONS_NODE_DIR=${PROV_NODES_ROOT_DIR}/consumer-${MONIKER} 185 | 186 | # copy genesis in, unless this validator is already the lead validator and thus it already has its genesis 187 | if [ $MONIKER != $LEAD_VALIDATOR_MONIKER ]; then 188 | cp ${LEAD_VALIDATOR_PROV_DIR}/config/genesis.json ${PROV_NODE_DIR}/config/genesis.json 189 | fi 190 | 191 | # enable vote extensions by setting .consesnsus.params.abci.vote_extensions_enable_height to 1, but 1 does not work currently - set it to 2 instead. 
see https://github.com/cosmos/cosmos-sdk/issues/18029#issuecomment-1754598598 192 | jq ".consensus.params.abci.vote_extensions_enable_height = \"2\"" ${PROV_NODE_DIR}/config/genesis.json > ${PROV_NODE_DIR}/edited_genesis.json && mv ${PROV_NODE_DIR}/edited_genesis.json ${PROV_NODE_DIR}/config/genesis.json 193 | 194 | RPC_LADDR_PORT=$(($RPC_LADDR_BASEPORT + $index)) 195 | P2P_LADDR_PORT=$(($P2P_LADDR_BASEPORT + $index)) 196 | GRPC_LADDR_PORT=$(($GRPC_LADDR_BASEPORT + $index)) 197 | NODE_ADDRESS_PORT=$(($NODE_ADDRESS_BASEPORT + $index)) 198 | 199 | PROVIDER_NODE_LISTEN_ADDR_STR="${NODE_IP}:${NODE_ADDRESS_PORT},$PROVIDER_NODE_LISTEN_ADDR_STR" 200 | PROV_NODES_HOME_STR="${PROV_NODE_DIR},$PROV_NODES_HOME_STR" 201 | 202 | rm -rf ${PROV_NODES_ROOT_DIR}_bkup 203 | cp -r ${PROV_NODES_ROOT_DIR} ${PROV_NODES_ROOT_DIR}_bkup 204 | 205 | # Start gaia 206 | echo $BINARY_NAME start \ 207 | --home ${PROV_NODE_DIR} \ 208 | --transport=grpc --with-tendermint=false \ 209 | --p2p.persistent_peers ${PERSISTENT_PEERS} \ 210 | --rpc.laddr tcp://${NODE_IP}:${RPC_LADDR_PORT} \ 211 | --grpc.address ${NODE_IP}:${GRPC_LADDR_PORT} \ 212 | --address tcp://${NODE_IP}:${NODE_ADDRESS_PORT} \ 213 | --p2p.laddr tcp://${NODE_IP}:${P2P_LADDR_PORT} \ 214 | --grpc-web.enable=false "&> ${PROV_NODE_DIR}/logs &" | tee -a start_apps.sh 215 | 216 | sleep 5 217 | done 218 | 219 | PROVIDER_NODE_LISTEN_ADDR_STR=${PROVIDER_NODE_LISTEN_ADDR_STR::${#PROVIDER_NODE_LISTEN_ADDR_STR}-1} 220 | PROV_NODES_HOME_STR=${PROV_NODES_HOME_STR::${#PROV_NODES_HOME_STR}-1} 221 | 222 | echo "Testnet applications are set up! Starting CometMock..." 
223 | echo cometmock \$1 $PROVIDER_NODE_LISTEN_ADDR_STR ${LEAD_VALIDATOR_PROV_DIR}/config/genesis.json $PROVIDER_COMETMOCK_ADDR $PROV_NODES_HOME_STR grpc "&> ${LEAD_VALIDATOR_PROV_DIR}/cometmock_log &" | tee -a start_cometmock.sh 224 | 225 | chmod +x start_apps.sh 226 | chmod +x start_cometmock.sh 227 | 228 | # cometmock $PROVIDER_NODE_LISTEN_ADDR_STR ${LEAD_VALIDATOR_PROV_DIR}/config/genesis.json $PROVIDER_COMETMOCK_ADDR $PROV_NODES_HOME_STR grpc $COMETMOCK_ARGS &> ${LEAD_VALIDATOR_PROV_DIR}/cometmock_log & -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # CometMock 2 | 3 | CometMock is a mock implementation of CometBFT. 4 | It is meant to be used as a drop-in replacement for CometBFT in end-to-end tests. 5 | Some of the reasons to use CometMock instead of CometBFT are: 6 | * More reliable and faster block times: CometBFT runs a consensus algorithm, which involves many network communications, and non-determinism in the network layer can lead to varying block times that can make tests flaky. 7 | CometMock instead directly communicates with applications via ABCI, mimicking the behaviour of many CometBFT instances coming to consensus, but with much fewer network communications. 8 | * More control: When transactions are broadcasted, CometMock immediately includes them in the next block. CometMock also allows 9 | * causing downtime without the need to bother with the network or killing processes by 10 | controlling which validators sign blocks, 11 | * fast-forwarding time, letting arbitrary time pass to the view of the application, without needing to actually wait, 12 | * fast-forwarding blocks, creating empty blocks rapidly to wait for events on the chain to happen. 13 | 14 | On a technical level, CometMock communicates with applications via ABCI through GRPC or TSP (Tendermint Socket Protocol) calls. 
It calls BeginBlock, DeliverTx, EndBlock and Commit like CometBFT does during normal execution. 15 | 16 | Currently, CometMock maintains releases compatible with CometBFT v0.38, v0.37 and v0.34, see branches [v0.34.x](https://github.com/informalsystems/CometMock/tree/v0.34.x), [v0.37.x](https://github.com/informalsystems/CometMock/tree/v0.37.x) and [v0.38.x](https://github.com/informalsystems/CometMock/tree/v0.38.x). It offers *many* of the RPC endpoints offered by Comet (see https://docs.cometbft.com/v0.34/rpc/, https://docs.cometbft.com/v0.37/rpc/ and https://docs.cometbft.com/v0.38/rpc/ for the respective version of the interface), 17 | in particular it supports the subset used by Gorelayer (https://github.com/cosmos/relayer/). 18 | See the endpoints offered here: [https://github.com/informalsystems/CometMock/cometmock/rpc_server/routes.go#L30C2-L53](https://github.com/informalsystems/CometMock/blob/main/cometmock/rpc_server/routes.go) 19 | 20 | ## Installation 21 | 22 | Run `go install ./cometmock`, then you can run `cometmock` to see usage information. 23 | CometMock was tested with `go version go1.20.3 darwin/arm64`. 24 | 25 | ## How to use 26 | 27 | To run CometMock, start your (cosmos-sdk) application instances with the flags ```--with-tendermint=false, --transport=grpc```. 28 | After the applications started, start CometMock like this 29 | ``` 30 | cometmock [--block-time=value] [--auto-tx=] [--block-production-interval=] [--starting-timestamp=] [--starting-timestamp-from-genesis=] {app_address1,app_address2,...} {genesis_file} {cometmock_listen_address} {home_folder1,home_folder2,...} {connection_mode} 31 | ``` 32 | 33 | where: 34 | * The `--block-time` flag is optional and specifies the time in milliseconds between the timestamps of consecutive blocks. 35 | Values <= 0 mean that the timestamps are taken from the system time. The default value is -1. 36 | * The `--auto-tx` flag is optional. 
If it is set to true, when a transaction is broadcasted, it will be automatically included in the next block. The default value is false. 37 | * The `--block-production-interval` flag is optional and specifies the time (in milliseconds) to sleep between the production of consecutive blocks. 38 | This does not mean that blocks are produced this fast, just that CometMock will sleep by this amount between producing two blocks. 39 | The default value is 1000ms=1s. 40 | * The `--starting-timestamp` flag is optional and specifies the starting timestamp of the blockchain. If not specified, the starting timestamp is taken from the system time. 41 | * The `--starting-timestamp-from-genesis` flag is optional and can be used to override the starting timestamp of the blockchain with the timestamp of the genesis file. 42 | In that case, the first block will have a timestamp of Genesis timestamp + block time or, if block time is <= 0, Genesis timestamp + some small, unspecified amount depending on system time. 43 | * The `app_addresses` are the `--address` flags of the applications. This is by default `"tcp://0.0.0.0:26658"` 44 | * The `genesis_file` is the genesis json that is also used by apps. 45 | * The `cometmock_listen_address` can be freely chosen and will be the address that requests that would normally go to CometBFT rpc endpoints need to be directed to. 46 | * The `home_folders` are the home folders of the applications, in the same order as the `app_addresses`. This is required to use the private keys in the application folders to sign as appropriate validators. 47 | * Connection mode is the protocol over which CometMock should connect to the ABCI application, either `grpc` or `socket`. See the `--transport` flag for Cosmos SDK applications. For SDK applications, just make sure `--transport` and this argument match, i.e. either both `socket` or both `grpc`. 48 | 49 | When calling the cosmos sdk cli, use as node address the `cometmock_listen_address`, 50 | e.g. 
`simd q bank total --node {cometmock_listen_address}`. 51 | 52 | ### CometMock specific RPC endpoints 53 | 54 | Here is a quick explanation and example usage of each of the endpoints that are custom to CometMock 55 | 56 | * `advance_blocks(num_blocks)`: Runs `num_blocks` empty blocks in succession. This is way faster than waiting for blocks, e.g. roughly advancing hundreds of blocks takes a few seconds. 57 | Be aware that this still scales linearly in the number of blocks advanced, so e.g. advancing a million blocks will still take a while. 58 | Example usage: 59 | ``` 60 | curl -H 'Content-Type: application/json' -H 'Accept:application/json' --data '{"jsonrpc":"2.0","method":"advance_blocks","params":{"num_blocks": "20"},"id":1}' 127.0.0.1:22331 61 | ``` 62 | * `set_signing_status(private_key_address,status)`: Status can be either `up` (to make the validator sign blocks) or `down` (to make the validator stop signing blocks). 63 | The `private_key_address` is the `address` field of the validators private key. You can find this under `your_node_home/config/priv_validator_key.json`. 64 | That file looks like this: ```{ 65 | "address": "201A6CD9B0CCB5A467F1E13589C92D9C6A76D3E0", 66 | "pub_key": { 67 | "type": "tendermint/PubKeyEd25519", 68 | "value": "946RMFmXUavi+lEypuCu9Ul2ecs+RMKBVhRR9D3FvCo=" 69 | }, 70 | "priv_key": { 71 | "type": "tendermint/PrivKeyEd25519", 72 | "value": "OUHGIoJ1uxVKwDLSwOF+GDbLx9ePgiaGwcy0e5roC2L3jpEwWZdRq+L6UTKm4K71SXZ5yz5EwoFWFFH0PcW8Kg==" 73 | } 74 | }``` 75 | Here, the `address` field is what should be given to the command. 
76 | Example usage: 77 | ``` 78 | # Stop the validator from signing 79 | curl -H 'Content-Type: application/json' -H 'Accept:application/json' --data '{"jsonrpc":"2.0","method":"set_signing_status","params":{"private_key_address": "'"$PRIV_VALIDATOR_ADDRESS"'", "status": "down"},"id":1}' 127.0.0.1:22331 80 | 81 | # Advance enough blocks to get the valdator downtime-slashed 82 | curl -H 'Content-Type: application/json' -H 'Accept:application/json' --data '{"jsonrpc":"2.0","method":"advance_blocks","params":{"num_blocks": "20"},"id":1}' 127.0.0.1:22331 83 | 84 | # Make the validator sign again 85 | curl -H 'Content-Type: application/json' -H 'Accept:application/json' --data '{"jsonrpc":"2.0","method":"set_signing_status","params":{"private_key_address": "'"$PRIV_VALIDATOR_ADDRESS"'", "status": "up"},"id":1}' 127.0.0.1:22331 86 | ``` 87 | 88 | * `advance_time(duration_in_seconds)`: Advances the local time of the blockchain by `duration_in_seconds` seconds. Under the hood, this is done by giving the application timestamps offset by the sum of time advancements that happened so far. 89 | When you test with multiple chains, be aware that you should advance chains at the same time, otherwise e.g. IBC will break due to large differences in the times of the different chains. 90 | This is constant time no matter the duration you advance by. 91 | Example usage: 92 | ``` 93 | curl -H 'Content-Type: application/json' -H 'Accept:application/json' --data '{"jsonrpc":"2.0","method":"advance_time","params":{"duration_in_seconds": "36000000"},"id":1}' 127.0.0.1:22331 94 | ``` 95 | 96 | * `cause_double_sign(private_key_address)`: Causes the validator with the given private key to double sign. This is done by signing two blocks with the same height. This will produce DuplicateVoteEvidence and propagate it to the app via ABCI. 
97 | 98 | * `cause_light_client_attack(private_key_address, misbehaviour_type)`: Will produce LightClientAttackEvidence for the validator with the given private key. This will produce evidence in one of three different ways. Misbehaviour type can be: 99 | * Equivocation: The evidence has a conflicting block that has the same height, but a non-deterministic field is different, e.g. time. 100 | * Lunatic: The evidence has a conflicting block that differs in the app hash. 101 | * Amnesia: The evidence has a conflicting block that is the same as the original block. 102 | 103 | ## Limitations 104 | 105 | ### Not all CometBFT RPC endpoints are implemented 106 | Out of a desire to avoid unnecessary bloat, not all CometBFT RPC endpoints from https://docs.cometbft.com/v0.34/rpc/ are implemented. 107 | If you want to use CometMock but an RPC endpoint you rely on isn't present, please create an issue. 108 | 109 | ### Cosmos SDK GRPC endpoints are not working 110 | Cosmos SDK applications started with `--with-tendermint=false` 111 | do not start their grpc server, see https://github.com/cosmos/cosmos-sdk/issues/16277. 112 | This is a limitation of the Cosmos SDK related to using out-of-process consensus. 113 | 114 | ### --gas auto is not working 115 | Related, using `--gas auto` calls a cosmos sdk grpc endpoint, so it won't be possible with CometMock. 116 | It is recommended to manually specify a large enough gas amount. 117 | 118 | ### Hermes does not work with CometMock 119 | In particular, the fact that the cosmos sdk grpc endpoints are incompatible with having 120 | out-of-process consensus prevents CometMock from working with Hermes, since Hermes calls the SDK grpc endpoints. 121 | If you need a relayer with CometMock, the go relayer https://github.com/cosmos/relayer 122 | is an alternative. The only caveat is that it typically calls the gas simulation, which doesn't work with CometMock. 
123 | Here is a fork of the gorelayer that removes the gas simulation in favor of a fixed value https://github.com/p-offtermatt/relayer/tree/v2.3.0-no-gas-sim. 124 | see this commit for the changes https://github.com/p-offtermatt/relayer/commit/39bc4b82acf1f95b9a8d40a281c3f90178d72d00 125 | 126 | 127 | ## Disclaimer 128 | 129 | CometMock is under heavy development and work-in-progress. 130 | Use at your own risk. In the current state, testing with CometMock cannot fully replace proper end-to-end tests 131 | with CometBFT. 132 | 133 | ## License Information 134 | 135 | Copyright © 2023 Informal Systems Inc. and CometMock authors. 136 | 137 | Licensed under the Apache License, Version 2.0 (the "License"); you may not use the files in this repository except in compliance with the License. You may obtain a copy of the License at 138 | 139 | ```https://www.apache.org/licenses/LICENSE-2.0``` 140 | 141 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 142 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 
15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 
48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. 
Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. 
In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. 
We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright 2023 Informal Systems, Inc and CometMock authors 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /e2e-tests/main_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "math/big" 7 | "os/exec" 8 | "testing" 9 | "time" 10 | 11 | "github.com/stretchr/testify/require" 12 | ) 13 | 14 | func StartChain( 15 | t *testing.T, 16 | cometmockArgs string, 17 | ) error { 18 | // execute the local-testnet-singlechain.sh script 19 | t.Log("Running local-testnet-singlechain.sh") 20 | cmd := exec.Command("./local-testnet-singlechain-restart.sh", "simd") 21 | _, err := runCommandWithOutput(cmd) 22 | if err != nil { 23 | return fmt.Errorf("Error running local-testnet-singlechain.sh: %v", err) 24 | } 25 | 26 | cmd = exec.Command("./local-testnet-singlechain-start.sh", cometmockArgs) 27 | _, err = runCommandWithOutput(cmd) 28 | if err != nil { 29 | return fmt.Errorf("Error running local-testnet-singlechain.sh: %v", err) 30 | } 31 | 32 | t.Log("Done starting testnet") 33 | 34 | // wait until we are producing blocks 35 | for { 36 | // --type 
height 0 gets the latest height 37 | out, err := exec.Command("bash", "-c", "simd q block --type height 0 --output json --node tcp://127.0.0.1:22331 | jq -r '.header.height'").Output() 38 | 39 | if err == nil { 40 | t.Log("We are producing blocks: ", string(out)) 41 | break 42 | } 43 | t.Log("Waiting for blocks to be produced, latest output: ", string(out)) 44 | time.Sleep(1 * time.Second) 45 | } 46 | time.Sleep(5 * time.Second) 47 | return nil 48 | } 49 | 50 | // Tests happy path functionality for Abci Info. 51 | func TestAbciInfo(t *testing.T) { 52 | // start the chain 53 | err := StartChain(t, "") 54 | if err != nil { 55 | t.Fatalf("Error starting chain: %v", err) 56 | } 57 | 58 | // call the abci_info command by calling curl on the REST endpoint 59 | // curl -H 'Content-Type: application/json' -H 'Accept:application/json' --data '{"jsonrpc":"2.0","method":"abci_info","id":1}' 127.0.0.1:22331 60 | args := []string{"bash", "-c", "curl -H 'Content-Type: application/json' -H 'Accept:application/json' --data '{\"jsonrpc\":\"2.0\",\"method\":\"abci_info\",\"id\":1}' 127.0.0.1:22331"} 61 | cmd := exec.Command(args[0], args[1:]...) 62 | out, err := runCommandWithOutput(cmd) 63 | if err != nil { 64 | t.Fatalf("Error running curl\ncommand: %v\noutput: %v\nerror: %v", cmd, string(out), err) 65 | } 66 | 67 | // extract the latest block height from the output 68 | height, err := extractHeightFromInfo([]byte(out)) 69 | if err != nil { 70 | t.Fatalf("Error extracting block height from abci_info output: %v", err) 71 | } 72 | 73 | // wait a bit to make sure the block height has increased 74 | time.Sleep(2 * time.Second) 75 | 76 | // call the abci_info command again 77 | cmd2 := exec.Command(args[0], args[1:]...) 
78 | out2, err := runCommandWithOutput(cmd2) 79 | if err != nil { 80 | t.Fatalf("Error running curl\ncommand: %v\noutput: %v\nerror: %v", cmd2, string(out2), err) 81 | } 82 | 83 | // extract the latest block height from the output 84 | height2, err := extractHeightFromInfo([]byte(out2)) 85 | if err != nil { 86 | t.Fatalf("Error extracting block height from abci_info output: %v", err) 87 | } 88 | 89 | // check that the block height has increased 90 | if height2 <= height { 91 | t.Fatalf("Expected block height to increase, but it did not. First height was %v, second height was %v", height, height2) 92 | } 93 | } 94 | 95 | func TestAbciQuery(t *testing.T) { 96 | // start the chain 97 | err := StartChain(t, "") 98 | if err != nil { 99 | t.Fatalf("Error starting chain: %v", err) 100 | } 101 | 102 | // call the abci_query command by submitting a query that hits the AbciQuery endpoint 103 | // for simplicity, we query for the staking params here - any query would work, 104 | // but ones without arguments are easier to construct 105 | args := []string{"bash", "-c", "simd q staking params --node tcp://127.0.0.1:22331 --output json"} 106 | cmd := exec.Command(args[0], args[1:]...) 107 | out, err := runCommandWithOutput(cmd) 108 | if err != nil { 109 | t.Fatalf("Error running command: %v\noutput: %v\nerror: %v", cmd, string(out), err) 110 | } 111 | 112 | // check that the output is valid JSON 113 | var data map[string]interface{} 114 | if err := json.Unmarshal([]byte(out), &data); err != nil { 115 | t.Fatalf("Failed to unmarshal JSON %s \n error was %v", string(out), err) 116 | } 117 | 118 | // check that the output contains the expected params field. its contents are not important 119 | _, ok := data["params"] 120 | if !ok { 121 | t.Fatalf("Expected output to contain params field, but it did not. 
Output was %s", string(out)) 122 | } 123 | } 124 | 125 | func TestTx(t *testing.T) { 126 | err := StartChain(t, "") 127 | if err != nil { 128 | t.Fatalf("Error starting chain: %v", err) 129 | } 130 | 131 | // check the current amount in the community pool 132 | communityPoolSize, err := getCommunityPoolSize() 133 | require.NoError(t, err) 134 | 135 | // send some tokens to the community pool 136 | err = sendToCommunityPool(50000000000, "coordinator") 137 | require.NoError(t, err) 138 | 139 | // check that the amount in the community pool has increased 140 | communityPoolSize2, err := getCommunityPoolSize() 141 | require.NoError(t, err) 142 | 143 | // cannot check for equality because the community pool gets dust over time 144 | require.True(t, communityPoolSize2.Cmp(communityPoolSize.Add(communityPoolSize, big.NewInt(50000000000))) == +1) 145 | } 146 | 147 | // TestBlockTime checks that the basic behaviour with a specified block-time is as expected, 148 | // i.e. the time increases by the specified block time for each block. 
149 | func TestBlockTime(t *testing.T) { 150 | err := StartChain(t, "--block-time=5000") 151 | if err != nil { 152 | t.Fatalf("Error starting chain: %v", err) 153 | } 154 | 155 | // get a block with height+time 156 | blockString, err := QueryBlock() 157 | require.NoError(t, err) 158 | 159 | // get the height and time from the block 160 | height, err := GetHeightFromBlock(blockString) 161 | require.NoError(t, err) 162 | 163 | blockTime, err := GetTimeFromBlock(blockString) 164 | require.NoError(t, err) 165 | 166 | // wait for a couple of blocks to be produced 167 | time.Sleep(10 * time.Second) 168 | 169 | // get the new height and time 170 | blockString2, err := QueryBlock() 171 | require.NoError(t, err) 172 | 173 | height2, err := GetHeightFromBlock(blockString2) 174 | require.NoError(t, err) 175 | 176 | blockTime2, err := GetTimeFromBlock(blockString2) 177 | require.NoError(t, err) 178 | 179 | blockDifference := height2 - height 180 | // we expect that at least one block was produced, otherwise there is a problem 181 | require.True(t, blockDifference >= 1) 182 | 183 | // get the expected time diff between blocks, as block time was set to 5000 millis = 5 seconds 184 | expectedTimeDifference := time.Duration(blockDifference) * 5 * time.Second 185 | 186 | timeDifference := blockTime2.Sub(blockTime) 187 | 188 | require.Equal(t, expectedTimeDifference, timeDifference) 189 | } 190 | 191 | // TestAutoBlockProductionOff checks that the basic behaviour with 192 | // block-production-interval is as expected, i.e. blocks only 193 | // appear when it is manually instructed. 
194 | func TestNoAutoBlockProduction(t *testing.T) { 195 | err := StartChain(t, "--block-production-interval=-1 --block-time=0") 196 | if err != nil { 197 | t.Fatalf("Error starting chain: %v", err) 198 | } 199 | 200 | height, blockTime, err := GetHeightAndTime() 201 | require.NoError(t, err) 202 | 203 | // wait a few seconds to detect it blocks are produced automatically 204 | time.Sleep(10 * time.Second) 205 | 206 | // get the new height and time 207 | height2, blockTime2, err := GetHeightAndTime() 208 | require.NoError(t, err) 209 | 210 | // no blocks should have been produced 211 | require.Equal(t, height, height2) 212 | require.Equal(t, blockTime, blockTime2) 213 | 214 | // advance time by 5 seconds 215 | err = AdvanceTime(5 * time.Second) 216 | require.NoError(t, err) 217 | 218 | // get the height and time again, they should not have changed yet 219 | height3, blockTime3, err := GetHeightAndTime() 220 | require.NoError(t, err) 221 | 222 | require.Equal(t, height, height3) 223 | require.Equal(t, blockTime, blockTime3) 224 | 225 | // produce a block 226 | err = AdvanceBlocks(1) 227 | require.NoError(t, err) 228 | 229 | // get the height and time again, they should have changed 230 | height4, blockTime4, err := GetHeightAndTime() 231 | require.NoError(t, err) 232 | 233 | require.Equal(t, height+1, height4) 234 | require.Equal(t, blockTime.Add(5*time.Second), blockTime4) 235 | } 236 | 237 | // TestNoAutoTx checks that without auto-tx, transactions are not included 238 | // in blocks automatically. 
239 | func TestNoAutoTx(t *testing.T) { 240 | err := StartChain(t, "--block-production-interval=-1 --auto-tx=false") 241 | if err != nil { 242 | t.Fatalf("Error starting chain: %v", err) 243 | } 244 | 245 | // produce a couple of blocks to initialize the community pool 246 | err = AdvanceBlocks(10) 247 | require.NoError(t, err) 248 | 249 | height, blockTime, err := GetHeightAndTime() 250 | require.NoError(t, err) 251 | 252 | communityPoolBefore, err := getCommunityPoolSize() 253 | require.NoError(t, err) 254 | 255 | // broadcast txs 256 | err = sendToCommunityPool(50000000000, "coordinator") 257 | require.NoError(t, err) 258 | err = sendToCommunityPool(50000000000, "bob") 259 | require.NoError(t, err) 260 | 261 | // get the new height and time 262 | height2, blockTime2, err := GetHeightAndTime() 263 | require.NoError(t, err) 264 | 265 | // no blocks should have been produced 266 | require.Equal(t, height, height2) 267 | require.Equal(t, blockTime, blockTime2) 268 | 269 | // produce a block 270 | err = AdvanceBlocks(1) 271 | require.NoError(t, err) 272 | 273 | // get the height and time again, they should have changed 274 | height3, blockTime3, err := GetHeightAndTime() 275 | require.NoError(t, err) 276 | 277 | require.Equal(t, height+1, height3) 278 | // exact time does not matter, just that it is after the previous block 279 | require.True(t, blockTime.Before(blockTime3)) 280 | 281 | // check that the community pool was increased 282 | communityPoolAfter, err := getCommunityPoolSize() 283 | require.NoError(t, err) 284 | 285 | // cannot check for equality because the community pool gets dust over time 286 | require.True(t, communityPoolAfter.Cmp(communityPoolBefore.Add(communityPoolBefore, big.NewInt(100000000000))) == +1) 287 | } 288 | 289 | func TestStartingTimestamp(t *testing.T) { 290 | err := StartChain(t, "--block-production-interval=-1 --auto-tx=false --starting-timestamp=0 --block-time=1") 291 | if err != nil { 292 | t.Fatalf("Error starting chain: %v", 
err) 293 | } 294 | 295 | // produce a couple of blocks 296 | err = AdvanceBlocks(10) 297 | require.NoError(t, err) 298 | 299 | // get the time 300 | _, blockTime, err := GetHeightAndTime() 301 | require.NoError(t, err) 302 | 303 | // the time should be starting-timestamp + 10 * blockTime + 1 (for the first block needed after Genesis) 304 | startingTimestamp := time.Unix(0, 0) 305 | expectedTime := startingTimestamp.Add(11 * time.Millisecond) 306 | 307 | require.True(t, expectedTime.Compare(blockTime) == 0, "expectedTime: %v, blockTime: %v", expectedTime, blockTime) 308 | } 309 | 310 | func TestSystemStartingTime(t *testing.T) { 311 | err := StartChain(t, "--block-production-interval=-1 --auto-tx=false --starting-timestamp=-1 --block-time=1") 312 | if err != nil { 313 | t.Fatalf("Error starting chain: %v", err) 314 | } 315 | startingTime := time.Now() 316 | 317 | // produce a couple of blocks 318 | err = AdvanceBlocks(10) 319 | require.NoError(t, err) 320 | 321 | // get the time 322 | _, blockTime, err := GetHeightAndTime() 323 | require.NoError(t, err) 324 | 325 | // the time should be starting-timestamp + 10 * blockTime + 1 (for the first block needed after Genesis) 326 | expectedTime := startingTime.Add(11 * time.Millisecond) 327 | 328 | // since the starting timestamp is taken from the system time, 329 | // we can only check that the time is close to the expected time 330 | // since the chain startup is hard to time exactly 331 | delta := 30 * time.Second 332 | 333 | diff := expectedTime.Sub(blockTime).Abs() 334 | 335 | require.True(t, diff <= delta, "expectedTime: %v, blockTime: %v", expectedTime, blockTime) 336 | } 337 | -------------------------------------------------------------------------------- /e2e-tests/local-testnet-debug.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eux 3 | 4 | # User balance of stake tokens 5 | USER_COINS="100000000000stake" 6 | # Amount of stake tokens staked 7 | 
STAKE="100000000stake" 8 | # Node IP address 9 | NODE_IP="127.0.0.1" 10 | 11 | # Home directory 12 | HOME_DIR=$HOME 13 | 14 | # Validator moniker 15 | MONIKERS=("coordinator" "alice" "bob") 16 | LEAD_VALIDATOR_MONIKER="coordinator" 17 | 18 | PROV_NODES_ROOT_DIR=${HOME_DIR}/nodes/provider 19 | CONS_NODES_ROOT_DIR=${HOME_DIR}/nodes/consumer 20 | 21 | # Base port. Ports assigned after these ports sequentially by nodes. 22 | RPC_LADDR_BASEPORT=29170 23 | P2P_LADDR_BASEPORT=29180 24 | GRPC_LADDR_BASEPORT=29190 25 | NODE_ADDRESS_BASEPORT=29200 26 | PPROF_LADDR_BASEPORT=29210 27 | CLIENT_BASEPORT=29220 28 | 29 | # keeps a comma separated list of node addresses for provider and consumer 30 | PROVIDER_NODE_LISTEN_ADDR_STR="" 31 | CONSUMER_NODE_LISTEN_ADDR_STR="" 32 | 33 | # Strings that keep the homes of provider nodes and homes of consumer nodes 34 | PROV_NODES_HOME_STR="" 35 | CONS_NODES_HOME_STR="" 36 | 37 | PROVIDER_COMETMOCK_ADDR=tcp://$NODE_IP:22331 38 | CONSUMER_COMETMOCK_ADDR=tcp://$NODE_IP:22332 39 | 40 | # Clean start 41 | pkill -f ^interchain-security-pd &> /dev/null || true 42 | pkill -f ^cometmock &> /dev/null || true 43 | sleep 1 44 | rm -rf ${PROV_NODES_ROOT_DIR} 45 | rm -rf ${CONS_NODES_ROOT_DIR} 46 | 47 | # Let lead validator create genesis file 48 | LEAD_VALIDATOR_PROV_DIR=${PROV_NODES_ROOT_DIR}/provider-${LEAD_VALIDATOR_MONIKER} 49 | LEAD_VALIDATOR_CONS_DIR=${CONS_NODES_ROOT_DIR}/consumer-${LEAD_VALIDATOR_MONIKER} 50 | LEAD_PROV_KEY=${LEAD_VALIDATOR_MONIKER}-key 51 | LEAD_PROV_LISTEN_ADDR=tcp://${NODE_IP}:${RPC_LADDR_BASEPORT} 52 | 53 | for index in "${!MONIKERS[@]}" 54 | do 55 | MONIKER=${MONIKERS[$index]} 56 | # validator key 57 | PROV_KEY=${MONIKER}-key 58 | 59 | # home directory of this validator on provider 60 | PROV_NODE_DIR=${PROV_NODES_ROOT_DIR}/provider-${MONIKER} 61 | 62 | # home directory of this validator on consumer 63 | CONS_NODE_DIR=${CONS_NODES_ROOT_DIR}/consumer-${MONIKER} 64 | 65 | # Build genesis file and node directory structure 66 | 
interchain-security-pd init $MONIKER --chain-id provider --home ${PROV_NODE_DIR} 67 | jq ".app_state.gov.params.voting_period = \"10s\" | .app_state.staking.params.unbonding_time = \"86400s\"" \ 68 | ${PROV_NODE_DIR}/config/genesis.json > \ 69 | ${PROV_NODE_DIR}/edited_genesis.json && mv ${PROV_NODE_DIR}/edited_genesis.json ${PROV_NODE_DIR}/config/genesis.json 70 | 71 | 72 | sleep 1 73 | 74 | # Create account keypair 75 | interchain-security-pd keys add $PROV_KEY --home ${PROV_NODE_DIR} --keyring-backend test --output json > ${PROV_NODE_DIR}/${PROV_KEY}.json 2>&1 76 | sleep 1 77 | 78 | # copy genesis in, unless this validator is the lead validator 79 | if [ $MONIKER != $LEAD_VALIDATOR_MONIKER ]; then 80 | cp ${LEAD_VALIDATOR_PROV_DIR}/config/genesis.json ${PROV_NODE_DIR}/config/genesis.json 81 | fi 82 | 83 | # Add stake to user 84 | PROV_ACCOUNT_ADDR=$(jq -r '.address' ${PROV_NODE_DIR}/${PROV_KEY}.json) 85 | interchain-security-pd genesis add-genesis-account $PROV_ACCOUNT_ADDR $USER_COINS --home ${PROV_NODE_DIR} --keyring-backend test 86 | sleep 1 87 | 88 | # copy genesis out, unless this validator is the lead validator 89 | if [ $MONIKER != $LEAD_VALIDATOR_MONIKER ]; then 90 | cp ${PROV_NODE_DIR}/config/genesis.json ${LEAD_VALIDATOR_PROV_DIR}/config/genesis.json 91 | fi 92 | 93 | PPROF_LADDR=${NODE_IP}:$(($PPROF_LADDR_BASEPORT + $index)) 94 | P2P_LADDR_PORT=$(($P2P_LADDR_BASEPORT + $index)) 95 | 96 | # adjust configs of this node 97 | sed -i -r 's/timeout_commit = "5s"/timeout_commit = "3s"/g' ${PROV_NODE_DIR}/config/config.toml 98 | sed -i -r 's/timeout_propose = "3s"/timeout_propose = "1s"/g' ${PROV_NODE_DIR}/config/config.toml 99 | 100 | # make address book non-strict. 
necessary for this setup 101 | sed -i -r 's/addr_book_strict = true/addr_book_strict = false/g' ${PROV_NODE_DIR}/config/config.toml 102 | 103 | # avoid port double binding 104 | sed -i -r "s/pprof_laddr = \"localhost:6060\"/pprof_laddr = \"${PPROF_LADDR}\"/g" ${PROV_NODE_DIR}/config/config.toml 105 | 106 | # allow duplicate IP addresses (all nodes are on the same machine) 107 | sed -i -r 's/allow_duplicate_ip = false/allow_duplicate_ip = true/g' ${PROV_NODE_DIR}/config/config.toml 108 | done 109 | 110 | for MONIKER in "${MONIKERS[@]}" 111 | do 112 | # validator key 113 | PROV_KEY=${MONIKER}-key 114 | 115 | # home directory of this validator on provider 116 | PROV_NODE_DIR=${PROV_NODES_ROOT_DIR}/provider-${MONIKER} 117 | 118 | # copy genesis in, unless this validator is the lead validator 119 | if [ $MONIKER != $LEAD_VALIDATOR_MONIKER ]; then 120 | cp ${LEAD_VALIDATOR_PROV_DIR}/config/genesis.json* ${PROV_NODE_DIR}/config/genesis.json 121 | fi 122 | 123 | # Stake 1/1000 user's coins 124 | interchain-security-pd genesis gentx $PROV_KEY $STAKE --chain-id provider --home ${PROV_NODE_DIR} --keyring-backend test --moniker $MONIKER 125 | sleep 1 126 | 127 | # Copy gentxs to the lead validator for possible future collection. 
128 | # Obviously we don't need to copy the first validator's gentx to itself 129 | if [ $MONIKER != $LEAD_VALIDATOR_MONIKER ]; then 130 | cp ${PROV_NODE_DIR}/config/gentx/* ${LEAD_VALIDATOR_PROV_DIR}/config/gentx/ 131 | fi 132 | done 133 | 134 | # Collect genesis transactions with lead validator 135 | interchain-security-pd genesis collect-gentxs --home ${LEAD_VALIDATOR_PROV_DIR} --gentx-dir ${LEAD_VALIDATOR_PROV_DIR}/config/gentx/ 136 | 137 | sleep 1 138 | 139 | 140 | for index in "${!MONIKERS[@]}" 141 | do 142 | MONIKER=${MONIKERS[$index]} 143 | 144 | PERSISTENT_PEERS="" 145 | 146 | for peer_index in "${!MONIKERS[@]}" 147 | do 148 | if [ $index == $peer_index ]; then 149 | continue 150 | fi 151 | PEER_MONIKER=${MONIKERS[$peer_index]} 152 | 153 | PEER_PROV_NODE_DIR=${PROV_NODES_ROOT_DIR}/provider-${PEER_MONIKER} 154 | 155 | PEER_NODE_ID=$(interchain-security-pd tendermint show-node-id --home ${PEER_PROV_NODE_DIR}) 156 | 157 | PEER_P2P_LADDR_PORT=$(($P2P_LADDR_BASEPORT + $peer_index)) 158 | PERSISTENT_PEERS="$PERSISTENT_PEERS,$PEER_NODE_ID@${NODE_IP}:${PEER_P2P_LADDR_PORT}" 159 | done 160 | 161 | # remove trailing comma from persistent peers 162 | PERSISTENT_PEERS=${PERSISTENT_PEERS:1} 163 | 164 | # validator key 165 | PROV_KEY=${MONIKER}-key 166 | 167 | # home directory of this validator on provider 168 | PROV_NODE_DIR=${PROV_NODES_ROOT_DIR}/provider-${MONIKER} 169 | 170 | # home directory of this validator on consumer 171 | CONS_NODE_DIR=${PROV_NODES_ROOT_DIR}/consumer-${MONIKER} 172 | 173 | # copy genesis in, unless this validator is already the lead validator and thus it already has its genesis 174 | if [ $MONIKER != $LEAD_VALIDATOR_MONIKER ]; then 175 | cp ${LEAD_VALIDATOR_PROV_DIR}/config/genesis.json ${PROV_NODE_DIR}/config/genesis.json 176 | fi 177 | 178 | RPC_LADDR_PORT=$(($RPC_LADDR_BASEPORT + $index)) 179 | P2P_LADDR_PORT=$(($P2P_LADDR_BASEPORT + $index)) 180 | GRPC_LADDR_PORT=$(($GRPC_LADDR_BASEPORT + $index)) 181 | 
NODE_ADDRESS_PORT=$(($NODE_ADDRESS_BASEPORT + $index)) 182 | 183 | PROVIDER_NODE_LISTEN_ADDR_STR="${NODE_IP}:${NODE_ADDRESS_PORT},$PROVIDER_NODE_LISTEN_ADDR_STR" 184 | PROV_NODES_HOME_STR="${PROV_NODE_DIR},$PROV_NODES_HOME_STR" 185 | 186 | # Start gaia 187 | interchain-security-pd start \ 188 | --home ${PROV_NODE_DIR} \ 189 | --transport=grpc --with-tendermint=false \ 190 | --p2p.persistent_peers ${PERSISTENT_PEERS} \ 191 | --rpc.laddr tcp://${NODE_IP}:${RPC_LADDR_PORT} \ 192 | --grpc.address ${NODE_IP}:${GRPC_LADDR_PORT} \ 193 | --address tcp://${NODE_IP}:${NODE_ADDRESS_PORT} \ 194 | --p2p.laddr tcp://${NODE_IP}:${P2P_LADDR_PORT} \ 195 | --grpc-web.enable=false &> ${PROV_NODE_DIR}/logs & 196 | 197 | sleep 5 198 | done 199 | 200 | PROVIDER_NODE_LISTEN_ADDR_STR=${PROVIDER_NODE_LISTEN_ADDR_STR::${#PROVIDER_NODE_LISTEN_ADDR_STR}-1} 201 | PROV_NODES_HOME_STR=${PROV_NODES_HOME_STR::${#PROV_NODES_HOME_STR}-1} 202 | 203 | echo cometmock $PROVIDER_NODE_LISTEN_ADDR_STR ${LEAD_VALIDATOR_PROV_DIR}/config/genesis.json $PROVIDER_COMETMOCK_ADDR $PROV_NODES_HOME_STR grpc 204 | 205 | echo "Start cometmock, then press any key to continue" 206 | read -n 1 -s key 207 | 208 | sleep 5 209 | 210 | # Build consumer chain proposal file 211 | tee ${LEAD_VALIDATOR_PROV_DIR}/consumer-proposal.json< /dev/null || true 259 | sleep 1 260 | rm -rf ${CONS_NODES_ROOT_DIR} 261 | 262 | for index in "${!MONIKERS[@]}" 263 | do 264 | MONIKER=${MONIKERS[$index]} 265 | # validator key 266 | PROV_KEY=${MONIKER}-key 267 | 268 | PROV_NODE_DIR=${PROV_NODES_ROOT_DIR}/provider-${MONIKER} 269 | 270 | # home directory of this validator on consumer 271 | CONS_NODE_DIR=${CONS_NODES_ROOT_DIR}/consumer-${MONIKER} 272 | 273 | # Build genesis file and node directory structure 274 | interchain-security-cd init $MONIKER --chain-id consumer --home ${CONS_NODE_DIR} 275 | 276 | sleep 1 277 | 278 | # Create account keypair 279 | interchain-security-cd keys add $PROV_KEY --home ${CONS_NODE_DIR} --keyring-backend test --output 
json > ${CONS_NODE_DIR}/${PROV_KEY}.json 2>&1 280 | sleep 1 281 | 282 | # copy genesis in, unless this validator is the lead validator 283 | if [ $MONIKER != $LEAD_VALIDATOR_MONIKER ]; then 284 | cp ${LEAD_VALIDATOR_CONS_DIR}/config/genesis.json ${CONS_NODE_DIR}/config/genesis.json 285 | fi 286 | 287 | # Add stake to user 288 | CONS_ACCOUNT_ADDR=$(jq -r '.address' ${CONS_NODE_DIR}/${PROV_KEY}.json) 289 | interchain-security-cd genesis add-genesis-account $CONS_ACCOUNT_ADDR $USER_COINS --home ${CONS_NODE_DIR} 290 | sleep 10 291 | 292 | ### this probably doesnt have to be done for each node 293 | # Add consumer genesis states to genesis file 294 | RPC_LADDR_PORT=$(($RPC_LADDR_BASEPORT + $index)) 295 | RPC_LADDR=tcp://${NODE_IP}:${RPC_LADDR_PORT} 296 | interchain-security-pd query provider consumer-genesis consumer --home ${PROV_NODE_DIR} --node $PROVIDER_COMETMOCK_ADDR -o json > consumer_gen.json 297 | jq -s '.[0].app_state.ccvconsumer = .[1] | .[0]' ${CONS_NODE_DIR}/config/genesis.json consumer_gen.json > ${CONS_NODE_DIR}/edited_genesis.json \ 298 | && mv ${CONS_NODE_DIR}/edited_genesis.json ${CONS_NODE_DIR}/config/genesis.json 299 | rm consumer_gen.json 300 | ### 301 | 302 | # copy genesis out, unless this validator is the lead validator 303 | if [ $MONIKER != $LEAD_VALIDATOR_MONIKER ]; then 304 | cp ${CONS_NODE_DIR}/config/genesis.json ${LEAD_VALIDATOR_CONS_DIR}/config/genesis.json 305 | fi 306 | 307 | PPROF_LADDR=${NODE_IP}:$(($PPROF_LADDR_BASEPORT + ${#MONIKERS[@]} + $index)) 308 | P2P_LADDR_PORT=$(($P2P_LADDR_BASEPORT + ${#MONIKERS[@]} + $index)) 309 | 310 | # adjust configs of this node 311 | sed -i -r 's/timeout_commit = "5s"/timeout_commit = "3s"/g' ${CONS_NODE_DIR}/config/config.toml 312 | sed -i -r 's/timeout_propose = "3s"/timeout_propose = "1s"/g' ${CONS_NODE_DIR}/config/config.toml 313 | 314 | # make address book non-strict. 
necessary for this setup 315 | sed -i -r 's/addr_book_strict = true/addr_book_strict = false/g' ${CONS_NODE_DIR}/config/config.toml 316 | 317 | # avoid port double binding 318 | sed -i -r "s/pprof_laddr = \"localhost:6060\"/pprof_laddr = \"${PPROF_LADDR}\"/g" ${CONS_NODE_DIR}/config/config.toml 319 | 320 | # allow duplicate IP addresses (all nodes are on the same machine) 321 | sed -i -r 's/allow_duplicate_ip = false/allow_duplicate_ip = true/g' ${CONS_NODE_DIR}/config/config.toml 322 | 323 | # Create validator states 324 | echo '{"height": "0","round": 0,"step": 0}' > ${CONS_NODE_DIR}/data/priv_validator_state.json 325 | 326 | # Copy validator key files 327 | cp ${PROV_NODE_DIR}/config/priv_validator_key.json ${CONS_NODE_DIR}/config/priv_validator_key.json 328 | cp ${PROV_NODE_DIR}/config/node_key.json ${CONS_NODE_DIR}/config/node_key.json 329 | 330 | # Set default client port 331 | CLIENT_PORT=$(($CLIENT_BASEPORT + ${#MONIKERS[@]} + $index)) 332 | sed -i -r "/node =/ s/= .*/= \"tcp:\/\/${NODE_IP}:${CLIENT_PORT}\"/" ${CONS_NODE_DIR}/config/client.toml 333 | 334 | done 335 | 336 | sleep 1 337 | 338 | 339 | for index in "${!MONIKERS[@]}" 340 | do 341 | MONIKER=${MONIKERS[$index]} 342 | 343 | PERSISTENT_PEERS="" 344 | 345 | for peer_index in "${!MONIKERS[@]}" 346 | do 347 | if [ $index == $peer_index ]; then 348 | continue 349 | fi 350 | PEER_MONIKER=${MONIKERS[$peer_index]} 351 | 352 | PEER_CONS_NODE_DIR=${CONS_NODES_ROOT_DIR}/consumer-${PEER_MONIKER} 353 | 354 | PEER_NODE_ID=$(interchain-security-pd tendermint show-node-id --home ${PEER_CONS_NODE_DIR}) 355 | 356 | PEER_P2P_LADDR_PORT=$(($P2P_LADDR_BASEPORT + ${#MONIKERS[@]} + $peer_index)) 357 | PERSISTENT_PEERS="$PERSISTENT_PEERS,$PEER_NODE_ID@${NODE_IP}:${PEER_P2P_LADDR_PORT}" 358 | done 359 | 360 | # remove trailing comma from persistent peers 361 | PERSISTENT_PEERS=${PERSISTENT_PEERS:1} 362 | 363 | # validator key 364 | PROV_KEY=${MONIKER}-key 365 | 366 | # home directory of this validator on provider 367 | 
PROV_NODE_DIR=${PROV_NODES_ROOT_DIR}/provider-${MONIKER} 368 | 369 | # home directory of this validator on consumer 370 | CONS_NODE_DIR=${CONS_NODES_ROOT_DIR}/consumer-${MONIKER} 371 | 372 | # copy genesis in, unless this validator is already the lead validator and thus it already has its genesis 373 | if [ $MONIKER != $LEAD_VALIDATOR_MONIKER ]; then 374 | cp ${LEAD_VALIDATOR_CONS_DIR}/config/genesis.json ${CONS_NODE_DIR}/config/genesis.json 375 | fi 376 | 377 | RPC_LADDR_PORT=$(($RPC_LADDR_BASEPORT + ${#MONIKERS[@]} + $index)) 378 | P2P_LADDR_PORT=$(($P2P_LADDR_BASEPORT + ${#MONIKERS[@]} + $index)) 379 | GRPC_LADDR_PORT=$(($GRPC_LADDR_BASEPORT + ${#MONIKERS[@]} + $index)) 380 | NODE_ADDRESS_PORT=$(($NODE_ADDRESS_BASEPORT + ${#MONIKERS[@]} + $index)) 381 | 382 | CONSUMER_NODE_LISTEN_ADDR_STR="${NODE_IP}:${NODE_ADDRESS_PORT},$CONSUMER_NODE_LISTEN_ADDR_STR" 383 | # Add the home directory to the CONS_NODES_HOME_STR 384 | CONS_NODES_HOME_STR="${CONS_NODE_DIR},${CONS_NODES_HOME_STR}" 385 | 386 | # Start gaia 387 | interchain-security-cd start \ 388 | --home ${CONS_NODE_DIR} \ 389 | --transport=grpc --with-tendermint=false \ 390 | --p2p.persistent_peers ${PERSISTENT_PEERS} \ 391 | --rpc.laddr tcp://${NODE_IP}:${RPC_LADDR_PORT} \ 392 | --grpc.address ${NODE_IP}:${GRPC_LADDR_PORT} \ 393 | --address tcp://${NODE_IP}:${NODE_ADDRESS_PORT} \ 394 | --p2p.laddr tcp://${NODE_IP}:${P2P_LADDR_PORT} \ 395 | --grpc-web.enable=false &> ${CONS_NODE_DIR}/logs & 396 | 397 | sleep 6 398 | done 399 | 400 | # remove trailing comma from consumer node listen addr str 401 | CONSUMER_NODE_LISTEN_ADDR_STR=${CONSUMER_NODE_LISTEN_ADDR_STR::${#CONSUMER_NODE_LISTEN_ADDR_STR}-1} 402 | CONS_NODES_HOME_STR=${CONS_NODES_HOME_STR::${#CONS_NODES_HOME_STR}-1} 403 | 404 | echo cometmock $CONSUMER_NODE_LISTEN_ADDR_STR ${LEAD_VALIDATOR_CONS_DIR}/config/genesis.json $CONSUMER_COMETMOCK_ADDR $CONS_NODES_HOME_STR grpc 405 | echo "Start cometmock, then press any key to continue" 406 | read -n 1 -s key 407 | 408 | 
sleep 3 409 | 410 | rm -r ~/.relayer 411 | 412 | # initialize gorelayer 413 | rly config init 414 | 415 | # add chains 416 | rly chains add --file go_rly_provider.json provider 417 | rly chains add --file go_rly_consumer.json consumer 418 | 419 | # gorelayer 420 | rly keys delete consumer default -y || true 421 | rly keys delete provider default -y || true 422 | 423 | # take keys from provider and consumer and add them to gorelayer 424 | rly keys restore provider default "$(cat ${LEAD_VALIDATOR_PROV_DIR}/${LEAD_VALIDATOR_MONIKER}-key.json | jq -r '.mnemonic')" 425 | rly keys restore consumer default "$(cat ${LEAD_VALIDATOR_CONS_DIR}/${LEAD_VALIDATOR_MONIKER}-key.json | jq -r '.mnemonic')" 426 | 427 | rly paths add consumer provider testpath --file go_rly_ics_path_config.json 428 | rly tx clients testpath 429 | rly tx connection testpath 430 | rly tx channel testpath --src-port consumer --dst-port provider --version 1 --order ordered --debug 431 | rly start -------------------------------------------------------------------------------- /e2e-tests/local-testnet.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eux 3 | 4 | # User balance of stake tokens 5 | USER_COINS="100000000000stake" 6 | # Amount of stake tokens staked 7 | STAKE="100000000stake" 8 | # Node IP address 9 | NODE_IP="127.0.0.1" 10 | 11 | # Home directory 12 | HOME_DIR=$HOME 13 | 14 | # Validator moniker 15 | MONIKERS=("coordinator" "alice" "bob") 16 | LEAD_VALIDATOR_MONIKER="coordinator" 17 | 18 | PROV_NODES_ROOT_DIR=${HOME_DIR}/nodes/provider 19 | CONS_NODES_ROOT_DIR=${HOME_DIR}/nodes/consumer 20 | 21 | # Base port. Ports assigned after these ports sequentially by nodes. 
22 | RPC_LADDR_BASEPORT=29170 23 | P2P_LADDR_BASEPORT=29180 24 | GRPC_LADDR_BASEPORT=29190 25 | NODE_ADDRESS_BASEPORT=29200 26 | PPROF_LADDR_BASEPORT=29210 27 | CLIENT_BASEPORT=29220 28 | 29 | # keeps a comma separated list of node addresses for provider and consumer 30 | PROVIDER_NODE_LISTEN_ADDR_STR="" 31 | CONSUMER_NODE_LISTEN_ADDR_STR="" 32 | 33 | # Strings that keep the homes of provider nodes and homes of consumer nodes 34 | PROV_NODES_HOME_STR="" 35 | CONS_NODES_HOME_STR="" 36 | 37 | PROVIDER_COMETMOCK_ADDR=tcp://$NODE_IP:22331 38 | CONSUMER_COMETMOCK_ADDR=tcp://$NODE_IP:22332 39 | 40 | # Clean start 41 | pkill -f ^interchain-security-pd &> /dev/null || true 42 | pkill -f ^cometmock &> /dev/null || true 43 | sleep 1 44 | rm -rf ${PROV_NODES_ROOT_DIR} 45 | rm -rf ${CONS_NODES_ROOT_DIR} 46 | 47 | # Let lead validator create genesis file 48 | LEAD_VALIDATOR_PROV_DIR=${PROV_NODES_ROOT_DIR}/provider-${LEAD_VALIDATOR_MONIKER} 49 | LEAD_VALIDATOR_CONS_DIR=${CONS_NODES_ROOT_DIR}/consumer-${LEAD_VALIDATOR_MONIKER} 50 | LEAD_PROV_KEY=${LEAD_VALIDATOR_MONIKER}-key 51 | LEAD_PROV_LISTEN_ADDR=tcp://${NODE_IP}:${RPC_LADDR_BASEPORT} 52 | 53 | for index in "${!MONIKERS[@]}" 54 | do 55 | MONIKER=${MONIKERS[$index]} 56 | # validator key 57 | PROV_KEY=${MONIKER}-key 58 | 59 | # home directory of this validator on provider 60 | PROV_NODE_DIR=${PROV_NODES_ROOT_DIR}/provider-${MONIKER} 61 | 62 | # home directory of this validator on consumer 63 | CONS_NODE_DIR=${CONS_NODES_ROOT_DIR}/consumer-${MONIKER} 64 | 65 | # Build genesis file and node directory structure 66 | interchain-security-pd init $MONIKER --chain-id provider --home ${PROV_NODE_DIR} 67 | jq ".app_state.gov.params.voting_period = \"10s\" | .app_state.staking.params.unbonding_time = \"86400s\"" \ 68 | ${PROV_NODE_DIR}/config/genesis.json > \ 69 | ${PROV_NODE_DIR}/edited_genesis.json && mv ${PROV_NODE_DIR}/edited_genesis.json ${PROV_NODE_DIR}/config/genesis.json 70 | 71 | sleep 1 72 | 73 | # Create account keypair 74 | 
interchain-security-pd keys add $PROV_KEY --home ${PROV_NODE_DIR} --keyring-backend test --output json > ${PROV_NODE_DIR}/${PROV_KEY}.json 2>&1 75 | sleep 1 76 | 77 | # copy genesis in, unless this validator is the lead validator 78 | if [ $MONIKER != $LEAD_VALIDATOR_MONIKER ]; then 79 | cp ${LEAD_VALIDATOR_PROV_DIR}/config/genesis.json ${PROV_NODE_DIR}/config/genesis.json 80 | fi 81 | 82 | # Add stake to user 83 | PROV_ACCOUNT_ADDR=$(jq -r '.address' ${PROV_NODE_DIR}/${PROV_KEY}.json) 84 | interchain-security-pd genesis add-genesis-account $PROV_ACCOUNT_ADDR $USER_COINS --home ${PROV_NODE_DIR} --keyring-backend test 85 | sleep 1 86 | 87 | # copy genesis out, unless this validator is the lead validator 88 | if [ $MONIKER != $LEAD_VALIDATOR_MONIKER ]; then 89 | cp ${PROV_NODE_DIR}/config/genesis.json ${LEAD_VALIDATOR_PROV_DIR}/config/genesis.json 90 | fi 91 | 92 | PPROF_LADDR=${NODE_IP}:$(($PPROF_LADDR_BASEPORT + $index)) 93 | P2P_LADDR_PORT=$(($P2P_LADDR_BASEPORT + $index)) 94 | 95 | # adjust configs of this node 96 | sed -i -r 's/timeout_commit = "5s"/timeout_commit = "3s"/g' ${PROV_NODE_DIR}/config/config.toml 97 | sed -i -r 's/timeout_propose = "3s"/timeout_propose = "1s"/g' ${PROV_NODE_DIR}/config/config.toml 98 | 99 | # make address book non-strict. 
necessary for this setup 100 | sed -i -r 's/addr_book_strict = true/addr_book_strict = false/g' ${PROV_NODE_DIR}/config/config.toml 101 | 102 | # avoid port double binding 103 | sed -i -r "s/pprof_laddr = \"localhost:6060\"/pprof_laddr = \"${PPROF_LADDR}\"/g" ${PROV_NODE_DIR}/config/config.toml 104 | 105 | # allow duplicate IP addresses (all nodes are on the same machine) 106 | sed -i -r 's/allow_duplicate_ip = false/allow_duplicate_ip = true/g' ${PROV_NODE_DIR}/config/config.toml 107 | done 108 | 109 | for MONIKER in "${MONIKERS[@]}" 110 | do 111 | # validator key 112 | PROV_KEY=${MONIKER}-key 113 | 114 | # home directory of this validator on provider 115 | PROV_NODE_DIR=${PROV_NODES_ROOT_DIR}/provider-${MONIKER} 116 | 117 | # copy genesis in, unless this validator is the lead validator 118 | if [ $MONIKER != $LEAD_VALIDATOR_MONIKER ]; then 119 | cp ${LEAD_VALIDATOR_PROV_DIR}/config/genesis.json* ${PROV_NODE_DIR}/config/genesis.json 120 | fi 121 | 122 | # Stake 1/1000 user's coins 123 | interchain-security-pd genesis gentx $PROV_KEY $STAKE --chain-id provider --home ${PROV_NODE_DIR} --keyring-backend test --moniker $MONIKER 124 | sleep 1 125 | 126 | # Copy gentxs to the lead validator for possible future collection. 
127 | # Obviously we don't need to copy the first validator's gentx to itself 128 | if [ $MONIKER != $LEAD_VALIDATOR_MONIKER ]; then 129 | cp ${PROV_NODE_DIR}/config/gentx/* ${LEAD_VALIDATOR_PROV_DIR}/config/gentx/ 130 | fi 131 | done 132 | 133 | # Collect genesis transactions with lead validator 134 | interchain-security-pd genesis collect-gentxs --home ${LEAD_VALIDATOR_PROV_DIR} --gentx-dir ${LEAD_VALIDATOR_PROV_DIR}/config/gentx/ 135 | 136 | sleep 1 137 | 138 | 139 | for index in "${!MONIKERS[@]}" 140 | do 141 | MONIKER=${MONIKERS[$index]} 142 | 143 | PERSISTENT_PEERS="" 144 | 145 | for peer_index in "${!MONIKERS[@]}" 146 | do 147 | if [ $index == $peer_index ]; then 148 | continue 149 | fi 150 | PEER_MONIKER=${MONIKERS[$peer_index]} 151 | 152 | PEER_PROV_NODE_DIR=${PROV_NODES_ROOT_DIR}/provider-${PEER_MONIKER} 153 | 154 | PEER_NODE_ID=$(interchain-security-pd tendermint show-node-id --home ${PEER_PROV_NODE_DIR}) 155 | 156 | PEER_P2P_LADDR_PORT=$(($P2P_LADDR_BASEPORT + $peer_index)) 157 | PERSISTENT_PEERS="$PERSISTENT_PEERS,$PEER_NODE_ID@${NODE_IP}:${PEER_P2P_LADDR_PORT}" 158 | done 159 | 160 | # remove trailing comma from persistent peers 161 | PERSISTENT_PEERS=${PERSISTENT_PEERS:1} 162 | 163 | # validator key 164 | PROV_KEY=${MONIKER}-key 165 | 166 | # home directory of this validator on provider 167 | PROV_NODE_DIR=${PROV_NODES_ROOT_DIR}/provider-${MONIKER} 168 | 169 | # home directory of this validator on consumer 170 | CONS_NODE_DIR=${PROV_NODES_ROOT_DIR}/consumer-${MONIKER} 171 | 172 | # copy genesis in, unless this validator is already the lead validator and thus it already has its genesis 173 | if [ $MONIKER != $LEAD_VALIDATOR_MONIKER ]; then 174 | cp ${LEAD_VALIDATOR_PROV_DIR}/config/genesis.json ${PROV_NODE_DIR}/config/genesis.json 175 | fi 176 | 177 | RPC_LADDR_PORT=$(($RPC_LADDR_BASEPORT + $index)) 178 | P2P_LADDR_PORT=$(($P2P_LADDR_BASEPORT + $index)) 179 | GRPC_LADDR_PORT=$(($GRPC_LADDR_BASEPORT + $index)) 180 | 
NODE_ADDRESS_PORT=$(($NODE_ADDRESS_BASEPORT + $index)) 181 | 182 | PROVIDER_NODE_LISTEN_ADDR_STR="${NODE_IP}:${NODE_ADDRESS_PORT},$PROVIDER_NODE_LISTEN_ADDR_STR" 183 | PROV_NODES_HOME_STR="${PROV_NODE_DIR},$PROV_NODES_HOME_STR" 184 | 185 | # Start gaia 186 | interchain-security-pd start \ 187 | --home ${PROV_NODE_DIR} \ 188 | --transport=grpc --with-tendermint=false \ 189 | --p2p.persistent_peers ${PERSISTENT_PEERS} \ 190 | --rpc.laddr tcp://${NODE_IP}:${RPC_LADDR_PORT} \ 191 | --grpc.address ${NODE_IP}:${GRPC_LADDR_PORT} \ 192 | --address tcp://${NODE_IP}:${NODE_ADDRESS_PORT} \ 193 | --p2p.laddr tcp://${NODE_IP}:${P2P_LADDR_PORT} \ 194 | --grpc-web.enable=false &> ${PROV_NODE_DIR}/logs & 195 | 196 | sleep 5 197 | done 198 | 199 | PROVIDER_NODE_LISTEN_ADDR_STR=${PROVIDER_NODE_LISTEN_ADDR_STR::${#PROVIDER_NODE_LISTEN_ADDR_STR}-1} 200 | PROV_NODES_HOME_STR=${PROV_NODES_HOME_STR::${#PROV_NODES_HOME_STR}-1} 201 | 202 | cometmock $PROVIDER_NODE_LISTEN_ADDR_STR ${LEAD_VALIDATOR_PROV_DIR}/config/genesis.json $PROVIDER_COMETMOCK_ADDR $PROV_NODES_HOME_STR grpc &> ${LEAD_VALIDATOR_PROV_DIR}/cometmock_log & 203 | 204 | sleep 5 205 | 206 | # Build consumer chain proposal file 207 | tee ${LEAD_VALIDATOR_PROV_DIR}/consumer-proposal.json< /dev/null || true 255 | sleep 1 256 | rm -rf ${CONS_NODES_ROOT_DIR} 257 | 258 | for index in "${!MONIKERS[@]}" 259 | do 260 | MONIKER=${MONIKERS[$index]} 261 | # validator key 262 | PROV_KEY=${MONIKER}-key 263 | 264 | PROV_NODE_DIR=${PROV_NODES_ROOT_DIR}/provider-${MONIKER} 265 | 266 | # home directory of this validator on consumer 267 | CONS_NODE_DIR=${CONS_NODES_ROOT_DIR}/consumer-${MONIKER} 268 | 269 | # Build genesis file and node directory structure 270 | interchain-security-cd init $MONIKER --chain-id consumer --home ${CONS_NODE_DIR} 271 | 272 | sleep 1 273 | 274 | # Create account keypair 275 | interchain-security-cd keys add $PROV_KEY --home ${CONS_NODE_DIR} --keyring-backend test --output json > ${CONS_NODE_DIR}/${PROV_KEY}.json 2>&1 
276 | sleep 1 277 | 278 | # copy genesis in, unless this validator is the lead validator 279 | if [ $MONIKER != $LEAD_VALIDATOR_MONIKER ]; then 280 | cp ${LEAD_VALIDATOR_CONS_DIR}/config/genesis.json ${CONS_NODE_DIR}/config/genesis.json 281 | fi 282 | 283 | # Add stake to user 284 | CONS_ACCOUNT_ADDR=$(jq -r '.address' ${CONS_NODE_DIR}/${PROV_KEY}.json) 285 | interchain-security-cd genesis add-genesis-account $CONS_ACCOUNT_ADDR $USER_COINS --home ${CONS_NODE_DIR} 286 | sleep 10 287 | 288 | ### this probably doesnt have to be done for each node 289 | # Add consumer genesis states to genesis file 290 | RPC_LADDR_PORT=$(($RPC_LADDR_BASEPORT + $index)) 291 | RPC_LADDR=tcp://${NODE_IP}:${RPC_LADDR_PORT} 292 | interchain-security-pd query provider consumer-genesis consumer --home ${PROV_NODE_DIR} --node $PROVIDER_COMETMOCK_ADDR -o json > consumer_gen.json 293 | jq -s '.[0].app_state.ccvconsumer = .[1] | .[0]' ${CONS_NODE_DIR}/config/genesis.json consumer_gen.json > ${CONS_NODE_DIR}/edited_genesis.json \ 294 | && mv ${CONS_NODE_DIR}/edited_genesis.json ${CONS_NODE_DIR}/config/genesis.json 295 | rm consumer_gen.json 296 | ### 297 | 298 | # copy genesis out, unless this validator is the lead validator 299 | if [ $MONIKER != $LEAD_VALIDATOR_MONIKER ]; then 300 | cp ${CONS_NODE_DIR}/config/genesis.json ${LEAD_VALIDATOR_CONS_DIR}/config/genesis.json 301 | fi 302 | 303 | PPROF_LADDR=${NODE_IP}:$(($PPROF_LADDR_BASEPORT + ${#MONIKERS[@]} + $index)) 304 | P2P_LADDR_PORT=$(($P2P_LADDR_BASEPORT + ${#MONIKERS[@]} + $index)) 305 | 306 | # adjust configs of this node 307 | sed -i -r 's/timeout_commit = "5s"/timeout_commit = "3s"/g' ${CONS_NODE_DIR}/config/config.toml 308 | sed -i -r 's/timeout_propose = "3s"/timeout_propose = "1s"/g' ${CONS_NODE_DIR}/config/config.toml 309 | 310 | # make address book non-strict. 
necessary for this setup 311 | sed -i -r 's/addr_book_strict = true/addr_book_strict = false/g' ${CONS_NODE_DIR}/config/config.toml 312 | 313 | # avoid port double binding 314 | sed -i -r "s/pprof_laddr = \"localhost:6060\"/pprof_laddr = \"${PPROF_LADDR}\"/g" ${CONS_NODE_DIR}/config/config.toml 315 | 316 | # allow duplicate IP addresses (all nodes are on the same machine) 317 | sed -i -r 's/allow_duplicate_ip = false/allow_duplicate_ip = true/g' ${CONS_NODE_DIR}/config/config.toml 318 | 319 | # Create validator states 320 | echo '{"height": "0","round": 0,"step": 0}' > ${CONS_NODE_DIR}/data/priv_validator_state.json 321 | 322 | # Copy validator key files 323 | cp ${PROV_NODE_DIR}/config/priv_validator_key.json ${CONS_NODE_DIR}/config/priv_validator_key.json 324 | cp ${PROV_NODE_DIR}/config/node_key.json ${CONS_NODE_DIR}/config/node_key.json 325 | 326 | # Set default client port 327 | CLIENT_PORT=$(($CLIENT_BASEPORT + ${#MONIKERS[@]} + $index)) 328 | sed -i -r "/node =/ s/= .*/= \"tcp:\/\/${NODE_IP}:${CLIENT_PORT}\"/" ${CONS_NODE_DIR}/config/client.toml 329 | 330 | done 331 | 332 | sleep 1 333 | 334 | 335 | for index in "${!MONIKERS[@]}" 336 | do 337 | MONIKER=${MONIKERS[$index]} 338 | 339 | PERSISTENT_PEERS="" 340 | 341 | for peer_index in "${!MONIKERS[@]}" 342 | do 343 | if [ $index == $peer_index ]; then 344 | continue 345 | fi 346 | PEER_MONIKER=${MONIKERS[$peer_index]} 347 | 348 | PEER_CONS_NODE_DIR=${CONS_NODES_ROOT_DIR}/consumer-${PEER_MONIKER} 349 | 350 | PEER_NODE_ID=$(interchain-security-pd tendermint show-node-id --home ${PEER_CONS_NODE_DIR}) 351 | 352 | PEER_P2P_LADDR_PORT=$(($P2P_LADDR_BASEPORT + ${#MONIKERS[@]} + $peer_index)) 353 | PERSISTENT_PEERS="$PERSISTENT_PEERS,$PEER_NODE_ID@${NODE_IP}:${PEER_P2P_LADDR_PORT}" 354 | done 355 | 356 | # remove trailing comma from persistent peers 357 | PERSISTENT_PEERS=${PERSISTENT_PEERS:1} 358 | 359 | # validator key 360 | PROV_KEY=${MONIKER}-key 361 | 362 | # home directory of this validator on provider 363 | 
PROV_NODE_DIR=${PROV_NODES_ROOT_DIR}/provider-${MONIKER} 364 | 365 | # home directory of this validator on consumer 366 | CONS_NODE_DIR=${CONS_NODES_ROOT_DIR}/consumer-${MONIKER} 367 | 368 | # copy genesis in, unless this validator is already the lead validator and thus it already has its genesis 369 | if [ $MONIKER != $LEAD_VALIDATOR_MONIKER ]; then 370 | cp ${LEAD_VALIDATOR_CONS_DIR}/config/genesis.json ${CONS_NODE_DIR}/config/genesis.json 371 | fi 372 | 373 | RPC_LADDR_PORT=$(($RPC_LADDR_BASEPORT + ${#MONIKERS[@]} + $index)) 374 | P2P_LADDR_PORT=$(($P2P_LADDR_BASEPORT + ${#MONIKERS[@]} + $index)) 375 | GRPC_LADDR_PORT=$(($GRPC_LADDR_BASEPORT + ${#MONIKERS[@]} + $index)) 376 | NODE_ADDRESS_PORT=$(($NODE_ADDRESS_BASEPORT + ${#MONIKERS[@]} + $index)) 377 | 378 | CONSUMER_NODE_LISTEN_ADDR_STR="${NODE_IP}:${NODE_ADDRESS_PORT},$CONSUMER_NODE_LISTEN_ADDR_STR" 379 | # Add the home directory to the CONS_NODES_HOME_STR 380 | CONS_NODES_HOME_STR="${CONS_NODE_DIR},${CONS_NODES_HOME_STR}" 381 | 382 | # Start gaia 383 | interchain-security-cd start \ 384 | --home ${CONS_NODE_DIR} \ 385 | --transport=grpc --with-tendermint=false \ 386 | --p2p.persistent_peers ${PERSISTENT_PEERS} \ 387 | --rpc.laddr tcp://${NODE_IP}:${RPC_LADDR_PORT} \ 388 | --grpc.address ${NODE_IP}:${GRPC_LADDR_PORT} \ 389 | --address tcp://${NODE_IP}:${NODE_ADDRESS_PORT} \ 390 | --p2p.laddr tcp://${NODE_IP}:${P2P_LADDR_PORT} \ 391 | --grpc-web.enable=false &> ${CONS_NODE_DIR}/logs & 392 | 393 | sleep 6 394 | done 395 | 396 | # remove trailing comma from consumer node listen addr str 397 | CONSUMER_NODE_LISTEN_ADDR_STR=${CONSUMER_NODE_LISTEN_ADDR_STR::${#CONSUMER_NODE_LISTEN_ADDR_STR}-1} 398 | CONS_NODES_HOME_STR=${CONS_NODES_HOME_STR::${#CONS_NODES_HOME_STR}-1} 399 | 400 | cometmock $CONSUMER_NODE_LISTEN_ADDR_STR ${LEAD_VALIDATOR_CONS_DIR}/config/genesis.json $CONSUMER_COMETMOCK_ADDR $CONS_NODES_HOME_STR grpc &> ${LEAD_VALIDATOR_CONS_DIR}/cometmock_log & 401 | 402 | sleep 3 403 | 404 | rm -r ~/.relayer 405 | 
406 | # initialize gorelayer 407 | rly config init 408 | 409 | # add chain configs 410 | 411 | echo "{ 412 | \"type\": \"cosmos\", 413 | \"value\": { 414 | \"key\": \"default\", 415 | \"chain-id\": \"provider\", 416 | \"rpc-addr\": \"${PROVIDER_COMETMOCK_ADDR}\", 417 | \"account-prefix\": \"cosmos\", 418 | \"keyring-backend\": \"test\", 419 | \"gas-adjustment\": 1.2, 420 | \"gas-prices\": \"0.01stake\", 421 | \"debug\": true, 422 | \"timeout\": \"20s\", 423 | \"output-format\": \"json\", 424 | \"sign-mode\": \"direct\" 425 | } 426 | }" > go_rly_provider.json 427 | 428 | echo "{ 429 | \"type\": \"cosmos\", 430 | \"value\": { 431 | \"key\": \"default\", 432 | \"chain-id\": \"consumer\", 433 | \"rpc-addr\": \"${CONSUMER_COMETMOCK_ADDR}\", 434 | \"account-prefix\": \"cosmos\", 435 | \"keyring-backend\": \"test\", 436 | \"gas-adjustment\": 1.2, 437 | \"gas-prices\": \"0.01stake\", 438 | \"debug\": true, 439 | \"timeout\": \"20s\", 440 | \"output-format\": \"json\", 441 | \"sign-mode\": \"direct\" 442 | } 443 | }" > go_rly_consumer.json 444 | 445 | # add chains 446 | rly chains add --file go_rly_provider.json provider 447 | rly chains add --file go_rly_consumer.json consumer 448 | 449 | # gorelayer 450 | rly keys delete consumer default -y || true 451 | rly keys delete provider default -y || true 452 | 453 | # take keys from provider and consumer and add them to gorelayer 454 | rly keys restore provider default "$(cat ${LEAD_VALIDATOR_PROV_DIR}/${LEAD_VALIDATOR_MONIKER}-key.json | jq -r '.mnemonic')" 455 | rly keys restore consumer default "$(cat ${LEAD_VALIDATOR_CONS_DIR}/${LEAD_VALIDATOR_MONIKER}-key.json | jq -r '.mnemonic')" 456 | 457 | rly paths add consumer provider testpath --file go_rly_ics_path_config.json 458 | rly tx clients testpath 459 | rly tx connection testpath 460 | rly tx channel testpath --src-port consumer --dst-port provider --version 1 --order ordered --debug 461 | rly start 462 | 
-------------------------------------------------------------------------------- /cometmock/rpc_server/routes.go: -------------------------------------------------------------------------------- 1 | package rpc_server 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "sort" 7 | "time" 8 | 9 | "github.com/cometbft/cometbft/libs/bytes" 10 | cmtmath "github.com/cometbft/cometbft/libs/math" 11 | cmtquery "github.com/cometbft/cometbft/libs/pubsub/query" 12 | "github.com/cometbft/cometbft/p2p" 13 | 14 | abcitypes "github.com/cometbft/cometbft/abci/types" 15 | ctypes "github.com/cometbft/cometbft/rpc/core/types" 16 | rpc "github.com/cometbft/cometbft/rpc/jsonrpc/server" 17 | rpctypes "github.com/cometbft/cometbft/rpc/jsonrpc/types" 18 | "github.com/cometbft/cometbft/types" 19 | "github.com/cometbft/cometbft/version" 20 | "github.com/informalsystems/CometMock/cometmock/abci_client" 21 | "github.com/informalsystems/CometMock/cometmock/utils" 22 | ) 23 | 24 | const ( 25 | defaultPerPage = 30 26 | maxPerPage = 100 27 | ) 28 | 29 | var Routes = map[string]*rpc.RPCFunc{ 30 | // websocket 31 | "subscribe": rpc.NewWSRPCFunc(Subscribe, "query"), 32 | "unsubscribe": rpc.NewWSRPCFunc(Unsubscribe, "query"), 33 | "unsubscribe_all": rpc.NewWSRPCFunc(UnsubscribeAll, ""), 34 | 35 | // info API 36 | "health": rpc.NewRPCFunc(Health, ""), 37 | "status": rpc.NewRPCFunc(Status, ""), 38 | "validators": rpc.NewRPCFunc(Validators, "height,page,per_page"), 39 | "block": rpc.NewRPCFunc(Block, "height", rpc.Cacheable("height")), 40 | "consensus_params": rpc.NewRPCFunc(ConsensusParams, "height", rpc.Cacheable("height")), 41 | // "header": rpc.NewRPCFunc(Header, "height", rpc.Cacheable("height")), // not available in 0.34.x 42 | "commit": rpc.NewRPCFunc(Commit, "height", rpc.Cacheable("height")), 43 | "block_results": rpc.NewRPCFunc(BlockResults, "height", rpc.Cacheable("height")), 44 | "tx": rpc.NewRPCFunc(Tx, "hash,prove", rpc.Cacheable()), 45 | "tx_search": rpc.NewRPCFunc(TxSearch, 
"query,prove,page,per_page,order_by"), 46 | "block_search": rpc.NewRPCFunc(BlockSearch, "query,page,per_page,order_by"), 47 | 48 | // tx broadcast API 49 | "broadcast_tx_commit": rpc.NewRPCFunc(BroadcastTxCommit, "tx"), 50 | "broadcast_tx_sync": rpc.NewRPCFunc(BroadcastTxSync, "tx"), 51 | "broadcast_tx_async": rpc.NewRPCFunc(BroadcastTxAsync, "tx"), 52 | 53 | // abci API 54 | "abci_query": rpc.NewRPCFunc(ABCIQuery, "path,data,height,prove"), 55 | "abci_info": rpc.NewRPCFunc(ABCIInfo, ""), 56 | 57 | // cometmock specific API 58 | "advance_blocks": rpc.NewRPCFunc(AdvanceBlocks, "num_blocks"), 59 | "set_signing_status": rpc.NewRPCFunc(SetSigningStatus, "private_key_address,status"), 60 | "advance_time": rpc.NewRPCFunc(AdvanceTime, "duration_in_seconds"), 61 | "cause_double_sign": rpc.NewRPCFunc(CauseDoubleSign, "private_key_address"), 62 | "cause_light_client_attack": rpc.NewRPCFunc(CauseLightClientAttack, "private_key_address,misbehaviour_type"), 63 | } 64 | 65 | type ResultCauseLightClientAttack struct{} 66 | 67 | func CauseLightClientAttack(ctx *rpctypes.Context, privateKeyAddress, misbehaviourType string) (*ResultCauseLightClientAttack, error) { 68 | err := abci_client.GlobalClient.CauseLightClientAttack(privateKeyAddress, misbehaviourType) 69 | return &ResultCauseLightClientAttack{}, err 70 | } 71 | 72 | type ResultCauseDoubleSign struct{} 73 | 74 | func CauseDoubleSign(ctx *rpctypes.Context, privateKeyAddress string) (*ResultCauseDoubleSign, error) { 75 | err := abci_client.GlobalClient.CauseDoubleSign(privateKeyAddress) 76 | return &ResultCauseDoubleSign{}, err 77 | } 78 | 79 | type ResultAdvanceTime struct { 80 | NewTime time.Time `json:"new_time"` 81 | } 82 | 83 | // AdvanceTime advances the block time by the given duration. 84 | // This API is specific to CometMock. 
85 | func AdvanceTime(ctx *rpctypes.Context, duration_in_seconds time.Duration) (*ResultAdvanceTime, error) { 86 | if duration_in_seconds < 0 { 87 | return nil, errors.New("duration to advance time by must be greater than 0") 88 | } 89 | 90 | res := abci_client.GlobalClient.TimeHandler.AdvanceTime(duration_in_seconds * time.Second) 91 | return &ResultAdvanceTime{res}, nil 92 | } 93 | 94 | type ResultSetSigningStatus struct { 95 | NewSigningStatusMap map[string]bool `json:"new_signing_status_map"` 96 | } 97 | 98 | func SetSigningStatus(ctx *rpctypes.Context, privateKeyAddress string, status string) (*ResultSetSigningStatus, error) { 99 | if status != "down" && status != "up" { 100 | return nil, errors.New("status must be either `up` to have the validator sign, or `down` to have the validator not sign") 101 | } 102 | 103 | err := abci_client.GlobalClient.SetSigningStatus(privateKeyAddress, status == "up") 104 | 105 | return &ResultSetSigningStatus{ 106 | NewSigningStatusMap: abci_client.GlobalClient.GetSigningStatusMap(), 107 | }, err 108 | } 109 | 110 | type ResultAdvanceBlocks struct{} 111 | 112 | // AdvanceBlocks advances the block height by numBlocks, running empty blocks. 113 | // This API is specific to CometMock. 114 | func AdvanceBlocks(ctx *rpctypes.Context, numBlocks int) (*ResultAdvanceBlocks, error) { 115 | if numBlocks < 1 { 116 | return nil, errors.New("num_blocks must be greater than 0") 117 | } 118 | 119 | err := abci_client.GlobalClient.RunEmptyBlocks(numBlocks) 120 | if err != nil { 121 | return nil, err 122 | } 123 | return &ResultAdvanceBlocks{}, nil 124 | } 125 | 126 | // BlockSearch searches for a paginated set of blocks matching BeginBlock and 127 | // EndBlock event search criteria. 
128 | func BlockSearch( 129 | ctx *rpctypes.Context, 130 | query string, 131 | pagePtr, perPagePtr *int, 132 | orderBy string, 133 | ) (*ctypes.ResultBlockSearch, error) { 134 | q, err := cmtquery.New(query) 135 | if err != nil { 136 | return nil, err 137 | } 138 | 139 | results, err := abci_client.GlobalClient.BlockIndex.Search(ctx.Context(), q) 140 | if err != nil { 141 | return nil, err 142 | } 143 | 144 | // sort results (must be done before pagination) 145 | switch orderBy { 146 | case "desc", "": 147 | sort.Slice(results, func(i, j int) bool { return results[i] > results[j] }) 148 | 149 | case "asc": 150 | sort.Slice(results, func(i, j int) bool { return results[i] < results[j] }) 151 | 152 | default: 153 | return nil, errors.New("expected order_by to be either `asc` or `desc` or empty") 154 | } 155 | 156 | // paginate results 157 | totalCount := len(results) 158 | perPage := validatePerPage(perPagePtr) 159 | 160 | page, err := validatePage(pagePtr, perPage, totalCount) 161 | if err != nil { 162 | return nil, err 163 | } 164 | 165 | skipCount := validateSkipCount(page, perPage) 166 | pageSize := cmtmath.MinInt(perPage, totalCount-skipCount) 167 | 168 | apiResults := make([]*ctypes.ResultBlock, 0, pageSize) 169 | for i := skipCount; i < skipCount+pageSize; i++ { 170 | block, err := abci_client.GlobalClient.Storage.GetBlock(results[i]) 171 | if err != nil { 172 | return nil, err 173 | } 174 | if block != nil { 175 | if err != nil { 176 | return nil, err 177 | } 178 | blockId, err := utils.GetBlockIdFromBlock(block) 179 | if err != nil { 180 | return nil, err 181 | } 182 | 183 | apiResults = append(apiResults, &ctypes.ResultBlock{ 184 | Block: block, 185 | BlockID: *blockId, 186 | }) 187 | } 188 | } 189 | 190 | return &ctypes.ResultBlockSearch{Blocks: apiResults, TotalCount: totalCount}, nil 191 | } 192 | 193 | // Tx allows you to query the transaction results. 
`nil` could mean the 194 | // transaction is in the mempool, invalidated, or was not sent in the first 195 | // place. 196 | // More: https://docs.tendermint.com/v0.34/rpc/#/Info/tx 197 | func Tx(ctx *rpctypes.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) { 198 | txIndexer := abci_client.GlobalClient.TxIndex 199 | 200 | r, err := txIndexer.Get(hash) 201 | if err != nil { 202 | return nil, err 203 | } 204 | 205 | if r == nil { 206 | return nil, fmt.Errorf("tx (%X) not found", hash) 207 | } 208 | 209 | height := r.Height 210 | index := r.Index 211 | 212 | var proof types.TxProof 213 | if prove { 214 | block, err := abci_client.GlobalClient.Storage.GetBlock(height) 215 | if err != nil { 216 | return nil, err 217 | } 218 | proof = block.Data.Txs.Proof(int(index)) // XXX: overflow on 32-bit machines 219 | } 220 | 221 | return &ctypes.ResultTx{ 222 | Hash: hash, 223 | Height: height, 224 | Index: index, 225 | TxResult: r.Result, 226 | Tx: r.Tx, 227 | Proof: proof, 228 | }, nil 229 | } 230 | 231 | // TxSearch allows you to query for multiple transactions results. It returns a 232 | // list of transactions (maximum ?per_page entries) and the total count. 
233 | // More: https://docs.tendermint.com/v0.34/rpc/#/Info/tx_search 234 | func TxSearch( 235 | ctx *rpctypes.Context, 236 | query string, 237 | prove bool, 238 | pagePtr, perPagePtr *int, 239 | orderBy string, 240 | ) (*ctypes.ResultTxSearch, error) { 241 | if len(query) > maxQueryLength { 242 | return nil, errors.New("maximum query length exceeded") 243 | } 244 | 245 | q, err := cmtquery.New(query) 246 | if err != nil { 247 | return nil, err 248 | } 249 | 250 | results, err := abci_client.GlobalClient.TxIndex.Search(ctx.Context(), q) 251 | if err != nil { 252 | return nil, err 253 | } 254 | 255 | // sort results (must be done before pagination) 256 | switch orderBy { 257 | case "desc": 258 | sort.Slice(results, func(i, j int) bool { 259 | if results[i].Height == results[j].Height { 260 | return results[i].Index > results[j].Index 261 | } 262 | return results[i].Height > results[j].Height 263 | }) 264 | case "asc", "": 265 | sort.Slice(results, func(i, j int) bool { 266 | if results[i].Height == results[j].Height { 267 | return results[i].Index < results[j].Index 268 | } 269 | return results[i].Height < results[j].Height 270 | }) 271 | default: 272 | return nil, errors.New("expected order_by to be either `asc` or `desc` or empty") 273 | } 274 | 275 | // paginate results 276 | totalCount := len(results) 277 | perPage := validatePerPage(perPagePtr) 278 | 279 | page, err := validatePage(pagePtr, perPage, totalCount) 280 | if err != nil { 281 | return nil, err 282 | } 283 | 284 | skipCount := validateSkipCount(page, perPage) 285 | pageSize := cmtmath.MinInt(perPage, totalCount-skipCount) 286 | 287 | apiResults := make([]*ctypes.ResultTx, 0, pageSize) 288 | for i := skipCount; i < skipCount+pageSize; i++ { 289 | r := results[i] 290 | 291 | var proof types.TxProof 292 | if prove { 293 | block, err := abci_client.GlobalClient.Storage.GetBlock(r.Height) 294 | if err != nil { 295 | return nil, err 296 | } 297 | proof = block.Data.Txs.Proof(int(r.Index)) // XXX: overflow 
on 32-bit machines 298 | } 299 | 300 | apiResults = append(apiResults, &ctypes.ResultTx{ 301 | Hash: types.Tx(r.Tx).Hash(), 302 | Height: r.Height, 303 | Index: r.Index, 304 | TxResult: r.Result, 305 | Tx: r.Tx, 306 | Proof: proof, 307 | }) 308 | } 309 | 310 | return &ctypes.ResultTxSearch{Txs: apiResults, TotalCount: totalCount}, nil 311 | } 312 | 313 | func getHeight(latestHeight int64, heightPtr *int64) (int64, error) { 314 | if heightPtr != nil { 315 | height := *heightPtr 316 | if height <= 0 { 317 | return 0, fmt.Errorf("height must be greater than 0, but got %d", height) 318 | } 319 | if height > latestHeight { 320 | return 0, fmt.Errorf("height %d must be less than or equal to the current blockchain height %d", 321 | height, latestHeight) 322 | } 323 | return height, nil 324 | } 325 | return latestHeight, nil 326 | } 327 | 328 | // // Header gets block header at a given height. 329 | // // If no height is provided, it will fetch the latest header. 330 | // // More: https://docs.cometbft.com/v0.37/rpc/#/Info/header 331 | // func Header(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultHeader, error) { 332 | // height, err := getHeight(abci_client.GlobalClient.LastBlock.Height, heightPtr) 333 | // if err != nil { 334 | // return nil, err 335 | // } 336 | 337 | // block, err := abci_client.GlobalClient.Storage.GetBlock(height) 338 | // if err != nil { 339 | // return nil, err 340 | // } 341 | 342 | // return &ctypes.ResultHeader{Header: &block.Header}, nil 343 | // } 344 | 345 | // Commit gets block commit at a given height. 346 | // If no height is provided, it will fetch the commit for the latest block. 
347 | // More: https://docs.cometbft.com/main/rpc/#/Info/commit 348 | func Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultCommit, error) { 349 | height, err := getHeight(abci_client.GlobalClient.LastBlock.Height, heightPtr) 350 | if err != nil { 351 | return nil, err 352 | } 353 | 354 | commit, err := abci_client.GlobalClient.Storage.GetCommit(height) 355 | if err != nil { 356 | return nil, err 357 | } 358 | 359 | block, err := abci_client.GlobalClient.Storage.GetBlock(height) 360 | if err != nil { 361 | return nil, err 362 | } 363 | 364 | return ctypes.NewResultCommit(&block.Header, commit, true), nil 365 | } 366 | 367 | // ConsensusParams gets the consensus parameters at the given block height. 368 | // If no height is provided, it will fetch the latest consensus params. 369 | // More: https://docs.cometbft.com/v0.37/rpc/#/Info/consensus_params 370 | func ConsensusParams(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultConsensusParams, error) { 371 | height, err := getHeight(abci_client.GlobalClient.LastBlock.Height, heightPtr) 372 | if err != nil { 373 | return nil, err 374 | } 375 | 376 | stateForHeight, err := abci_client.GlobalClient.Storage.GetState(height) 377 | if err != nil { 378 | return nil, err 379 | } 380 | 381 | consensusParams := stateForHeight.ConsensusParams 382 | 383 | return &ctypes.ResultConsensusParams{ 384 | BlockHeight: height, 385 | ConsensusParams: consensusParams, 386 | }, nil 387 | } 388 | 389 | // Status returns CometBFT status including node info, pubkey, latest block 390 | // hash, app hash, block height and time. 
391 | // More: https://docs.cometbft.com/v0.37/rpc/#/Info/status 392 | func Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) { 393 | // return status as if we are the first validator 394 | curState := abci_client.GlobalClient.CurState 395 | validator := curState.Validators.Validators[0] 396 | 397 | nodeInfo := p2p.DefaultNodeInfo{ 398 | DefaultNodeID: p2p.PubKeyToID(validator.PubKey), 399 | Network: abci_client.GlobalClient.CurState.ChainID, 400 | Other: p2p.DefaultNodeInfoOther{ 401 | TxIndex: "on", 402 | }, 403 | Version: "0.38.0", 404 | ProtocolVersion: p2p.NewProtocolVersion( 405 | version.P2PProtocol, // global 406 | curState.Version.Consensus.Block, 407 | curState.Version.Consensus.App, 408 | ), 409 | } 410 | syncInfo := ctypes.SyncInfo{ 411 | LatestBlockHash: abci_client.GlobalClient.LastBlock.Hash(), 412 | LatestAppHash: abci_client.GlobalClient.LastBlock.AppHash, 413 | LatestBlockHeight: abci_client.GlobalClient.LastBlock.Height, 414 | LatestBlockTime: abci_client.GlobalClient.CurState.LastBlockTime, 415 | CatchingUp: false, 416 | } 417 | validatorInfo := ctypes.ValidatorInfo{ 418 | Address: validator.Address, 419 | PubKey: validator.PubKey, 420 | VotingPower: validator.VotingPower, 421 | } 422 | result := &ctypes.ResultStatus{ 423 | NodeInfo: nodeInfo, 424 | SyncInfo: syncInfo, 425 | ValidatorInfo: validatorInfo, 426 | } 427 | 428 | return result, nil 429 | } 430 | 431 | // Health gets node health. Returns empty result (200 OK) on success, no 432 | // response - in case of an error. 433 | func Health(ctx *rpctypes.Context) (*ctypes.ResultHealth, error) { 434 | return &ctypes.ResultHealth{}, nil 435 | } 436 | 437 | // CURRENTLY UNSUPPORTED - THIS IS BECAUSE IT IS DISCOURAGED TO USE THIS BY COMETBFT 438 | // needs some major changes to work with ABCI++ 439 | // BroadcastTxCommit broadcasts a transaction, 440 | // and wait until it is included in a block and and comitted. 
441 | // In our case, this means running a block with just the the transition, 442 | // then return. 443 | func BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { 444 | return nil, errors.New("BroadcastTxCommit is currently not supported. Try BroadcastTxSync or BroadcastTxAsync instead") 445 | } 446 | 447 | // BroadcastTxSync would normally broadcast a transaction and wait until it gets the result from CheckTx. 448 | // In our case, we run a block with just the transition in it, 449 | // then return. 450 | func BroadcastTxSync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { 451 | abci_client.GlobalClient.Logger.Info( 452 | "BroadcastTxSync called", "tx", tx) 453 | 454 | resBroadcastTx, err := BroadcastTx(&tx) 455 | if err != nil { 456 | return nil, err 457 | } 458 | 459 | return &ctypes.ResultBroadcastTx{ 460 | Code: resBroadcastTx.CheckTx.Code, 461 | Data: resBroadcastTx.CheckTx.Data, 462 | Log: resBroadcastTx.CheckTx.Log, 463 | Hash: resBroadcastTx.Hash, 464 | Codespace: resBroadcastTx.CheckTx.Codespace, 465 | }, nil 466 | } 467 | 468 | // BroadcastTxAsync would normally broadcast a transaction and return immediately. 469 | // In our case, we always include the transition in the next block, and return when that block is committed. 470 | // ResultBroadcastTx is empty, since we do not return the result of CheckTx nor DeliverTx. 
471 | func BroadcastTxAsync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { 472 | abci_client.GlobalClient.Logger.Info( 473 | "BroadcastTxAsync called", "tx", tx) 474 | 475 | _, err := BroadcastTx(&tx) 476 | if err != nil { 477 | return nil, err 478 | } 479 | 480 | return &ctypes.ResultBroadcastTx{}, nil 481 | } 482 | 483 | func BroadcastTx(tx *types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { 484 | abci_client.GlobalClient.Logger.Info( 485 | "BroadcastTxs called", "tx", tx) 486 | 487 | txBytes := []byte(*tx) 488 | checkTxResponse, err := abci_client.GlobalClient.SendCheckTx(abcitypes.CheckTxType_New, &txBytes) 489 | if err != nil { 490 | return nil, err 491 | } 492 | abci_client.GlobalClient.QueueTx(*tx) 493 | 494 | if abci_client.GlobalClient.AutoIncludeTx { 495 | go abci_client.GlobalClient.RunBlock() 496 | } 497 | 498 | return &ctypes.ResultBroadcastTxCommit{ 499 | CheckTx: *checkTxResponse, 500 | Hash: tx.Hash(), 501 | Height: abci_client.GlobalClient.CurState.LastBlockHeight, 502 | }, err 503 | } 504 | 505 | func ABCIInfo(ctx *rpctypes.Context) (*ctypes.ResultABCIInfo, error) { 506 | abci_client.GlobalClient.Logger.Info( 507 | "ABCIInfo called") 508 | 509 | response, err := abci_client.GlobalClient.SendAbciInfo() 510 | return &ctypes.ResultABCIInfo{Response: *response}, err 511 | } 512 | 513 | func ABCIQuery( 514 | ctx *rpctypes.Context, 515 | path string, 516 | data bytes.HexBytes, 517 | height int64, 518 | prove bool, 519 | ) (*ctypes.ResultABCIQuery, error) { 520 | abci_client.GlobalClient.Logger.Info( 521 | "ABCIQuery called", "path", "data", "height", "prove", path, data, height, prove) 522 | 523 | response, err := abci_client.GlobalClient.SendAbciQuery(data, path, height, prove) 524 | if err != nil { 525 | return nil, err 526 | } 527 | 528 | abci_client.GlobalClient.Logger.Info( 529 | "Response to ABCI query", response.String()) 530 | return &ctypes.ResultABCIQuery{Response: *response}, err 531 | } 532 | 533 | func 
Validators(ctx *rpctypes.Context, heightPtr *int64, pagePtr, perPagePtr *int) (*ctypes.ResultValidators, error) { 534 | height, err := getHeight(abci_client.GlobalClient.LastBlock.Height, heightPtr) 535 | if err != nil { 536 | return nil, err 537 | } 538 | 539 | pastState, err := abci_client.GlobalClient.Storage.GetState(height) 540 | if err != nil { 541 | return nil, err 542 | } 543 | 544 | validators := pastState.Validators 545 | 546 | totalCount := len(validators.Validators) 547 | perPage := validatePerPage(perPagePtr) 548 | page, err := validatePage(pagePtr, perPage, totalCount) 549 | if err != nil { 550 | return nil, err 551 | } 552 | 553 | skipCount := validateSkipCount(page, perPage) 554 | 555 | v := validators.Validators[skipCount : skipCount+cmtmath.MinInt(perPage, totalCount-skipCount)] 556 | 557 | return &ctypes.ResultValidators{ 558 | BlockHeight: height, 559 | Validators: v, 560 | Count: len(v), 561 | Total: totalCount, 562 | }, nil 563 | } 564 | 565 | // validatePage is adapted from https://github.com/cometbft/cometbft/blob/9267594e0a17c01cc4a97b399ada5eaa8a734db5/rpc/core/env.go#L107 566 | func validatePage(pagePtr *int, perPage, totalCount int) (int, error) { 567 | if perPage < 1 { 568 | panic(fmt.Sprintf("zero or negative perPage: %d", perPage)) 569 | } 570 | 571 | if pagePtr == nil { // no page parameter 572 | return 1, nil 573 | } 574 | 575 | pages := ((totalCount - 1) / perPage) + 1 576 | if pages == 0 { 577 | pages = 1 // one page (even if it's empty) 578 | } 579 | page := *pagePtr 580 | if page <= 0 || page > pages { 581 | return 1, fmt.Errorf("page should be within [1, %d] range, given %d", pages, page) 582 | } 583 | 584 | return page, nil 585 | } 586 | 587 | // validatePerPage is adapted from https://github.com/cometbft/cometbft/blob/9267594e0a17c01cc4a97b399ada5eaa8a734db5/rpc/core/env.go#L128 588 | func validatePerPage(perPagePtr *int) int { 589 | if perPagePtr == nil { // no per_page parameter 590 | return defaultPerPage 591 | } 592 | 593 
| perPage := *perPagePtr 594 | if perPage < 1 { 595 | return defaultPerPage 596 | } else if perPage > maxPerPage { 597 | return maxPerPage 598 | } 599 | return perPage 600 | } 601 | 602 | // validateSkipCount is adapted from https://github.com/cometbft/cometbft/blob/9267594e0a17c01cc4a97b399ada5eaa8a734db5/rpc/core/env.go#L171 603 | func validateSkipCount(page, perPage int) int { 604 | skipCount := (page - 1) * perPage 605 | if skipCount < 0 { 606 | return 0 607 | } 608 | 609 | return skipCount 610 | } 611 | 612 | func Block(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlock, error) { 613 | height, err := getHeight(abci_client.GlobalClient.LastBlock.Height, heightPtr) 614 | if err != nil { 615 | return nil, err 616 | } 617 | 618 | block, err := abci_client.GlobalClient.Storage.GetBlock(height) 619 | if err != nil { 620 | return nil, err 621 | } 622 | 623 | blockID, err := utils.GetBlockIdFromBlock(block) 624 | if err != nil { 625 | return nil, err 626 | } 627 | 628 | return &ctypes.ResultBlock{BlockID: *blockID, Block: block}, nil 629 | } 630 | 631 | // BlockResults gets ABCIResults at a given height. 632 | // If no height is provided, it will fetch results for the latest block. 633 | // 634 | // Results are for the height of the block containing the txs. 
635 | // Thus response.results.deliver_tx[5] is the results of executing 636 | // getBlock(h).Txs[5] 637 | // More: https://docs.cometbft.com/v0.37/rpc/#/Info/block_results 638 | func BlockResults(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlockResults, error) { 639 | height, err := getHeight(abci_client.GlobalClient.LastBlock.Height, heightPtr) 640 | if err != nil { 641 | return nil, err 642 | } 643 | 644 | results, err := abci_client.GlobalClient.Storage.GetResponses(height) 645 | if err != nil { 646 | return nil, err 647 | } 648 | 649 | return &ctypes.ResultBlockResults{ 650 | Height: height, 651 | TxsResults: results.TxResults, 652 | FinalizeBlockEvents: results.Events, 653 | ValidatorUpdates: results.ValidatorUpdates, 654 | ConsensusParamUpdates: results.ConsensusParamUpdates, 655 | }, nil 656 | } 657 | --------------------------------------------------------------------------------