├── .dockerignore ├── .env.example ├── .github ├── FUNDING.yml └── workflows │ ├── docker-release.yaml │ ├── docker.yaml │ ├── go-test.yaml │ ├── golangci-lint.yaml │ └── pre-commit.yaml ├── .gitignore ├── .golangci.yml ├── .pre-commit-config.yaml ├── Dockerfile ├── LICENSE ├── Makefile ├── NOTICE ├── README.md ├── cmd ├── index.go └── root.go ├── config.toml.example ├── config ├── common_configs.go ├── common_configs_test.go ├── cosmos_config.go ├── filter_config.go ├── filter_config_test.go ├── index_config.go ├── index_config_test.go └── logger.go ├── core ├── block_enqueue.go ├── block_events.go ├── decoding.go ├── processor.go ├── rpc_worker.go └── tx.go ├── cosmos ├── events │ └── normalization.go └── modules │ ├── denoms │ └── types.go │ ├── parsing.go │ └── tx │ ├── logic.go │ └── types.go ├── db ├── db.go ├── db_test.go ├── events.go ├── model_wrappers.go ├── models │ ├── address.go │ ├── block.go │ ├── chain.go │ ├── denom.go │ ├── parsers.go │ └── tx.go ├── parsers.go └── utils.go ├── docker-compose.yaml ├── docs ├── README.md ├── quickstart.md ├── reference │ ├── README.md │ ├── application_workflow │ │ ├── application_workflow.md │ │ └── index_command.md │ ├── custom_cosmos_module_extensions │ │ ├── cosmos_indexer_modules.md │ │ ├── custom_message_type_registration.md │ │ └── probe_codec_walkthrough.md │ ├── custom_data_indexing │ │ ├── custom_indexer_examples.md │ │ ├── custom_indexer_walkthrough.md │ │ ├── indexer_sdk_and_custom_parsers.md │ │ └── indexer_type.md │ ├── default_data_indexing │ │ ├── block_events_indexed_data.md │ │ ├── block_indexed_data.md │ │ └── transactions_indexed_data.md │ └── images │ │ ├── block-db.png │ │ ├── block-events-db.png │ │ ├── tx-db.png │ │ └── workflow.png └── usage │ ├── README.md │ ├── configuration.md │ ├── filtering.md │ ├── indexing.md │ └── installation.md ├── examples ├── block-sdk-indexer │ ├── .gitignore │ ├── demo.sql │ ├── filter.json │ ├── go.mod │ ├── go.sum │ └── main.go ├── gov-voting-patterns │ ├── 
demo-indexed-txes.sql │ ├── demo.sql │ └── main.go ├── ibc-patterns │ └── main.go ├── terra-classic-indexer │ ├── .gitignore │ ├── go.mod │ ├── go.sum │ └── main.go └── validator-delegation-patterns │ └── main.go ├── filter ├── block_event_filter_registry.go ├── message_filter.go ├── message_type_filters.go └── static_block_event_filters.go ├── go.mod ├── go.sum ├── indexer ├── db.go ├── process.go ├── registration.go └── types.go ├── main.go ├── notes ├── DatabaseSetup.png ├── delete-db.sql ├── message_types.txt └── transaction_log.txt ├── parsers ├── block_events.go └── messages.go ├── probe └── probe.go ├── rpc ├── blocks.go └── requests.go ├── setup ├── linux-createdb.sh ├── local-node.sh ├── pull-images.sh └── sample_data_1.sh ├── tools └── dump-failed-block-heights │ ├── .env.template │ ├── .gitignore │ ├── main.py │ └── requirements.txt └── util └── utils.go /.dockerignore: -------------------------------------------------------------------------------- 1 | config.toml 2 | .env 3 | -------------------------------------------------------------------------------- /.env.example: -------------------------------------------------------------------------------- 1 | PRETTY_LOG=true 2 | LOG_LEVEL="info" 3 | INDEX_TRANSACTIONS=true 4 | INDEX_BLOCK_EVENTS=false 5 | START_BLOCK=1 6 | END_BLOCK=-1 7 | THROTTLING=3.0 8 | RPC_WORKERS=1 9 | REINDEX=false 10 | 11 | # update these for sure 12 | RPC_URL="" 13 | ACCOUNT_PREFIX="" 14 | CHAIN_ID="" 15 | CHAIN_NAME="" 16 | POSTGRES_DB="" 17 | POSTGRES_USER="" 18 | POSTGRES_PASSWORD="" 19 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | custom: ["https://wallet.keplr.app/chains/osmosis?modal=validator&chain=osmosis-1&validator_address=osmovaloper1dnmz4yzv73lr3lmauuaa0wpwn8zm8s20lg3pg9", defiantlabs.net] 2 | -------------------------------------------------------------------------------- 
/.github/workflows/docker-release.yaml: -------------------------------------------------------------------------------- 1 | name: Docker multi-arch Package 2 | 3 | on: 4 | workflow_dispatch: 5 | inputs: 6 | logLevel: 7 | description: 'Log level' 8 | required: false 9 | default: 'warning' 10 | type: choice 11 | options: 12 | - info 13 | - warning 14 | - debug 15 | tags: 16 | description: 'Test scenario tags' 17 | required: false 18 | type: boolean 19 | environment: 20 | description: 'Environment to run tests against' 21 | type: environment 22 | required: false 23 | release: 24 | types: [published] 25 | 26 | env: 27 | REGISTRY: ghcr.io 28 | IMAGE_NAME: ${{ github.repository }} 29 | 30 | jobs: 31 | build-and-push-image: 32 | runs-on: ubuntu-latest 33 | permissions: 34 | contents: read 35 | packages: write 36 | id-token: write 37 | 38 | steps: 39 | - name: Checkout repository 40 | uses: actions/checkout@v3 41 | 42 | - name: Set up QEMU 43 | uses: docker/setup-qemu-action@v1 44 | 45 | - name: Set up Docker Buildx 46 | uses: docker/setup-buildx-action@v1 47 | 48 | - name: Log in to the Container registry 49 | uses: docker/login-action@v1 50 | with: 51 | registry: ${{ env.REGISTRY }} 52 | username: ${{ github.actor }} 53 | password: ${{ secrets.GITHUB_TOKEN }} 54 | 55 | - name: Extract metadata (tags, labels) for Docker 56 | id: meta 57 | uses: docker/metadata-action@v4 58 | with: 59 | images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} 60 | labels: | 61 | maintainer=danb 62 | org.opencontainers.image.source=https://github.com/defiantlabs/cosmos-indexer 63 | org.opencontainers.image.title=cosmos-indexer 64 | org.opencontainers.image.description=Opensource Cosmos Tax Tool 65 | org.opencontainers.image.vendor=Defiant 66 | tags: | 67 | type=sha,enable=true,prefix=sha-,suffix=,format=short,priority=1 68 | type=semver,pattern={{raw}},priority=2 69 | type=edge,branch=main,priority=3 70 | type=ref,event=branch,priority=4 71 | type=ref,event=tag,priority=5 72 | 
type=ref,event=pr,priority=6 73 | 74 | - name: Build and push Docker image 75 | uses: docker/build-push-action@v3 76 | with: 77 | context: . 78 | platforms: linux/amd64,linux/arm64 79 | file: Dockerfile 80 | push: true 81 | tags: ${{ steps.meta.outputs.tags }} 82 | labels: ${{ steps.meta.outputs.labels }} 83 | cache-from: type=gha 84 | cache-to: type=gha,mode=max 85 | -------------------------------------------------------------------------------- /.github/workflows/docker.yaml: -------------------------------------------------------------------------------- 1 | name: Docker Package 2 | 3 | on: 4 | workflow_dispatch: 5 | inputs: 6 | logLevel: 7 | description: 'Log level' 8 | required: false 9 | default: 'warning' 10 | type: choice 11 | options: 12 | - info 13 | - warning 14 | - debug 15 | tags: 16 | description: 'Test scenario tags' 17 | required: false 18 | type: boolean 19 | environment: 20 | description: 'Environment to run tests against' 21 | type: environment 22 | required: false 23 | pull_request: 24 | types: 25 | - opened 26 | - synchronize 27 | - reopened 28 | - ready_for_review 29 | push: 30 | branches: 31 | - main 32 | 33 | env: 34 | REGISTRY: ghcr.io 35 | IMAGE_NAME: ${{ github.repository }} 36 | 37 | jobs: 38 | build-and-push-image: 39 | runs-on: ubuntu-latest 40 | permissions: 41 | contents: read 42 | packages: write 43 | id-token: write 44 | 45 | steps: 46 | - name: Checkout repository 47 | uses: actions/checkout@v3 48 | 49 | - name: Set up QEMU 50 | uses: docker/setup-qemu-action@v1 51 | 52 | - name: Set up Docker Buildx 53 | uses: docker/setup-buildx-action@v1 54 | 55 | - name: Log in to the Container registry 56 | uses: docker/login-action@v1 57 | with: 58 | registry: ${{ env.REGISTRY }} 59 | username: ${{ github.actor }} 60 | password: ${{ secrets.GITHUB_TOKEN }} 61 | 62 | - name: Extract metadata (tags, labels) for Docker 63 | id: meta 64 | uses: docker/metadata-action@v4 65 | with: 66 | images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} 67 | 
labels: | 68 | maintainer=danb 69 | org.opencontainers.image.source=https://github.com/defiantlabs/cosmos-indexer 70 | org.opencontainers.image.title=cosmos-indexer 71 | org.opencontainers.image.description=Opensource Cosmos Tax Tool 72 | org.opencontainers.image.vendor=Defiant 73 | tags: | 74 | type=sha,enable=true,prefix=sha-,suffix=,format=short,priority=1 75 | type=semver,pattern={{raw}},priority=2 76 | type=edge,branch=main,priority=3 77 | type=ref,event=branch,priority=4 78 | type=ref,event=tag,priority=5 79 | type=ref,event=pr,priority=6 80 | 81 | - name: Build and push Docker image 82 | uses: docker/build-push-action@v3 83 | with: 84 | context: . 85 | platforms: linux/amd64 86 | file: Dockerfile 87 | push: true 88 | tags: ${{ steps.meta.outputs.tags }} 89 | labels: ${{ steps.meta.outputs.labels }} 90 | cache-from: type=gha 91 | cache-to: type=gha,mode=max 92 | -------------------------------------------------------------------------------- /.github/workflows/go-test.yaml: -------------------------------------------------------------------------------- 1 | name: go-test 2 | on: 3 | pull_request: 4 | types: 5 | - opened 6 | - synchronize 7 | - reopened 8 | - ready_for_review 9 | push: 10 | branches: 11 | - main 12 | 13 | env: 14 | GO_VERSION: '^1.22' 15 | 16 | jobs: 17 | go-test: 18 | runs-on: ubuntu-latest 19 | steps: 20 | - uses: actions/setup-go@v3 21 | with: 22 | go-version: ${{ env.GO_VERSION }} 23 | - uses: actions/checkout@v3 24 | - name: test code 25 | run: | 26 | go test -timeout 30m -v ./... 27 | -------------------------------------------------------------------------------- /.github/workflows/golangci-lint.yaml: -------------------------------------------------------------------------------- 1 | name: golangci-lint 2 | on: 3 | push: 4 | branches: 5 | - master 6 | - main 7 | pull_request: 8 | 9 | permissions: 10 | contents: read 11 | # Optional: allow read access to pull request. Use with `only-new-issues` option. 
12 | # pull-requests: read 13 | 14 | jobs: 15 | golangci: 16 | name: lint 17 | runs-on: ubuntu-latest 18 | steps: 19 | - uses: actions/checkout@v3 20 | - uses: actions/setup-go@v4 21 | with: 22 | go-version: '1.22' 23 | cache: false 24 | - name: golangci-lint 25 | uses: golangci/golangci-lint-action@v3 26 | with: 27 | # Require: The version of golangci-lint to use. 28 | # When `install-mode` is `binary` (default) the value can be v1.2 or v1.2.3 or `latest` to use the latest version. 29 | # When `install-mode` is `goinstall` the value can be v1.2.3, `latest`, or the hash of a commit. 30 | version: v1.55.2 31 | 32 | # Optional: working directory, useful for monorepos 33 | # working-directory: somedir 34 | 35 | # Optional: golangci-lint command line arguments. 36 | # 37 | # Note: By default, the `.golangci.yml` file should be at the root of the repository. 38 | # The location of the configuration file can be changed by using `--config=` 39 | # args: --timeout=30m --config=/my/path/.golangci.yml --issues-exit-code=0 40 | 41 | # Optional: show only new issues if it's a pull request. The default value is `false`. 42 | # only-new-issues: true 43 | 44 | # Optional: if set to true, then all caching functionality will be completely disabled, 45 | # takes precedence over all other caching options. 46 | # skip-cache: true 47 | 48 | # Optional: if set to true, then the action won't cache or restore ~/go/pkg. 49 | # skip-pkg-cache: true 50 | 51 | # Optional: if set to true, then the action won't cache or restore ~/.cache/go-build. 52 | # skip-build-cache: true 53 | 54 | # Optional: The mode to install golangci-lint. It can be 'binary' or 'goinstall'. 
55 | # install-mode: "goinstall" 56 | -------------------------------------------------------------------------------- /.github/workflows/pre-commit.yaml: -------------------------------------------------------------------------------- 1 | name: pre-commit checks 2 | on: 3 | pull_request: 4 | types: 5 | - opened 6 | - synchronize 7 | - reopened 8 | - ready_for_review 9 | push: 10 | branches: 11 | - main 12 | env: 13 | GO_VERSION: '^1.22' 14 | 15 | jobs: 16 | pre-commit: 17 | runs-on: ubuntu-latest 18 | steps: 19 | - uses: actions/checkout@v3 20 | - uses: actions/setup-go@v3.3.1 21 | with: 22 | go-version: ${{ env.GO_VERSION }} 23 | - run: go version 24 | - run: go install golang.org/x/tools/cmd/goimports@latest 25 | - run: go install golang.org/x/lint/golint@latest 26 | - run: curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s - -b $(go env GOPATH)/bin v1.51.2 27 | - run: echo "PATH=$PATH:/home/runner/go/bin" >> $GITHUB_ENV 28 | - uses: pre-commit/action@v3.0.0 29 | - uses: pre-commit-ci/lite-action@v1.0.1 30 | if: always() 31 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | config.toml 2 | cosmos-indexer 3 | env 4 | vendor/ 5 | **/.DS_Store 6 | .idea/* 7 | .vscode/* 8 | log.txt 9 | .env 10 | -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | run: 2 | tests: true 3 | # timeout for analysis, e.g. 
30s, 5m, default is 1m 4 | timeout: 5m 5 | 6 | linters: 7 | disable-all: true 8 | enable: 9 | - dogsled 10 | - exportloopref 11 | - errcheck 12 | - goconst 13 | - gocritic 14 | - gofumpt 15 | - gosec 16 | - gosimple 17 | - govet 18 | - ineffassign 19 | - misspell 20 | - nakedret 21 | - staticcheck 22 | - stylecheck 23 | - revive 24 | - typecheck 25 | - unconvert 26 | - unused 27 | - misspell 28 | 29 | issues: 30 | exclude-rules: 31 | - text: "unused-parameter" 32 | linters: 33 | - revive 34 | - text: "Use of weak random number generator" 35 | linters: 36 | - gosec 37 | - text: "ST1003:" 38 | linters: 39 | - stylecheck 40 | # FIXME: Disabled until golangci-lint updates stylecheck with this fix: 41 | # https://github.com/dominikh/go-tools/issues/389 42 | - text: "ST1016:" 43 | linters: 44 | - stylecheck 45 | max-issues-per-linter: 10000 46 | max-same-issues: 10000 47 | 48 | linters-settings: 49 | dogsled: 50 | max-blank-identifiers: 3 51 | maligned: 52 | # print struct with more effective memory layout or not, false by default 53 | suggest-new: true 54 | nolintlint: 55 | allow-unused: false 56 | allow-leading-space: true 57 | require-explanation: false 58 | require-specific: false 59 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/pre-commit/pre-commit-hooks 3 | rev: v4.3.0 4 | hooks: 5 | - id: check-yaml 6 | - id: end-of-file-fixer 7 | - id: trailing-whitespace 8 | - id: check-case-conflict 9 | - id: check-merge-conflict 10 | - id: check-added-large-files 11 | - id: detect-private-key 12 | - repo: https://github.com/dnephin/pre-commit-golang 13 | rev: master 14 | hooks: 15 | - id: go-fmt 16 | # - id: go-vet 17 | # - id: go-lint 18 | - id: go-imports 19 | # - id: go-cyclo 20 | # args: [-over=15] 21 | - id: validate-toml 22 | # - id: no-go-testing 23 | # - id: go-critic 24 | # - id: 
go-unit-tests 25 | - id: go-build 26 | - id: go-mod-tidy 27 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.22-alpine3.18 AS build-env 2 | 3 | # Customize to your build env 4 | 5 | # TARGETPLATFORM should be one of linux/amd64 or linux/arm64 6 | ARG TARGETPLATFORM 7 | 8 | # Use muslc for static libs 9 | ARG BUILD_TAGS=muslc 10 | ARG LD_FLAGS=-linkmode=external -extldflags '-Wl,-z,muldefs -static' 11 | 12 | # Install cli tools for building and final image 13 | RUN apk add --update --no-cache curl make git libc-dev bash gcc linux-headers eudev-dev ncurses-dev libc6-compat jq htop atop iotop 14 | 15 | # Install build dependencies. 16 | RUN if [ "${TARGETPLATFORM}" = "linux/amd64" ] ; then \ 17 | wget -P /lib https://github.com/CosmWasm/wasmvm/releases/download/v1.2.3/libwasmvm_muslc.x86_64.a ; \ 18 | cp /lib/libwasmvm_muslc.x86_64.a /lib/libwasmvm_muslc.a ; \ 19 | fi 20 | 21 | RUN if [ "${TARGETPLATFORM}" = "linux/arm64" ] ; then \ 22 | wget -P /lib https://github.com/CosmWasm/wasmvm/releases/download/v1.2.3/libwasmvm_muslc.aarch64.a ; \ 23 | cp /lib/libwasmvm_muslc.aarch64.a /lib/libwasmvm_muslc.a ; \ 24 | fi 25 | 26 | # Build main app. 27 | WORKDIR /go/src/app 28 | COPY . . 
29 | RUN if [ "${TARGETPLATFORM}" = "linux/amd64" ] ; then \ 30 | GOOS=linux GOARCH=amd64 CGO_ENABLED=1 go install -ldflags ${LD_FLAGS} -tags ${BUILD_TAGS} ; \ 31 | fi 32 | 33 | RUN if [ "${TARGETPLATFORM}" = "linux/arm64" ] ; then \ 34 | GOOS=linux GOARCH=arm64 CGO_ENABLED=1 go install -ldflags ${LD_FLAGS} -tags ${BUILD_TAGS} ; \ 35 | fi 36 | 37 | # Use busybox to create a user 38 | FROM busybox:stable-musl AS busybox 39 | RUN addgroup --gid 1137 -S cosmos-indexer && adduser --uid 1137 -S cosmos-indexer -G cosmos-indexer 40 | 41 | # Use scratch for the final image 42 | FROM scratch 43 | WORKDIR /home/cosmos-indexer 44 | 45 | # Label should match your github repo 46 | LABEL org.opencontainers.image.source="https://github.com/defiantlabs/cosmos-indexer" 47 | 48 | # Installs all binaries built with go. 49 | COPY --from=build-env /go/bin /bin 50 | 51 | # Other binaries we want to keep. 52 | COPY --from=build-env /usr/bin/ldd /bin/ldd 53 | COPY --from=build-env /usr/bin/curl /bin/curl 54 | COPY --from=build-env /usr/bin/jq /bin/jq 55 | COPY --from=build-env /usr/bin/htop /bin/htop 56 | COPY --from=build-env /usr/bin/atop /bin/atop 57 | 58 | # Install Libraries 59 | # cosmos-indexer 60 | COPY --from=build-env /usr/lib/libgcc_s.so.1 /lib/ 61 | COPY --from=build-env /lib/ld-musl*.so.1* /lib 62 | 63 | # jq Libraries 64 | COPY --from=build-env /usr/lib/libonig.so.5 /lib 65 | 66 | # curl Libraries 67 | COPY --from=build-env /usr/lib/libcurl.so.4 /lib 68 | COPY --from=build-env /lib/libz.so.1 /lib 69 | COPY --from=build-env /usr/lib/libnghttp2.so.14 /lib 70 | COPY --from=build-env /lib/libssl.so.3 /lib 71 | COPY --from=build-env /lib/libcrypto.so.3 /lib 72 | COPY --from=build-env /usr/lib/libbrotlidec.so.1 /lib 73 | COPY --from=build-env /usr/lib/libbrotlicommon.so.1 /lib 74 | 75 | # htop/atop libs 76 | COPY --from=build-env /usr/lib/libncursesw.so.6 /lib 77 | 78 | # Install trusted CA certificates 79 | COPY --from=build-env /etc/ssl/cert.pem /etc/ssl/cert.pem 80 | 81 | # 
Install cli tools from busybox 82 | COPY --from=busybox /bin/ln /bin/ln 83 | COPY --from=busybox /bin/dd /bin/dd 84 | COPY --from=busybox /bin/vi /bin/vi 85 | COPY --from=busybox /bin/chown /bin/chown 86 | COPY --from=busybox /bin/id /bin/id 87 | COPY --from=busybox /bin/cp /bin/cp 88 | COPY --from=busybox /bin/ls /bin/ls 89 | COPY --from=busybox /bin/busybox /bin/sh 90 | COPY --from=busybox /bin/cat /bin/cat 91 | COPY --from=busybox /bin/less /bin/less 92 | COPY --from=busybox /bin/grep /bin/grep 93 | COPY --from=busybox /bin/sleep /bin/sleep 94 | COPY --from=busybox /bin/env /bin/env 95 | COPY --from=busybox /bin/tar /bin/tar 96 | COPY --from=busybox /bin/tee /bin/tee 97 | COPY --from=busybox /bin/du /bin/du 98 | COPY --from=busybox /bin/df /bin/df 99 | COPY --from=busybox /bin/nc /bin/nc 100 | COPY --from=busybox /bin/netstat /bin/netstat 101 | 102 | # Copy user from busybox to scratch 103 | COPY --from=busybox /etc/passwd /etc/passwd 104 | COPY --from=busybox --chown=1137:1137 /home/cosmos-indexer /home/cosmos-indexer 105 | 106 | # Set home directory and user 107 | WORKDIR /home/cosmos-indexer 108 | RUN chown -R cosmos-indexer /home/cosmos-indexer 109 | USER cosmos-indexer 110 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | #!/usr/bin/make -f 2 | 3 | BRANCH := $(shell git rev-parse --abbrev-ref HEAD) 4 | COMMIT := $(shell git log -1 --format='%H') 5 | 6 | # don't override user values 7 | ifeq (,$(VERSION)) 8 | VERSION := $(shell git describe --tags) 9 | # if VERSION is empty, then populate it with branch's name and raw commit hash 10 | ifeq (,$(VERSION)) 11 | VERSION := $(BRANCH)-$(COMMIT) 12 | endif 13 | endif 14 | 15 | # default value, overide with: make -e FQCN="foo" 16 | FQCN = ghcr.io/defiantlabs/cosmos-indexer 17 | 18 | all: install 19 | 20 | install: go.sum 21 | go install . 
22 | 23 | build: 24 | go build -o bin/cosmos-indexer . 25 | 26 | clean: 27 | rm -rf build 28 | 29 | build-docker-amd: 30 | docker build -t $(FQCN):$(VERSION) -f ./Dockerfile \ 31 | --build-arg TARGETPLATFORM=linux/amd64 . 32 | 33 | build-docker-arm: 34 | docker build -t $(FQCN):$(VERSION) -f ./Dockerfile \ 35 | --build-arg TARGETPLATFORM=linux/arm64 . 36 | 37 | .PHONY: lint 38 | lint: ## Run golangci-linter 39 | golangci-lint run --out-format=tab 40 | 41 | .PHONY: format 42 | format: ## Formats the code with gofumpt 43 | find . -name '*.go' -type f -not -path "./vendor*" -not -path "*.git*" -not -path "./client/docs/*" | xargs gofumpt -w 44 | -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | Defiant Labs cosmos-indexer 2 | Copyright [2021-2022] Defiant Labs, LLC 3 | 4 | This product includes software developed at 5 | Defiant Labs, LLC (https://defiantlabs.net/). 6 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Cosmos Indexer 2 | 3 | The Cosmos Indexer is an open-source application designed to index a Cosmos chain to a generalized Transaction/Event DB schema. Its mission is to offer a flexible DB schema compatible with all Cosmos SDK Chains while simplifying the indexing process to allow developers flexible ways to store custom indexed data. 4 | 5 | ## Quick Start 6 | 7 | You can use our `docker-compose` file for a quick demo of how to run the indexer, DB, web client, and UI. 8 | 9 | ```shell 10 | docker compose up 11 | ``` 12 | Keep an eye on the output for the index and access the web server through the provided link. 13 | 14 | ## Getting Started 15 | 16 | It's indexing time! Follow the steps below to get started. 
17 | 18 | ### Prerequisites 19 | 20 | Before you can start indexing a chain, you need to set up the application's dependencies: 21 | 22 | #### PostgreSQL 23 | The application requires a PostgreSQL server with an established database and an owner user/role with password login. Here's a simple example of setting up a containerized database locally [here](https://towardsdatascience.com/local-development-set-up-of-postgresql-with-docker-c022632f13ea). 24 | 25 | #### Go 26 | The application is written in Go, so you need to build it from source. This requires a system installation of at minimum Go 1.22. Instructions for installing and configuring Go can be found [here](https://go.dev/doc/install). 27 | 28 | ## Indexing and Querying 29 | 30 | You are now ready to index and query the chain. For detailed steps, check out the [Indexing](#indexing) and [Querying](#querying) sections below. 31 | 32 | ## CLI Syntax 33 | 34 | The Cosmos Indexer tool provides several settings and commands which are accessible via a config file or through CLI flags. You can learn about the CLI flags and their function by running `go run main.go` to display the application help text. 35 | 36 | For more detailed information on the settings, refer to the [Config](#config) section. 37 | 38 | ### Config 39 | 40 | The config file, used to set up the Cosmos Tax CLI tool, is broken into four main 41 | 42 | sections: 43 | 44 | 1. [Log](#log) 45 | 2. [Database](#database) 46 | 3. [Base](#base) 47 | 4. [Probe](#probe) 48 | 49 | #### Log 50 | 51 | The Log section covers settings related to logging levels and formats, including log file paths and whether to use [ZeroLog's](https://github.com/rs/zerolog) pretty logging. 52 | 53 | #### Database 54 | 55 | The Database section defines the settings needed to connect to the database server and to configure the logging level of the ORM. 
56 | 57 | #### Base 58 | 59 | The Base section contains the core settings for the tool, such as API endpoints, block ranges, indexing behavior, and more. 60 | 61 | #### Probe 62 | 63 | The probe section configures [probe](https://github.com/DefiantLabs/probe) used by the tool to read data from the blockchain. This is built into the application and doesn't need to be installed separately. 64 | 65 | For detailed descriptions of each setting in these sections, please refer to the [Detailed Config Explanation](#detailed-config-explanation) section below. 66 | 67 | ## Detailed Config Explanation 68 | 69 | This section provides an in-depth description of each setting available in the config file. For further details, refer to the inline documentation within the config file. 70 | -------------------------------------------------------------------------------- /cmd/root.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "os" 7 | "strings" 8 | "time" 9 | 10 | "github.com/DefiantLabs/cosmos-indexer/config" 11 | "github.com/DefiantLabs/cosmos-indexer/db" 12 | "github.com/spf13/cobra" 13 | "github.com/spf13/pflag" 14 | "github.com/spf13/viper" 15 | "gorm.io/gorm" 16 | ) 17 | 18 | var ( 19 | cfgFile string // config file location to load 20 | rootCmd = &cobra.Command{ 21 | Use: "cosmos-indexer", 22 | Short: "A CLI tool for indexing and querying on-chain data", 23 | Long: `Cosmos Tax CLI is a CLI tool for indexing and querying Cosmos-based blockchains, 24 | with a heavy focus on taxable events.`, 25 | } 26 | viperConf = viper.New() 27 | ) 28 | 29 | func GetRootCmd() *cobra.Command { 30 | return rootCmd 31 | } 32 | 33 | // Execute executes the root command. 
34 | func Execute() error { 35 | return rootCmd.Execute() 36 | } 37 | 38 | func init() { 39 | cobra.OnInitialize(getViperConfig) 40 | rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file location (default is /config.toml)") 41 | } 42 | 43 | func getViperConfig() { 44 | v := viper.New() 45 | 46 | if cfgFile != "" { 47 | v.SetConfigFile(cfgFile) 48 | v.SetConfigType("toml") 49 | } else { 50 | // Check in current working dir 51 | pwd, err := os.Getwd() 52 | if err != nil { 53 | log.Fatalf("Could not determine current working dir. Err: %v", err) 54 | } 55 | if _, err := os.Stat(fmt.Sprintf("%v/config.toml", pwd)); err == nil { 56 | cfgFile = pwd 57 | } else { 58 | // file not in current working dir. Check home dir instead 59 | // Find home directory. 60 | home, err := os.UserHomeDir() 61 | if err != nil { 62 | log.Fatalf("Failed to find user home dir. Err: %v", err) 63 | } 64 | cfgFile = fmt.Sprintf("%s/.cosmos-indexer", home) 65 | } 66 | v.AddConfigPath(cfgFile) 67 | v.SetConfigType("toml") 68 | v.SetConfigName("config") 69 | } 70 | 71 | // Load defaults into a file at $HOME? 72 | var noConfig bool 73 | err := v.ReadInConfig() 74 | if err != nil { 75 | switch { 76 | case strings.Contains(err.Error(), "Config File \"config\" Not Found"): 77 | noConfig = true 78 | case strings.Contains(err.Error(), "incomplete number"): 79 | log.Fatalf("Failed to read config file %v. This usually means you forgot to wrap a string in quotes.", err) 80 | default: 81 | log.Fatalf("Failed to read config file. Err: %v", err) 82 | } 83 | } 84 | 85 | if !noConfig { 86 | log.Println("CFG successfully read from: ", cfgFile) 87 | } 88 | 89 | viperConf = v 90 | } 91 | 92 | func GetViperConfig() *viper.Viper { 93 | return viperConf 94 | } 95 | 96 | // Set config vars from cpnfig file not already specified on command line. 
97 | func BindFlags(cmd *cobra.Command, v *viper.Viper) { 98 | cmd.Flags().VisitAll(func(f *pflag.Flag) { 99 | configName := f.Name 100 | 101 | // Apply the viper config value to the flag when the flag is not set and viper has a value 102 | if !f.Changed && v.IsSet(configName) { 103 | val := v.Get(configName) 104 | err := cmd.Flags().Set(f.Name, fmt.Sprintf("%v", val)) 105 | if err != nil { 106 | log.Fatalf("Failed to bind config file value %v. Err: %v", configName, err) 107 | } 108 | } 109 | }) 110 | } 111 | 112 | func setupLogger(logLevel string, logPath string, prettyLogging bool) { 113 | config.DoConfigureLogger(logPath, logLevel, prettyLogging) 114 | } 115 | 116 | func ConnectToDBAndMigrate(dbConfig config.Database) (*gorm.DB, error) { 117 | database, err := db.PostgresDbConnect(dbConfig.Host, dbConfig.Port, dbConfig.Database, dbConfig.User, dbConfig.Password, strings.ToLower(dbConfig.LogLevel)) 118 | if err != nil { 119 | config.Log.Fatal("Could not establish connection to the database", err) 120 | } 121 | 122 | sqldb, _ := database.DB() 123 | sqldb.SetMaxIdleConns(10) 124 | sqldb.SetMaxOpenConns(100) 125 | sqldb.SetConnMaxLifetime(time.Hour) 126 | 127 | err = db.MigrateModels(database) 128 | if err != nil { 129 | config.Log.Error("Error running DB migrations", err) 130 | } 131 | 132 | return database, err 133 | } 134 | -------------------------------------------------------------------------------- /config.toml.example: -------------------------------------------------------------------------------- 1 | [log] 2 | level = "debug" 3 | path = "./log.txt" 4 | pretty = true 5 | 6 | #App configuration values 7 | [base] 8 | start-block = 1 # start indexing at beginning of the blockchain, -1 to resume from highest block indexed 9 | end-block = -1 # stop indexing at this block, -1 to never stop indexing 10 | throttling = 6.00 11 | block-timer = 10000 #print out how long it takes to process this many blocks 12 | wait-for-chain = false #if true, indexer will start when 
the node is caught up to the blockchain 13 | wait-for-chain-delay = 10 #seconds to wait between each check for node to catch up to the chain 14 | index-transactions = true #If false, we won't attempt to index the chain 15 | exit-when-caught-up = true #mainly used for Osmosis rewards indexing 16 | index-block-events = false #index block events for the particular chain 17 | dry = false # if true, indexing will occur but data will not be written to the database. 18 | rpc-workers = 1 19 | reindex = true 20 | reattempt-failed-blocks = false 21 | 22 | # Provides a filter configuration to skip block events or message types based on patterns 23 | # filter-file="filter-config.json" 24 | 25 | #Lens config options 26 | [probe] 27 | rpc = "http://public.rpc.updateme:443" 28 | account-prefix = "cosmos" 29 | chain-id = "cosmoshub-4" 30 | chain-name = "CosmosHub" 31 | 32 | # Flags for extending or modifying the indexed dataset 33 | [flags] 34 | index-tx-message-raw=false 35 | 36 | [database] 37 | host = "localhost" 38 | port = "5432" 39 | database = "" 40 | user = "" 41 | password = "" 42 | log-level = "" 43 | -------------------------------------------------------------------------------- /config/common_configs.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "reflect" 7 | "strings" 8 | 9 | "github.com/DefiantLabs/cosmos-indexer/util" 10 | "github.com/spf13/cobra" 11 | ) 12 | 13 | // These configs are used across multiple commands, and are not specific to a single command 14 | type log struct { 15 | Level string 16 | Path string 17 | Pretty bool 18 | } 19 | 20 | type Database struct { 21 | Host string 22 | Port string 23 | Database string 24 | User string 25 | Password string 26 | LogLevel string `mapstructure:"log-level"` 27 | } 28 | 29 | type Probe struct { 30 | RPC string 31 | AccountPrefix string `mapstructure:"account-prefix"` 32 | ChainID string `mapstructure:"chain-id"` 33 | 
ChainName string `mapstructure:"chain-name"` 34 | } 35 | 36 | type throttlingBase struct { 37 | Throttling float64 `mapstructure:"throttling"` 38 | } 39 | 40 | type retryBase struct { 41 | RequestRetryAttempts int64 `mapstructure:"request-retry-attempts"` 42 | RequestRetryMaxWait uint64 `mapstructure:"request-retry-max-wait"` 43 | } 44 | 45 | func SetupLogFlags(logConf *log, cmd *cobra.Command) { 46 | cmd.PersistentFlags().StringVar(&logConf.Level, "log.level", "info", "log level") 47 | cmd.PersistentFlags().BoolVar(&logConf.Pretty, "log.pretty", false, "pretty logs") 48 | cmd.PersistentFlags().StringVar(&logConf.Path, "log.path", "", "log path (default is $HOME/.cosmos-indexer/logs.txt") 49 | } 50 | 51 | func SetupDatabaseFlags(databaseConf *Database, cmd *cobra.Command) { 52 | cmd.PersistentFlags().StringVar(&databaseConf.Host, "database.host", "", "database host") 53 | cmd.PersistentFlags().StringVar(&databaseConf.Port, "database.port", "5432", "database port") 54 | cmd.PersistentFlags().StringVar(&databaseConf.Database, "database.database", "", "database name") 55 | cmd.PersistentFlags().StringVar(&databaseConf.User, "database.user", "", "database user") 56 | cmd.PersistentFlags().StringVar(&databaseConf.Password, "database.password", "", "database password") 57 | cmd.PersistentFlags().StringVar(&databaseConf.LogLevel, "database.log-level", "", "database loglevel") 58 | } 59 | 60 | func SetupProbeFlags(probeConf *Probe, cmd *cobra.Command) { 61 | cmd.PersistentFlags().StringVar(&probeConf.RPC, "probe.rpc", "", "node rpc endpoint") 62 | cmd.PersistentFlags().StringVar(&probeConf.AccountPrefix, "probe.account-prefix", "", "probe account prefix") 63 | cmd.PersistentFlags().StringVar(&probeConf.ChainID, "probe.chain-id", "", "probe chain ID") 64 | cmd.PersistentFlags().StringVar(&probeConf.ChainName, "probe.chain-name", "", "probe chain name") 65 | } 66 | 67 | func SetupThrottlingFlag(throttlingValue *float64, cmd *cobra.Command) { 68 | 
cmd.PersistentFlags().Float64Var(throttlingValue, "base.throttling", 0.5, "block enqueue throttle delay") 69 | } 70 | 71 | func validateDatabaseConf(dbConf Database) error { 72 | if util.StrNotSet(dbConf.Host) { 73 | return errors.New("database host must be set") 74 | } 75 | if util.StrNotSet(dbConf.Port) { 76 | return errors.New("database port must be set") 77 | } 78 | if util.StrNotSet(dbConf.Database) { 79 | return errors.New("database name (i.e. database) must be set") 80 | } 81 | if util.StrNotSet(dbConf.User) { 82 | return errors.New("database user must be set") 83 | } 84 | if util.StrNotSet(dbConf.Password) { 85 | return errors.New("database password must be set") 86 | } 87 | 88 | return nil 89 | } 90 | 91 | func validateProbeConf(probeConf Probe) (Probe, error) { 92 | if util.StrNotSet(probeConf.RPC) { 93 | return probeConf, errors.New("probe rpc must be set") 94 | } 95 | // add port if not set 96 | if strings.Count(probeConf.RPC, ":") != 2 { 97 | if strings.HasPrefix(probeConf.RPC, "https:") { 98 | probeConf.RPC = fmt.Sprintf("%s:443", probeConf.RPC) 99 | } else if strings.HasPrefix(probeConf.RPC, "http:") { 100 | probeConf.RPC = fmt.Sprintf("%s:80", probeConf.RPC) 101 | } 102 | } 103 | 104 | if util.StrNotSet(probeConf.AccountPrefix) { 105 | return probeConf, errors.New("probe account-prefix must be set") 106 | } 107 | if util.StrNotSet(probeConf.ChainID) { 108 | return probeConf, errors.New("probe chain-id must be set") 109 | } 110 | if util.StrNotSet(probeConf.ChainName) { 111 | return probeConf, errors.New("probe chain-name must be set") 112 | } 113 | return probeConf, nil 114 | } 115 | 116 | func validateThrottlingConf(throttlingConf throttlingBase) error { 117 | if throttlingConf.Throttling < 0 { 118 | return errors.New("throttling must be a positive number or 0") 119 | } 120 | return nil 121 | } 122 | 123 | // Reads the Viper mapstructure tag to get the valid keys for a given config struct 124 | func getValidConfigKeys(section any, baseName string) 
(keys []string) { 125 | v := reflect.ValueOf(section) 126 | typeOfS := v.Type() 127 | 128 | if baseName == "" { 129 | baseName = strings.ToLower(typeOfS.Name()) 130 | } 131 | 132 | for i := 0; i < v.NumField(); i++ { 133 | field := typeOfS.Field(i) 134 | 135 | // Hack to get around the fact that we have embedded struct inside a struct in some of our definitions 136 | if !strings.HasPrefix(field.Type.String(), "config.") { 137 | name := field.Tag.Get("mapstructure") 138 | if name == "" { 139 | name = field.Name 140 | } 141 | 142 | key := fmt.Sprintf("%v.%v", baseName, strings.ReplaceAll(strings.ToLower(name), " ", "")) 143 | keys = append(keys, key) 144 | } 145 | } 146 | return 147 | } 148 | 149 | func addDatabaseConfigKeys(validKeys map[string]struct{}) { 150 | for _, key := range getValidConfigKeys(Database{}, "") { 151 | validKeys[key] = struct{}{} 152 | } 153 | } 154 | 155 | func addLogConfigKeys(validKeys map[string]struct{}) { 156 | for _, key := range getValidConfigKeys(log{}, "") { 157 | validKeys[key] = struct{}{} 158 | } 159 | } 160 | 161 | func addProbeConfigKeys(validKeys map[string]struct{}) { 162 | for _, key := range getValidConfigKeys(Probe{}, "") { 163 | validKeys[key] = struct{}{} 164 | } 165 | } 166 | -------------------------------------------------------------------------------- /config/common_configs_test.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/suite" 7 | ) 8 | 9 | type ConfigTestSuite struct { 10 | suite.Suite 11 | } 12 | 13 | func (suite *ConfigTestSuite) TestValidateDatabaseConf() { 14 | conf := Database{ 15 | Host: "", 16 | Port: "", 17 | Database: "", 18 | User: "", 19 | Password: "", 20 | } 21 | 22 | err := validateDatabaseConf(conf) 23 | suite.Require().Error(err) 24 | conf.Host = "fake-host" 25 | 26 | err = validateDatabaseConf(conf) 27 | suite.Require().Error(err) 28 | 29 | conf.Port = "5432" 30 | err = 
validateDatabaseConf(conf) 31 | suite.Require().Error(err) 32 | 33 | conf.Database = "fake-database" 34 | err = validateDatabaseConf(conf) 35 | suite.Require().Error(err) 36 | 37 | conf.User = "fake-user" 38 | err = validateDatabaseConf(conf) 39 | suite.Require().Error(err) 40 | 41 | conf.Password = "fake-password" 42 | err = validateDatabaseConf(conf) 43 | suite.Require().NoError(err) 44 | } 45 | 46 | func (suite *ConfigTestSuite) TestValidateProbeConf() { 47 | conf := Probe{ 48 | RPC: "", 49 | AccountPrefix: "", 50 | ChainID: "", 51 | ChainName: "", 52 | } 53 | 54 | _, err := validateProbeConf(conf) 55 | suite.Require().Error(err) 56 | 57 | conf.RPC = "fake-rpc" 58 | _, err = validateProbeConf(conf) 59 | suite.Require().Error(err) 60 | 61 | conf.AccountPrefix = "fake-account-prefix" 62 | _, err = validateProbeConf(conf) 63 | suite.Require().Error(err) 64 | 65 | conf.ChainID = "fake-chain-id" 66 | _, err = validateProbeConf(conf) 67 | suite.Require().Error(err) 68 | 69 | conf.ChainName = "fake-chain-name" 70 | _, err = validateProbeConf(conf) 71 | suite.Require().NoError(err) 72 | } 73 | 74 | func (suite *ConfigTestSuite) TestValidateThrottlingConf() { 75 | conf := throttlingBase{ 76 | Throttling: -1, 77 | } 78 | 79 | err := validateThrottlingConf(conf) 80 | suite.Require().Error(err) 81 | 82 | conf.Throttling = 0.5 83 | err = validateThrottlingConf(conf) 84 | suite.Require().NoError(err) 85 | } 86 | 87 | func TestConfigSuite(t *testing.T) { 88 | suite.Run(t, new(ConfigTestSuite)) 89 | } 90 | -------------------------------------------------------------------------------- /config/cosmos_config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | sdk "github.com/cosmos/cosmos-sdk/types" 5 | ) 6 | 7 | func setPrefixes(accountAddressPrefix string) { 8 | // Set prefixes 9 | accountPubKeyPrefix := accountAddressPrefix + "pub" 10 | validatorAddressPrefix := accountAddressPrefix + "valoper" 11 | 
validatorPubKeyPrefix := accountAddressPrefix + "valoperpub" 12 | consNodeAddressPrefix := accountAddressPrefix + "valcons" 13 | consNodePubKeyPrefix := accountAddressPrefix + "valconspub" 14 | 15 | // Set and seal config 16 | config := sdk.GetConfig() 17 | config.SetBech32PrefixForAccount(accountAddressPrefix, accountPubKeyPrefix) 18 | config.SetBech32PrefixForValidator(validatorAddressPrefix, validatorPubKeyPrefix) 19 | config.SetBech32PrefixForConsensusNode(consNodeAddressPrefix, consNodePubKeyPrefix) 20 | config.Seal() 21 | } 22 | 23 | // SetChainConfig Set the chain prefix e.g. juno (prefix for account addresses). 24 | func SetChainConfig(prefix string) { 25 | setPrefixes(prefix) 26 | } 27 | -------------------------------------------------------------------------------- /config/filter_config_test.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "encoding/json" 5 | "testing" 6 | 7 | "github.com/DefiantLabs/cosmos-indexer/db/models" 8 | "github.com/DefiantLabs/cosmos-indexer/filter" 9 | "github.com/stretchr/testify/suite" 10 | ) 11 | 12 | type FilterConfigTestSuite struct { 13 | suite.Suite 14 | } 15 | 16 | //nolint:dogsled 17 | func (suite *FilterConfigTestSuite) TestParseJSONFilterConfig() { 18 | conf := blockFilterConfigs{} 19 | 20 | beginFilterEventTypeInvalid, err := getMockEventTypeBytes(true) 21 | 22 | suite.Require().NoError(err) 23 | 24 | conf.BeginBlockFilters = []json.RawMessage{beginFilterEventTypeInvalid} 25 | 26 | confBytes, err := json.Marshal(conf) 27 | suite.Require().NoError(err) 28 | 29 | _, _, _, _, _, err = ParseJSONFilterConfig(confBytes) 30 | 31 | suite.Require().Error(err) 32 | 33 | beginFilterEventTypeValid, err := getMockEventTypeBytes(false) 34 | suite.Require().NoError(err) 35 | 36 | conf.BeginBlockFilters = []json.RawMessage{beginFilterEventTypeValid} 37 | 38 | confBytes, err = json.Marshal(conf) 39 | suite.Require().NoError(err) 40 | 41 | beginBlockFilters, 
_, _, _, _, err := ParseJSONFilterConfig(confBytes) 42 | 43 | suite.Require().NoError(err) 44 | suite.Require().Len(beginBlockFilters, 1) 45 | suite.Require().True(beginBlockFilters[0].EventMatches(filter.EventData{Event: models.BlockEvent{BlockEventType: models.BlockEventType{Type: "coin_received"}}})) 46 | suite.Require().False(beginBlockFilters[0].EventMatches(filter.EventData{Event: models.BlockEvent{BlockEventType: models.BlockEventType{Type: "dne"}}})) 47 | 48 | conf.BeginBlockFilters = []json.RawMessage{} 49 | 50 | messageTypeFilterInvalid, err := getMockMessageTypeBytes(true) 51 | suite.Require().NoError(err) 52 | 53 | conf.MessageTypeFilters = []json.RawMessage{messageTypeFilterInvalid} 54 | 55 | confBytes, err = json.Marshal(conf) 56 | suite.Require().NoError(err) 57 | 58 | _, _, _, _, _, err = ParseJSONFilterConfig(confBytes) 59 | suite.Require().Error(err) 60 | 61 | messageTypeFilterValid, err := getMockMessageTypeBytes(false) 62 | suite.Require().NoError(err) 63 | 64 | conf.MessageTypeFilters = []json.RawMessage{messageTypeFilterValid} 65 | 66 | confBytes, err = json.Marshal(conf) 67 | suite.Require().NoError(err) 68 | 69 | _, _, _, _, messageTypeFilters, err := ParseJSONFilterConfig(confBytes) 70 | 71 | suite.Require().NoError(err) 72 | suite.Require().Len(messageTypeFilters, 1) 73 | suite.Require().True(messageTypeFilters[0].MessageTypeMatches(filter.MessageTypeData{MessageType: "/cosmos.bank.v1beta1.MsgSend"})) 74 | suite.Require().False(messageTypeFilters[0].MessageTypeMatches(filter.MessageTypeData{MessageType: "dne"})) 75 | } 76 | 77 | func getMockEventTypeBytes(skipEventTypeKey bool) (json.RawMessage, error) { 78 | mockEventType := make(map[string]any) 79 | 80 | mockEventType["type"] = "event_type" 81 | if !skipEventTypeKey { 82 | mockEventType["event_type"] = "coin_received" 83 | } 84 | 85 | return json.Marshal(mockEventType) 86 | } 87 | 88 | func getMockMessageTypeBytes(skipMessageTypeKey bool) (json.RawMessage, error) { 89 | mockMessageType 
:= make(map[string]any) 90 | 91 | mockMessageType["type"] = "message_type" 92 | if !skipMessageTypeKey { 93 | mockMessageType["message_type"] = "/cosmos.bank.v1beta1.MsgSend" 94 | } 95 | 96 | return json.Marshal(mockMessageType) 97 | } 98 | 99 | func TestFilterConfigTestSuite(t *testing.T) { 100 | suite.Run(t, new(FilterConfigTestSuite)) 101 | } 102 | -------------------------------------------------------------------------------- /config/index_config_test.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/suite" 7 | ) 8 | 9 | type IndexConfigTestSuite struct { 10 | suite.Suite 11 | } 12 | 13 | func (suite *IndexConfigTestSuite) TestIndexConfig() { 14 | conf := IndexConfig{ 15 | // Setup valid configs for everything but base, these are tested elsewhere 16 | Database: Database{ 17 | Host: "fake-host", 18 | Port: "5432", 19 | Database: "fake-database", 20 | User: "fake-user", 21 | Password: "fake-password", 22 | LogLevel: "info", 23 | }, 24 | Log: log{ 25 | Level: "info", 26 | Path: "", 27 | Pretty: false, 28 | }, 29 | Probe: Probe{ 30 | RPC: "fake-rpc", 31 | AccountPrefix: "cosmos", 32 | ChainID: "fake-chain-id", 33 | ChainName: "fake-chain-name", 34 | }, 35 | Flags: flags{ 36 | IndexTxMessageRaw: false, 37 | }, 38 | } 39 | 40 | err := conf.Validate() 41 | suite.Require().Error(err) 42 | 43 | conf.Base.TransactionIndexingEnabled = true 44 | 45 | err = conf.Validate() 46 | suite.Require().Error(err) 47 | 48 | conf.Base.StartBlock = 1 49 | err = conf.Validate() 50 | suite.Require().Error(err) 51 | 52 | conf.Base.EndBlock = 2 53 | err = conf.Validate() 54 | suite.Require().NoError(err) 55 | } 56 | 57 | func (suite *IndexConfigTestSuite) TestCheckSuperfluousIndexKeys() { 58 | keys := []string{ 59 | "fake-key", 60 | } 61 | validKeys := CheckSuperfluousIndexKeys(keys) 62 | suite.Require().Len(validKeys, 1) 63 | 64 | keys = append(keys, 
"base.start-block") 65 | 66 | validKeys = CheckSuperfluousIndexKeys(keys) 67 | suite.Require().Len(validKeys, 1) 68 | } 69 | 70 | func TestIndexConfig(t *testing.T) { 71 | suite.Run(t, new(IndexConfigTestSuite)) 72 | } 73 | -------------------------------------------------------------------------------- /config/logger.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | "os" 7 | "strings" 8 | 9 | "github.com/rs/zerolog" 10 | zlog "github.com/rs/zerolog/log" 11 | ) 12 | 13 | type Logger struct{} 14 | 15 | // Log is exposed on the config as a drop-in replacement for our old logger 16 | var Log *Logger 17 | 18 | // These functions are provided to reduce refactoring. 19 | func (l *Logger) Debug(msg string, err ...error) { 20 | if len(err) == 1 { 21 | zlog.Debug().Err(err[0]).Msg(msg) 22 | return 23 | } 24 | zlog.Debug().Msg(msg) 25 | } 26 | 27 | func (l *Logger) Debugf(msg string, args ...interface{}) { 28 | zlog.Debug().Msg(fmt.Sprintf(msg, args...)) 29 | } 30 | 31 | func (l *Logger) Info(msg string, err ...error) { 32 | if len(err) == 1 { 33 | zlog.Info().Err(err[0]).Msg(msg) 34 | return 35 | } 36 | zlog.Info().Msg(msg) 37 | } 38 | 39 | func (l *Logger) Infof(msg string, args ...interface{}) { 40 | zlog.Info().Msg(fmt.Sprintf(msg, args...)) 41 | } 42 | 43 | func (l *Logger) Warn(msg string, err ...error) { 44 | if len(err) == 1 { 45 | zlog.Warn().Err(err[0]).Msg(msg) 46 | return 47 | } 48 | zlog.Warn().Msg(msg) 49 | } 50 | 51 | func (l *Logger) Warnf(msg string, args ...interface{}) { 52 | zlog.Warn().Msg(fmt.Sprintf(msg, args...)) 53 | } 54 | 55 | func (l *Logger) Error(msg string, err ...error) { 56 | if len(err) == 1 { 57 | zlog.Error().Err(err[0]).Msg(msg) 58 | return 59 | } 60 | zlog.Error().Msg(msg) 61 | } 62 | 63 | func (l *Logger) Errorf(msg string, args ...interface{}) { 64 | zlog.Error().Msg(fmt.Sprintf(msg, args...)) 65 | } 66 | 67 | func (l *Logger) Fatal(msg 
string, err ...error) { 68 | if len(err) == 1 { 69 | zlog.Fatal().Err(err[0]).Msg(msg) 70 | return 71 | } 72 | zlog.Fatal().Msg(msg) 73 | } 74 | 75 | func (l *Logger) Fatalf(msg string, args ...interface{}) { 76 | zlog.Fatal().Msg(fmt.Sprintf(msg, args...)) 77 | } 78 | 79 | func DoConfigureLogger(logPath string, logLevel string, prettyLogging bool) { 80 | writers := io.MultiWriter(os.Stdout) 81 | if len(logPath) > 0 { 82 | if _, err := os.Stat(logPath); os.IsNotExist(err) { 83 | file, err := os.Create(logPath) 84 | if err != nil { 85 | panic(err) 86 | } 87 | writers = io.MultiWriter(os.Stdout, file) 88 | } else { 89 | file, err := os.OpenFile(logPath, os.O_APPEND|os.O_WRONLY, os.ModeAppend) 90 | if err != nil { 91 | panic(err) 92 | } 93 | writers = io.MultiWriter(os.Stdout, file) 94 | } 95 | } 96 | if prettyLogging { 97 | zlog.Logger = zlog.Output(zerolog.ConsoleWriter{Out: writers}) 98 | } else { 99 | zlog.Logger = zlog.Output(writers) 100 | } 101 | 102 | // Set the log level (default to info) 103 | switch strings.ToLower(logLevel) { 104 | case "debug": 105 | zerolog.SetGlobalLevel(zerolog.DebugLevel) 106 | case "info": 107 | zerolog.SetGlobalLevel(zerolog.InfoLevel) 108 | case "warn": 109 | zerolog.SetGlobalLevel(zerolog.WarnLevel) 110 | case "error": 111 | zerolog.SetGlobalLevel(zerolog.ErrorLevel) 112 | case "fatal": 113 | zerolog.SetGlobalLevel(zerolog.FatalLevel) 114 | case "panic": 115 | zerolog.SetGlobalLevel(zerolog.PanicLevel) 116 | default: 117 | zerolog.SetGlobalLevel(zerolog.InfoLevel) 118 | } 119 | } 120 | -------------------------------------------------------------------------------- /core/block_events.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "encoding/base64" 5 | 6 | abci "github.com/cometbft/cometbft/abci/types" 7 | 8 | "github.com/DefiantLabs/cosmos-indexer/config" 9 | "github.com/DefiantLabs/cosmos-indexer/db" 10 | "github.com/DefiantLabs/cosmos-indexer/db/models" 
11 | "github.com/DefiantLabs/cosmos-indexer/filter" 12 | "github.com/DefiantLabs/cosmos-indexer/parsers" 13 | "github.com/DefiantLabs/cosmos-indexer/rpc" 14 | ) 15 | 16 | func ProcessRPCBlockResults(conf config.IndexConfig, block models.Block, blockResults *rpc.CustomBlockResults, customBeginBlockParsers map[string][]parsers.BlockEventParser, customEndBlockParsers map[string][]parsers.BlockEventParser) (*db.BlockDBWrapper, error) { 17 | var blockDBWrapper db.BlockDBWrapper 18 | 19 | blockDBWrapper.Block = &block 20 | 21 | blockDBWrapper.UniqueBlockEventAttributeKeys = make(map[string]models.BlockEventAttributeKey) 22 | blockDBWrapper.UniqueBlockEventTypes = make(map[string]models.BlockEventType) 23 | 24 | var err error 25 | blockDBWrapper.BeginBlockEvents, err = ProcessRPCBlockEvents(blockDBWrapper.Block, blockResults.BeginBlockEvents, models.BeginBlockEvent, blockDBWrapper.UniqueBlockEventTypes, blockDBWrapper.UniqueBlockEventAttributeKeys, customBeginBlockParsers, conf) 26 | if err != nil { 27 | return nil, err 28 | } 29 | 30 | blockDBWrapper.EndBlockEvents, err = ProcessRPCBlockEvents(blockDBWrapper.Block, blockResults.EndBlockEvents, models.EndBlockEvent, blockDBWrapper.UniqueBlockEventTypes, blockDBWrapper.UniqueBlockEventAttributeKeys, customEndBlockParsers, conf) 31 | if err != nil { 32 | return nil, err 33 | } 34 | 35 | return &blockDBWrapper, nil 36 | } 37 | 38 | func ProcessRPCBlockEvents(block *models.Block, blockEvents []abci.Event, blockLifecyclePosition models.BlockLifecyclePosition, uniqueEventTypes map[string]models.BlockEventType, uniqueAttributeKeys map[string]models.BlockEventAttributeKey, customParsers map[string][]parsers.BlockEventParser, conf config.IndexConfig) ([]db.BlockEventDBWrapper, error) { 39 | beginBlockEvents := make([]db.BlockEventDBWrapper, len(blockEvents)) 40 | 41 | for index, event := range blockEvents { 42 | eventType := models.BlockEventType{ 43 | Type: event.Type, 44 | } 45 | beginBlockEvents[index].BlockEvent = 
models.BlockEvent{ 46 | Index: uint64(index), 47 | LifecyclePosition: blockLifecyclePosition, 48 | Block: *block, 49 | BlockEventType: eventType, 50 | } 51 | 52 | uniqueEventTypes[event.Type] = eventType 53 | 54 | beginBlockEvents[index].Attributes = make([]models.BlockEventAttribute, len(event.Attributes)) 55 | 56 | for attrIndex, attribute := range event.Attributes { 57 | 58 | var value string 59 | var keyItem string 60 | if conf.Flags.BlockEventsBase64Encoded { 61 | // Should we even be decoding these from base64? What are the implications? 62 | valueBytes, err := base64.StdEncoding.DecodeString(attribute.Value) 63 | if err != nil { 64 | return nil, err 65 | } 66 | 67 | keyBytes, err := base64.StdEncoding.DecodeString(attribute.Key) 68 | if err != nil { 69 | return nil, err 70 | } 71 | 72 | value = string(valueBytes) 73 | keyItem = string(keyBytes) 74 | } else { 75 | value = attribute.Value 76 | keyItem = attribute.Key 77 | } 78 | 79 | key := models.BlockEventAttributeKey{ 80 | Key: keyItem, 81 | } 82 | 83 | beginBlockEvents[index].Attributes[attrIndex] = models.BlockEventAttribute{ 84 | Value: value, 85 | BlockEventAttributeKey: key, 86 | Index: uint64(attrIndex), 87 | } 88 | 89 | uniqueAttributeKeys[key.Key] = key 90 | 91 | } 92 | 93 | if customParsers != nil { 94 | if customBlockEventParsers, ok := customParsers[event.Type]; ok { 95 | for index, customParser := range customBlockEventParsers { 96 | // We deliberately ignore the error here, as we want to continue processing the block events even if a custom parser fails 97 | parsedData, err := customParser.ParseBlockEvent(event, conf) 98 | beginBlockEvents[index].BlockEventParsedDatasets = append(beginBlockEvents[index].BlockEventParsedDatasets, parsers.BlockEventParsedData{ 99 | Data: parsedData, 100 | Error: err, 101 | Parser: &customBlockEventParsers[index], 102 | }) 103 | } 104 | } 105 | } 106 | 107 | } 108 | 109 | return beginBlockEvents, nil 110 | } 111 | 112 | func FilterRPCBlockEvents(blockEvents 
[]db.BlockEventDBWrapper, filterRegistry filter.StaticBlockEventFilterRegistry) ([]db.BlockEventDBWrapper, error) { 113 | // If there are no filters, just return the block events 114 | if len(filterRegistry.BlockEventFilters) == 0 && len(filterRegistry.RollingWindowEventFilters) == 0 { 115 | return blockEvents, nil 116 | } 117 | 118 | filterIndexes := make(map[int]bool) 119 | 120 | // If filters are defined, we treat filters as a whitelist, and only include block events that match the filters and are allowed 121 | // Filters are evaluated in order, and the first filter that matches is the one that is used. Single block event filters are preferred in ordering. 122 | for index, blockEvent := range blockEvents { 123 | filterEvent := filter.EventData{ 124 | Event: blockEvent.BlockEvent, 125 | Attributes: blockEvent.Attributes, 126 | } 127 | 128 | for _, filter := range filterRegistry.BlockEventFilters { 129 | patternMatch, err := filter.EventMatches(filterEvent) 130 | if err != nil { 131 | return nil, err 132 | } 133 | if patternMatch { 134 | filterIndexes[index] = filter.IncludeMatch() 135 | } 136 | } 137 | 138 | for _, rollingWindowFilter := range filterRegistry.RollingWindowEventFilters { 139 | if index+rollingWindowFilter.RollingWindowLength() <= len(blockEvents) { 140 | lastIndex := index + rollingWindowFilter.RollingWindowLength() 141 | blockEventSlice := blockEvents[index:lastIndex] 142 | 143 | filterEvents := make([]filter.EventData, len(blockEventSlice)) 144 | 145 | for index, blockEvent := range blockEventSlice { 146 | filterEvents[index] = filter.EventData{ 147 | Event: blockEvent.BlockEvent, 148 | Attributes: blockEvent.Attributes, 149 | } 150 | } 151 | 152 | patternMatches, err := rollingWindowFilter.EventsMatch(filterEvents) 153 | if err != nil { 154 | return nil, err 155 | } 156 | 157 | if patternMatches { 158 | for i := index; i < lastIndex; i++ { 159 | filterIndexes[i] = rollingWindowFilter.IncludeMatches() 160 | } 161 | } 162 | } 163 | } 164 | } 165 | 
166 | // Filter the block events based on the indexes that matched the registered patterns 167 | filteredBlockEvents := make([]db.BlockEventDBWrapper, 0) 168 | 169 | for index, blockEvent := range blockEvents { 170 | if filterIndexes[index] { 171 | filteredBlockEvents = append(filteredBlockEvents, blockEvent) 172 | } 173 | } 174 | 175 | return filteredBlockEvents, nil 176 | } 177 | -------------------------------------------------------------------------------- /core/decoding.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "errors" 5 | 6 | probeClient "github.com/DefiantLabs/probe/client" 7 | sdk "github.com/cosmos/cosmos-sdk/types" 8 | "github.com/cosmos/cosmos-sdk/types/tx" 9 | ) 10 | 11 | // Provides an in-app tx decoder. 12 | // The primary use-case for this function is to allow fallback decoding if a TX fails to decode after RPC requests. 13 | // This can happen in a number of scenarios, but mainly due to missing proto definitions. 14 | // We can attempt a personal decode of the TX, and see if we can continue indexing based on in-app conditions (such as message type filters). 15 | // This function skips a large chunk of decoding validations, and is not recommended for general use. Its main point is to skip errors that in 16 | // default Cosmos TX decoders would cause the entire decode to fail. 
17 | func InAppTxDecoder(cdc probeClient.Codec) sdk.TxDecoder { 18 | return func(txBytes []byte) (sdk.Tx, error) { 19 | var raw tx.TxRaw 20 | var err error 21 | 22 | err = cdc.Marshaler.Unmarshal(txBytes, &raw) 23 | if err != nil { 24 | return nil, err 25 | } 26 | 27 | var body tx.TxBody 28 | 29 | err = body.Unmarshal(raw.BodyBytes) 30 | if err != nil { 31 | return nil, errors.New("failed to unmarshal tx body") 32 | } 33 | 34 | for _, any := range body.Messages { 35 | var msg sdk.Msg 36 | // We deliberately ignore errors here to build up a 37 | // list of properly decoded messages for later analysis. 38 | cdc.Marshaler.UnpackAny(any, &msg) //nolint:errcheck 39 | } 40 | 41 | var authInfo tx.AuthInfo 42 | 43 | err = cdc.Marshaler.Unmarshal(raw.AuthInfoBytes, &authInfo) 44 | if err != nil { 45 | return nil, errors.New("failed to unmarshal auth info") 46 | } 47 | 48 | theTx := &tx.Tx{ 49 | Body: &body, 50 | AuthInfo: &authInfo, 51 | Signatures: raw.Signatures, 52 | } 53 | 54 | return theTx, nil 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /core/processor.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/DefiantLabs/cosmos-indexer/config" 7 | "github.com/DefiantLabs/cosmos-indexer/db/models" 8 | "github.com/DefiantLabs/cosmos-indexer/rpc" 9 | ctypes "github.com/cometbft/cometbft/rpc/core/types" 10 | sdkTypes "github.com/cosmos/cosmos-sdk/types" 11 | ) 12 | 13 | type BlockProcessingFailure int 14 | 15 | const ( 16 | NodeMissingBlockTxs BlockProcessingFailure = iota 17 | BlockQueryError 18 | UnprocessableTxError 19 | OsmosisNodeRewardLookupError 20 | OsmosisNodeRewardIndexError 21 | NodeMissingHistoryForBlock 22 | FailedBlockEventHandling 23 | ) 24 | 25 | type FailedBlockHandler func(height int64, code BlockProcessingFailure, err error) 26 | 27 | // Process RPC Block data into the model object used by the application. 
28 | func ProcessBlock(blockData *ctypes.ResultBlock, blockResultsData *rpc.CustomBlockResults, chainID uint) (models.Block, error) { 29 | block := models.Block{ 30 | Height: blockData.Block.Height, 31 | ChainID: chainID, 32 | } 33 | 34 | propAddressFromHex, err := sdkTypes.ConsAddressFromHex(blockData.Block.ProposerAddress.String()) 35 | if err != nil { 36 | return block, err 37 | } 38 | 39 | block.ProposerConsAddress = models.Address{Address: propAddressFromHex.String()} 40 | block.TimeStamp = blockData.Block.Time 41 | 42 | return block, nil 43 | } 44 | 45 | // Log error to stdout. Not much else we can do to handle right now. 46 | func HandleFailedBlock(height int64, code BlockProcessingFailure, err error) { 47 | reason := "{unknown error}" 48 | switch code { 49 | case NodeMissingBlockTxs: 50 | reason = "node has no TX history for block" 51 | case BlockQueryError: 52 | reason = "failed to query block result for block" 53 | case OsmosisNodeRewardLookupError: 54 | reason = "Failed Osmosis rewards lookup for block" 55 | case OsmosisNodeRewardIndexError: 56 | reason = "Failed Osmosis rewards indexing for block" 57 | case NodeMissingHistoryForBlock: 58 | reason = "Node has no TX history for block" 59 | case FailedBlockEventHandling: 60 | reason = "Failed to process block event" 61 | } 62 | 63 | config.Log.Error(fmt.Sprintf("Block %v failed. 
Reason: %v", height, reason), err) 64 | } 65 | -------------------------------------------------------------------------------- /core/rpc_worker.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "fmt" 5 | "net/http" 6 | "sync" 7 | 8 | "github.com/DefiantLabs/cosmos-indexer/config" 9 | dbTypes "github.com/DefiantLabs/cosmos-indexer/db" 10 | "github.com/DefiantLabs/cosmos-indexer/rpc" 11 | "github.com/DefiantLabs/probe/client" 12 | abci "github.com/cometbft/cometbft/abci/types" 13 | ctypes "github.com/cometbft/cometbft/rpc/core/types" 14 | txTypes "github.com/cosmos/cosmos-sdk/types/tx" 15 | "gorm.io/gorm" 16 | ) 17 | 18 | // Wrapper types for gathering full dataset. 19 | type IndexerBlockEventData struct { 20 | BlockData *ctypes.ResultBlock 21 | BlockResultsData *rpc.CustomBlockResults 22 | BlockEventRequestsFailed bool 23 | GetTxsResponse *txTypes.GetTxsEventResponse 24 | TxRequestsFailed bool 25 | IndexBlockEvents bool 26 | IndexTransactions bool 27 | } 28 | 29 | // This function is responsible for making all RPC requests to the chain needed for later processing. 30 | // The indexer relies on a number of RPC endpoints for full block data, including block event and transaction searches. 31 | func BlockRPCWorker(wg *sync.WaitGroup, blockEnqueueChan chan *EnqueueData, chainID uint, chainStringID string, cfg *config.IndexConfig, chainClient *client.ChainClient, db *gorm.DB, outputChannel chan IndexerBlockEventData) { 32 | defer wg.Done() 33 | rpcClient := rpc.URIClient{ 34 | Address: chainClient.Config.RPCAddr, 35 | Client: &http.Client{}, 36 | } 37 | 38 | for { 39 | // Get the next block to process 40 | block, open := <-blockEnqueueChan 41 | if !open { 42 | config.Log.Debugf("Block enqueue channel closed. 
Exiting RPC worker.") 43 | break 44 | } 45 | 46 | currentHeightIndexerData := IndexerBlockEventData{ 47 | BlockEventRequestsFailed: false, 48 | TxRequestsFailed: false, 49 | IndexBlockEvents: block.IndexBlockEvents, 50 | IndexTransactions: block.IndexTransactions, 51 | } 52 | 53 | // Get the block from the RPC 54 | blockData, err := rpc.GetBlock(chainClient, block.Height) 55 | if err != nil { 56 | // This is the only response we continue on. If we can't get the block, we can't index anything. 57 | config.Log.Errorf("Error getting block %v from RPC. Err: %v", block, err) 58 | err := dbTypes.UpsertFailedEventBlock(db, block.Height, chainStringID, cfg.Probe.ChainName) 59 | if err != nil { 60 | config.Log.Fatal("Failed to insert failed block event", err) 61 | } 62 | err = dbTypes.UpsertFailedBlock(db, block.Height, chainStringID, cfg.Probe.ChainName) 63 | if err != nil { 64 | config.Log.Fatal("Failed to insert failed block", err) 65 | } 66 | continue 67 | } 68 | 69 | currentHeightIndexerData.BlockData = blockData 70 | 71 | if block.IndexBlockEvents { 72 | bresults, err := rpc.GetBlockResultWithRetry(rpcClient, block.Height, cfg.Base.RequestRetryAttempts, cfg.Base.RequestRetryMaxWait) 73 | 74 | if err != nil { 75 | config.Log.Errorf("Error getting block results for block %v from RPC. Err: %v", block, err) 76 | err := dbTypes.UpsertFailedEventBlock(db, block.Height, chainStringID, cfg.Probe.ChainName) 77 | if err != nil { 78 | config.Log.Fatal("Failed to insert failed block event", err) 79 | } 80 | currentHeightIndexerData.BlockResultsData = nil 81 | currentHeightIndexerData.BlockEventRequestsFailed = true 82 | } else { 83 | bresults, err = NormalizeCustomBlockResults(bresults) 84 | if err != nil { 85 | config.Log.Errorf("Error normalizing block results for block %v from RPC. 
Err: %v", block, err) 86 | err := dbTypes.UpsertFailedEventBlock(db, block.Height, chainStringID, cfg.Probe.ChainName) 87 | if err != nil { 88 | config.Log.Fatal("Failed to insert failed block event", err) 89 | } 90 | } else { 91 | currentHeightIndexerData.BlockResultsData = bresults 92 | } 93 | } 94 | } 95 | 96 | if block.IndexTransactions { 97 | var txsEventResp *txTypes.GetTxsEventResponse 98 | var err error 99 | if !cfg.Base.SkipBlockByHeightRPCRequest { 100 | txsEventResp, err = rpc.GetTxsByBlockHeight(chainClient, block.Height) 101 | } 102 | 103 | if err != nil || cfg.Base.SkipBlockByHeightRPCRequest { 104 | // Attempt to get block results to attempt an in-app codec decode of transactions. 105 | if currentHeightIndexerData.BlockResultsData == nil { 106 | 107 | bresults, err := rpc.GetBlockResultWithRetry(rpcClient, block.Height, cfg.Base.RequestRetryAttempts, cfg.Base.RequestRetryMaxWait) 108 | 109 | if err != nil { 110 | config.Log.Errorf("Error getting txs for block %v from RPC. Err: %v", block, err) 111 | err := dbTypes.UpsertFailedBlock(db, block.Height, chainStringID, cfg.Probe.ChainName) 112 | if err != nil { 113 | config.Log.Fatal("Failed to insert failed block", err) 114 | } 115 | currentHeightIndexerData.GetTxsResponse = nil 116 | currentHeightIndexerData.BlockResultsData = nil 117 | // Only set failed when we can't get the block results either. 118 | currentHeightIndexerData.TxRequestsFailed = true 119 | } else { 120 | bresults, err = NormalizeCustomBlockResults(bresults) 121 | if err != nil { 122 | config.Log.Errorf("Error normalizing block results for block %v from RPC. 
Err: %v", block, err) 123 | err := dbTypes.UpsertFailedBlock(db, block.Height, chainStringID, cfg.Probe.ChainName) 124 | if err != nil { 125 | config.Log.Fatal("Failed to insert failed block", err) 126 | } 127 | } else { 128 | currentHeightIndexerData.BlockResultsData = bresults 129 | } 130 | } 131 | 132 | } 133 | } else { 134 | currentHeightIndexerData.GetTxsResponse = txsEventResp 135 | } 136 | } 137 | 138 | outputChannel <- currentHeightIndexerData 139 | } 140 | } 141 | 142 | func NormalizeCustomBlockResults(blockResults *rpc.CustomBlockResults) (*rpc.CustomBlockResults, error) { 143 | if len(blockResults.FinalizeBlockEvents) != 0 { 144 | beginBlockEvents := []abci.Event{} 145 | endBlockEvents := []abci.Event{} 146 | 147 | for _, event := range blockResults.FinalizeBlockEvents { 148 | eventAttrs := []abci.EventAttribute{} 149 | isBeginBlock := false 150 | isEndBlock := false 151 | for _, attr := range event.Attributes { 152 | if attr.Key == "mode" { 153 | if attr.Value == "BeginBlock" { 154 | isBeginBlock = true 155 | } else if attr.Value == "EndBlock" { 156 | isEndBlock = true 157 | } 158 | } else { 159 | eventAttrs = append(eventAttrs, attr) 160 | } 161 | } 162 | 163 | switch { 164 | case isBeginBlock && isEndBlock: 165 | return nil, fmt.Errorf("finalize block event has both BeginBlock and EndBlock mode") 166 | case !isBeginBlock && !isEndBlock: 167 | return nil, fmt.Errorf("finalize block event has neither BeginBlock nor EndBlock mode") 168 | case isBeginBlock: 169 | beginBlockEvents = append(beginBlockEvents, abci.Event{Type: event.Type, Attributes: eventAttrs}) 170 | case isEndBlock: 171 | endBlockEvents = append(endBlockEvents, abci.Event{Type: event.Type, Attributes: eventAttrs}) 172 | } 173 | } 174 | 175 | blockResults.BeginBlockEvents = append(blockResults.BeginBlockEvents, beginBlockEvents...) 176 | blockResults.EndBlockEvents = append(blockResults.EndBlockEvents, endBlockEvents...) 
177 | } 178 | 179 | return blockResults, nil 180 | } 181 | -------------------------------------------------------------------------------- /cosmos/events/normalization.go: -------------------------------------------------------------------------------- 1 | package events 2 | 3 | import ( 4 | "fmt" 5 | "strconv" 6 | 7 | "github.com/DefiantLabs/cosmos-indexer/config" 8 | txtypes "github.com/DefiantLabs/cosmos-indexer/cosmos/modules/tx" 9 | cometAbciTypes "github.com/cometbft/cometbft/abci/types" 10 | "github.com/cosmos/cosmos-sdk/types" 11 | ) 12 | 13 | func NormalizedAttributesToAttributes(attrs []txtypes.Attribute) []types.Attribute { 14 | list := []types.Attribute{} 15 | for _, attr := range attrs { 16 | lma := types.Attribute{Key: attr.Key, Value: attr.Value} 17 | list = append(list, lma) 18 | } 19 | 20 | return list 21 | } 22 | 23 | func AttributesToNormalizedAttributes(attrs []types.Attribute) []txtypes.Attribute { 24 | list := []txtypes.Attribute{} 25 | for _, attr := range attrs { 26 | lma := txtypes.Attribute{Key: attr.Key, Value: attr.Value} 27 | list = append(list, lma) 28 | } 29 | 30 | return list 31 | } 32 | 33 | func EventAttributesToNormalizedAttributes(attrs []cometAbciTypes.EventAttribute) []txtypes.Attribute { 34 | list := []txtypes.Attribute{} 35 | for _, attr := range attrs { 36 | lma := txtypes.Attribute{Key: attr.Key, Value: attr.Value} 37 | list = append(list, lma) 38 | } 39 | 40 | return list 41 | } 42 | 43 | func StringEventstoNormalizedEvents(msgEvents types.StringEvents) (list []txtypes.LogMessageEvent) { 44 | for _, evt := range msgEvents { 45 | lme := txtypes.LogMessageEvent{Type: evt.Type, Attributes: AttributesToNormalizedAttributes(evt.Attributes)} 46 | list = append(list, lme) 47 | } 48 | 49 | return list 50 | } 51 | 52 | func toNormalizedEvents(msgEvents []cometAbciTypes.Event) (list []txtypes.LogMessageEvent) { 53 | for _, evt := range msgEvents { 54 | lme := txtypes.LogMessageEvent{Type: evt.Type, Attributes: 
EventAttributesToNormalizedAttributes(evt.Attributes)} 55 | list = append(list, lme) 56 | } 57 | 58 | return list 59 | } 60 | 61 | func ParseTxEventsToMessageIndexEvents(numMessages int, events []cometAbciTypes.Event) (types.ABCIMessageLogs, error) { 62 | parsedLogs := make(types.ABCIMessageLogs, numMessages) 63 | for index := range parsedLogs { 64 | parsedLogs[index] = types.ABCIMessageLog{ 65 | MsgIndex: uint32(index), 66 | } 67 | } 68 | 69 | // TODO: Fix this to be more efficient, no need to translate multiple times to hack this together 70 | logMessageEvents := toNormalizedEvents(events) 71 | for _, event := range logMessageEvents { 72 | loopEvent := event 73 | val, err := txtypes.GetValueForAttribute("msg_index", &loopEvent) 74 | 75 | if err == nil && val != "" { 76 | msgIndex, err := strconv.Atoi(val) 77 | if err != nil { 78 | config.Log.Error(fmt.Sprintf("Error parsing msg_index from event: %v", err)) 79 | return nil, err 80 | } 81 | 82 | if msgIndex >= 0 && msgIndex < len(parsedLogs) { 83 | parsedLogs[msgIndex].Events = append(parsedLogs[msgIndex].Events, types.StringEvent{Type: event.Type, Attributes: NormalizedAttributesToAttributes(event.Attributes)}) 84 | } 85 | } 86 | } 87 | 88 | return parsedLogs, nil 89 | } 90 | -------------------------------------------------------------------------------- /cosmos/modules/denoms/types.go: -------------------------------------------------------------------------------- 1 | package denoms 2 | 3 | import transfertypes "github.com/cosmos/ibc-go/v7/modules/apps/transfer/types" 4 | 5 | type Pagination struct { 6 | NextKey string `json:"next_key"` 7 | Total string `json:"total"` 8 | } 9 | 10 | type GetDenomTracesResponse struct { 11 | DenomTraces transfertypes.Traces `json:"denom_traces"` 12 | Pagination Pagination `json:"pagination"` 13 | } 14 | -------------------------------------------------------------------------------- /cosmos/modules/parsing.go: 
-------------------------------------------------------------------------------- 1 | package parsing 2 | 3 | import "math/big" 4 | 5 | type MessageRelevantInformation struct { 6 | SenderAddress string 7 | ReceiverAddress string 8 | AmountSent *big.Int 9 | AmountReceived *big.Int 10 | DenominationSent string 11 | DenominationReceived string 12 | } 13 | -------------------------------------------------------------------------------- /cosmos/modules/tx/logic.go: -------------------------------------------------------------------------------- 1 | package tx 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "strings" 7 | "unicode" 8 | ) 9 | 10 | const EventAttributeAmount = "amount" 11 | 12 | func GetMessageLogForIndex(logs []LogMessage, index int) *LogMessage { 13 | for _, log := range logs { 14 | if log.MessageIndex == index { 15 | return &log 16 | } 17 | } 18 | 19 | return nil 20 | } 21 | 22 | func GetEventWithType(eventType string, msg *LogMessage) *LogMessageEvent { 23 | if msg == nil || msg.Events == nil { 24 | return nil 25 | } 26 | 27 | for _, logEvent := range msg.Events { 28 | if logEvent.Type == eventType { 29 | return &logEvent 30 | } 31 | } 32 | 33 | return nil 34 | } 35 | 36 | func GetAllEventsWithType(eventType string, msg *LogMessage) []LogMessageEvent { 37 | logEventMessages := []LogMessageEvent{} 38 | 39 | if msg == nil || msg.Events == nil { 40 | return logEventMessages 41 | } 42 | 43 | for _, logEvent := range msg.Events { 44 | if logEvent.Type == eventType { 45 | logEventMessages = append(logEventMessages, logEvent) 46 | } 47 | } 48 | 49 | return logEventMessages 50 | } 51 | 52 | func GetEventsWithType(eventType string, msg *LogMessage) []LogMessageEvent { 53 | events := []LogMessageEvent{} 54 | if msg == nil || msg.Events == nil { 55 | return nil 56 | } 57 | 58 | for _, logEvent := range msg.Events { 59 | if logEvent.Type == eventType { 60 | events = append(events, logEvent) 61 | } 62 | } 63 | 64 | return events 65 | } 66 | 67 | type TransferEvent struct 
{ 68 | Recipient string 69 | Sender string 70 | Amount string 71 | } 72 | 73 | // Transfer events should have attributes in the order recipient, sender, amount. 74 | func ParseTransferEvent(evt LogMessageEvent) ([]TransferEvent, error) { 75 | errInvalidTransfer := errors.New("not a valid transfer event") 76 | transfers := []TransferEvent{} 77 | if evt.Type != "transfer" { 78 | return nil, errInvalidTransfer 79 | } 80 | 81 | for i := 0; i < len(evt.Attributes); i++ { 82 | attrRecipient := evt.Attributes[i] 83 | if attrRecipient.Key == "recipient" { 84 | attrSenderIdx := i + 1 85 | attrAmountIdx := i + 2 86 | if attrAmountIdx < len(evt.Attributes) { 87 | attrSender := evt.Attributes[attrSenderIdx] 88 | attrAmount := evt.Attributes[attrAmountIdx] 89 | if attrSender.Key == "sender" && attrAmount.Key == EventAttributeAmount { 90 | transfers = append(transfers, TransferEvent{ 91 | Recipient: attrRecipient.Value, 92 | Sender: attrSender.Value, 93 | Amount: attrAmount.Value, 94 | }) 95 | } else { 96 | return nil, errInvalidTransfer 97 | } 98 | } else { 99 | return nil, errInvalidTransfer 100 | } 101 | } else if i%3 == 0 { // every third attr should be "recipient" 102 | return nil, errInvalidTransfer 103 | } 104 | } 105 | 106 | return transfers, nil 107 | } 108 | 109 | // GetValueForAttribute returns the value of the first attribute on the event whose key matches the given key. 110 | // It returns ("", nil) for a nil/attribute-less event, and an error when no attribute with that key exists. 111 | func GetValueForAttribute(key string, evt *LogMessageEvent) (string, error) { 112 | if evt == nil || evt.Attributes == nil { 113 | return "", nil 114 | } 115 | 116 | for _, attr := range evt.Attributes { 117 | if attr.Key == key { 118 | return attr.Value, nil 119 | } 120 | } 121 | 122 | return "", fmt.Errorf("attribute %s missing from event", key) 123 | } 124 | 125 | func GetCoinsSpent(spender string, evts []LogMessageEvent) []string { 126 | coinsSpent := []string{} 127 | 128 | if len(evts) == 0 { 129 | return coinsSpent 130 | } 131 | 132 | for _, evt := range
evts { 133 | for i := 0; i < len(evt.Attributes); i++ { 134 | attr := evt.Attributes[i] 135 | if attr.Key == "spender" && attr.Value == spender { 136 | attrAmountIdx := i + 1 137 | if attrAmountIdx < len(evt.Attributes) { 138 | attrNext := evt.Attributes[attrAmountIdx] 139 | if attrNext.Key == EventAttributeAmount { 140 | commaSeperatedCoins := attrNext.Value 141 | currentCoins := strings.Split(commaSeperatedCoins, ",") 142 | for _, coin := range currentCoins { 143 | if coin != "" { 144 | coinsSpent = append(coinsSpent, coin) 145 | } 146 | } 147 | } 148 | } 149 | } 150 | } 151 | } 152 | 153 | return coinsSpent 154 | } 155 | 156 | func GetCoinsReceived(receiver string, evts []LogMessageEvent) []string { 157 | coinsReceived := []string{} 158 | 159 | if len(evts) == 0 { 160 | return coinsReceived 161 | } 162 | 163 | for _, evt := range evts { 164 | for i := 0; i < len(evt.Attributes); i++ { 165 | attr := evt.Attributes[i] 166 | if attr.Key == "receiver" && attr.Value == receiver { 167 | attrAmountIdx := i + 1 168 | if attrAmountIdx < len(evt.Attributes) { 169 | attrNext := evt.Attributes[attrAmountIdx] 170 | if attrNext.Key == EventAttributeAmount { 171 | commaSeperatedCoins := attrNext.Value 172 | currentCoins := strings.Split(commaSeperatedCoins, ",") 173 | for _, coin := range currentCoins { 174 | if coin != "" { 175 | coinsReceived = append(coinsReceived, coin) 176 | } 177 | } 178 | } 179 | } 180 | } 181 | } 182 | } 183 | 184 | return coinsReceived 185 | } 186 | 187 | // Get the Nth value for the given key (starting at 1) 188 | func GetNthValueForAttribute(key string, n int, evt *LogMessageEvent) string { 189 | if evt == nil || evt.Attributes == nil { 190 | return "" 191 | } 192 | var count int 193 | for i := 0; i < len(evt.Attributes); i++ { 194 | attr := evt.Attributes[i] 195 | if attr.Key == key { 196 | count++ 197 | if count == n { 198 | return attr.Value 199 | } 200 | } 201 | } 202 | 203 | return "" 204 | } 205 | 206 | func GetLastValueForAttribute(key 
string, evt *LogMessageEvent) string { 207 | if evt == nil || evt.Attributes == nil { 208 | return "" 209 | } 210 | 211 | for i := len(evt.Attributes) - 1; i >= 0; i-- { 212 | attr := evt.Attributes[i] 213 | if attr.Key == key { 214 | return attr.Value 215 | } 216 | } 217 | 218 | return "" 219 | } 220 | 221 | func IsMessageActionEquals(msgType string, msg *LogMessage) bool { 222 | logEvent := GetEventWithType("message", msg) 223 | altMsgType := getAltMsgType(msgType) 224 | if logEvent == nil { 225 | return false 226 | } 227 | 228 | for _, attr := range logEvent.Attributes { 229 | if attr.Key == "action" { 230 | if attr.Value == msgType || attr.Value == altMsgType { 231 | return true 232 | } 233 | } 234 | } 235 | 236 | return false 237 | } 238 | 239 | var altMsgMap = map[string]string{ 240 | "/cosmos.staking.v1beta1.MsgUndelegate": "begin_unbonding", 241 | } 242 | 243 | func getAltMsgType(msgType string) string { 244 | if altMsg, ok := altMsgMap[msgType]; ok { 245 | return altMsg 246 | } 247 | 248 | var output string 249 | msgParts := strings.Split(msgType, ".Msg") 250 | if len(msgParts) == 2 { 251 | msgSuffix := msgParts[1] 252 | for i, char := range msgSuffix { 253 | if unicode.IsUpper(char) { 254 | if i != 0 { 255 | output = fmt.Sprintf("%v_", output) 256 | } 257 | } 258 | output = fmt.Sprintf("%v%v", output, string(unicode.ToLower(char))) 259 | } 260 | } 261 | return output 262 | } 263 | -------------------------------------------------------------------------------- /cosmos/modules/tx/types.go: -------------------------------------------------------------------------------- 1 | package tx 2 | 3 | import ( 4 | cosmTx "github.com/cosmos/cosmos-sdk/types/tx" 5 | 6 | sdk "github.com/cosmos/cosmos-sdk/types" 7 | ) 8 | 9 | type IndexerTx struct { 10 | Body Body `json:"body"` 11 | AuthInfo cosmTx.AuthInfo 12 | } 13 | 14 | type Response struct { 15 | TxHash string `json:"txhash"` 16 | Height string `json:"height"` 17 | TimeStamp string `json:"timestamp"` 18 | Code 
uint32 `json:"code"` 19 | RawLog string `json:"raw_log"` 20 | Log []LogMessage `json:"logs"` 21 | } 22 | 23 | // TxLogMessage: 24 | // Cosmos blockchains return Transactions with an array of "logs" e.g. 25 | // 26 | // "logs": [ 27 | // 28 | // { 29 | // "msg_index": 0, 30 | // "events": [ 31 | // { 32 | // "type": "coin_received", 33 | // "attributes": [ 34 | // { 35 | // "key": "receiver", 36 | // "value": "juno128taw6wkhfq29u83lmh5qyfv8nff6h0w577vsy" 37 | // }, ... 38 | // ] 39 | // } ... 40 | // 41 | // The individual log always has a msg_index corresponding to the Message from the Transaction. 42 | // But the events are specific to each Message type, for example MsgSend might be different from 43 | // any other message type. 44 | // 45 | // This struct just parses the KNOWN fields and leaves the other fields as raw JSON. 46 | // More specific type parsers for each message type can parse those fields if they choose to. 47 | type LogMessage struct { 48 | MessageIndex int `json:"msg_index"` 49 | Events []LogMessageEvent `json:"events"` 50 | } 51 | 52 | type Attribute struct { 53 | Key string 54 | Value string 55 | } 56 | 57 | type LogMessageEvent struct { 58 | Type string `json:"type"` 59 | Attributes []Attribute `json:"attributes"` 60 | } 61 | 62 | type Body struct { 63 | Messages []sdk.Msg `json:"messages"` 64 | } 65 | 66 | type AuthInfo struct { 67 | TxFee Fee `json:"fee"` 68 | TxSignerInfos []SignerInfo `json:"signer_infos"` // this is used in REST but not RPC parsers 69 | } 70 | 71 | type Fee struct { 72 | TxFeeAmount []FeeAmount `json:"amount"` 73 | GasLimit string `json:"gas_limit"` 74 | } 75 | 76 | type FeeAmount struct { 77 | Denom string `json:"denom"` 78 | Amount string `json:"amount"` 79 | } 80 | 81 | type SignerInfo struct { 82 | PublicKey PublicKey `json:"public_key"` 83 | } 84 | 85 | type PublicKey struct { 86 | Type string `json:"@type"` 87 | Key string `json:"key"` 88 | } 89 | 90 | // In the json, TX data is split into 2 arrays, used to merge the 
full dataset 91 | type MergedTx struct { 92 | Tx IndexerTx 93 | TxResponse Response 94 | } 95 | -------------------------------------------------------------------------------- /db/db_test.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "log" 5 | "testing" 6 | "time" 7 | 8 | "github.com/DefiantLabs/cosmos-indexer/db/models" 9 | "github.com/ory/dockertest/v3" 10 | "github.com/stretchr/testify/suite" 11 | "gorm.io/gorm" 12 | ) 13 | 14 | // TODO: Optimize tests to use a single database instance, clean database after each test, and teardown database after all tests are done 15 | 16 | type DBTestSuite struct { 17 | suite.Suite 18 | db *gorm.DB 19 | clean func() 20 | } 21 | 22 | func (suite *DBTestSuite) SetupTest() { 23 | clean, db, err := SetupTestDatabase() 24 | suite.Require().NoError(err) 25 | 26 | suite.db = db 27 | suite.clean = clean 28 | } 29 | 30 | func (suite *DBTestSuite) TearDownTest() { 31 | if suite.clean != nil { 32 | suite.clean() 33 | } 34 | 35 | suite.db = nil 36 | suite.clean = nil 37 | } 38 | 39 | func (suite *DBTestSuite) TestMigrateModels() { 40 | err := MigrateModels(suite.db) 41 | suite.Require().NoError(err) 42 | } 43 | 44 | func (suite *DBTestSuite) TestGetDBChainID() { 45 | err := MigrateModels(suite.db) 46 | suite.Require().NoError(err) 47 | 48 | initChain := models.Chain{ 49 | ChainID: "testchain-1", 50 | } 51 | 52 | err = suite.db.Create(&initChain).Error 53 | suite.Require().NoError(err) 54 | 55 | chainID, err := GetDBChainID(suite.db, initChain) 56 | suite.Require().NoError(err) 57 | suite.Assert().NotZero(chainID) 58 | } 59 | 60 | func SetupTestDatabase() (func(), *gorm.DB, error) { 61 | // TODO: allow environment overrides to skip creating mock database 62 | pool, err := dockertest.NewPool("") 63 | if err != nil { 64 | return nil, nil, err 65 | } 66 | 67 | err = pool.Client.Ping() 68 | if err != nil { 69 | return nil, nil, err 70 | } 71 | 72 | resource, err := 
pool.Run("postgres", "15-alpine", []string{"POSTGRES_USER=test", "POSTGRES_PASSWORD=test", "POSTGRES_DB=test"}) 73 | if err != nil { 74 | return nil, nil, err 75 | } 76 | 77 | var db *gorm.DB 78 | if err := pool.Retry(func() error { 79 | var err error 80 | db, err = PostgresDbConnect(resource.GetBoundIP("5432/tcp"), resource.GetPort("5432/tcp"), "test", "test", "test", "debug") 81 | if err != nil { 82 | return err 83 | } 84 | return nil 85 | }); err != nil { 86 | return nil, nil, err 87 | } 88 | 89 | clean := func() { 90 | if err := pool.Purge(resource); err != nil { 91 | log.Fatalf("Could not purge resource: %s", err) 92 | } 93 | } 94 | 95 | return clean, db, nil 96 | } 97 | 98 | func createMockBlock(mockDb *gorm.DB, chain models.Chain, address models.Address, height int64, txIndexed bool, eventIndexed bool) (models.Block, error) { 99 | block := models.Block{ 100 | Chain: chain, 101 | Height: height, 102 | TimeStamp: time.Now(), 103 | TxIndexed: txIndexed, 104 | BlockEventsIndexed: eventIndexed, 105 | ProposerConsAddress: address, 106 | } 107 | 108 | err := mockDb.Create(&block).Error 109 | return block, err 110 | } 111 | 112 | func (suite *DBTestSuite) TestGetHighestBlockFunctions() { 113 | err := MigrateModels(suite.db) 114 | suite.Require().NoError(err) 115 | 116 | initChain := models.Chain{ 117 | ChainID: "testchain-1", 118 | } 119 | 120 | err = suite.db.Create(&initChain).Error 121 | suite.Require().NoError(err) 122 | 123 | initConsAddress := models.Address{ 124 | Address: "testchainaddress", 125 | } 126 | 127 | err = suite.db.Create(&initConsAddress).Error 128 | suite.Require().NoError(err) 129 | 130 | block1, err := createMockBlock(suite.db, initChain, initConsAddress, 1, true, true) 131 | suite.Require().NoError(err) 132 | 133 | txBlock := GetHighestIndexedBlock(suite.db, initChain.ID) 134 | eventBlock, err := GetHighestEventIndexedBlock(suite.db, initChain.ID) 135 | suite.Require().NoError(err) 136 | 137 | suite.Assert().Equal(block1.Height, 
txBlock.Height) 138 | suite.Assert().Equal(block1.Height, eventBlock.Height) 139 | 140 | _, err = createMockBlock(suite.db, initChain, initConsAddress, 2, false, false) 141 | suite.Require().NoError(err) 142 | 143 | txBlock = GetHighestIndexedBlock(suite.db, initChain.ID) 144 | eventBlock, err = GetHighestEventIndexedBlock(suite.db, initChain.ID) 145 | suite.Require().NoError(err) 146 | 147 | suite.Assert().Equal(block1.Height, txBlock.Height) 148 | suite.Assert().Equal(block1.Height, eventBlock.Height) 149 | 150 | block3, err := createMockBlock(suite.db, initChain, initConsAddress, 3, true, true) 151 | suite.Require().NoError(err) 152 | 153 | txBlock = GetHighestIndexedBlock(suite.db, initChain.ID) 154 | eventBlock, err = GetHighestEventIndexedBlock(suite.db, initChain.ID) 155 | suite.Require().NoError(err) 156 | 157 | suite.Assert().Equal(block3.Height, txBlock.Height) 158 | suite.Assert().Equal(block3.Height, eventBlock.Height) 159 | } 160 | 161 | func TestDBSuite(t *testing.T) { 162 | suite.Run(t, new(DBTestSuite)) 163 | } 164 | -------------------------------------------------------------------------------- /db/model_wrappers.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "github.com/DefiantLabs/cosmos-indexer/db/models" 5 | "github.com/DefiantLabs/cosmos-indexer/parsers" 6 | ) 7 | 8 | const ( 9 | OsmosisRewardDistribution uint = iota 10 | TendermintLiquidityDepositCoinsToPool 11 | TendermintLiquidityDepositPoolCoinReceived 12 | TendermintLiquiditySwapTransactedCoinIn 13 | TendermintLiquiditySwapTransactedCoinOut 14 | TendermintLiquiditySwapTransactedFee 15 | TendermintLiquidityWithdrawPoolCoinSent 16 | TendermintLiquidityWithdrawCoinReceived 17 | TendermintLiquidityWithdrawFee 18 | OsmosisProtorevDeveloperRewardDistribution 19 | ) 20 | 21 | type BlockDBWrapper struct { 22 | Block *models.Block 23 | BeginBlockEvents []BlockEventDBWrapper 24 | EndBlockEvents []BlockEventDBWrapper 25 | 
UniqueBlockEventTypes map[string]models.BlockEventType 26 | UniqueBlockEventAttributeKeys map[string]models.BlockEventAttributeKey 27 | } 28 | 29 | type BlockEventDBWrapper struct { 30 | BlockEvent models.BlockEvent 31 | Attributes []models.BlockEventAttribute 32 | BlockEventParsedDatasets []parsers.BlockEventParsedData 33 | } 34 | 35 | // Store transactions with their messages for easy database creation 36 | type TxDBWrapper struct { 37 | Tx models.Tx 38 | Messages []MessageDBWrapper 39 | UniqueMessageTypes map[string]models.MessageType 40 | UniqueMessageEventTypes map[string]models.MessageEventType 41 | UniqueMessageAttributeKeys map[string]models.MessageEventAttributeKey 42 | } 43 | 44 | type MessageDBWrapper struct { 45 | Message models.Message 46 | MessageEvents []MessageEventDBWrapper 47 | MessageParsedDatasets []parsers.MessageParsedData 48 | } 49 | 50 | type MessageEventDBWrapper struct { 51 | MessageEvent models.MessageEvent 52 | Attributes []models.MessageEventAttribute 53 | } 54 | 55 | type DenomDBWrapper struct { 56 | Denom models.Denom 57 | } 58 | -------------------------------------------------------------------------------- /db/models/address.go: -------------------------------------------------------------------------------- 1 | package models 2 | 3 | type Address struct { 4 | ID uint 5 | Address string `gorm:"uniqueIndex"` 6 | } 7 | -------------------------------------------------------------------------------- /db/models/block.go: -------------------------------------------------------------------------------- 1 | package models 2 | 3 | import ( 4 | "time" 5 | ) 6 | 7 | type Block struct { 8 | ID uint 9 | TimeStamp time.Time 10 | Height int64 `gorm:"uniqueIndex:chainheight"` 11 | ChainID uint `gorm:"uniqueIndex:chainheight"` 12 | Chain Chain 13 | ProposerConsAddress Address 14 | ProposerConsAddressID uint 15 | TxIndexed bool 16 | // TODO: Should block event indexing be split out or rolled up? 
17 | BlockEventsIndexed bool 18 | } 19 | 20 | // Used to keep track of BeginBlock and EndBlock events 21 | type BlockLifecyclePosition int 22 | 23 | const ( 24 | BeginBlockEvent BlockLifecyclePosition = iota 25 | EndBlockEvent 26 | ) 27 | 28 | type BlockEvent struct { 29 | ID uint 30 | // These fields uniquely identify every block event 31 | // Index refers to the position of the event in the block event lifecycle array 32 | // LifecyclePosition refers to whether the event is a BeginBlock or EndBlock event 33 | Index uint64 `gorm:"uniqueIndex:eventBlockPositionIndex,priority:3"` 34 | LifecyclePosition BlockLifecyclePosition `gorm:"uniqueIndex:eventBlockPositionIndex,priority:2"` 35 | BlockID uint `gorm:"uniqueIndex:eventBlockPositionIndex,priority:1"` 36 | Block Block 37 | BlockEventTypeID uint 38 | BlockEventType BlockEventType 39 | } 40 | 41 | type BlockEventType struct { 42 | ID uint 43 | Type string `gorm:"uniqueIndex"` 44 | } 45 | 46 | type BlockEventAttribute struct { 47 | ID uint 48 | BlockEvent BlockEvent 49 | BlockEventID uint `gorm:"uniqueIndex:eventAttributeIndex,priority:1"` 50 | Value string 51 | Index uint64 `gorm:"uniqueIndex:eventAttributeIndex,priority:2"` 52 | // Keys are limited to a smallish subset of string values set by the Cosmos SDK and external modules 53 | // Save DB space by storing the key as a foreign key 54 | BlockEventAttributeKeyID uint 55 | BlockEventAttributeKey BlockEventAttributeKey 56 | } 57 | 58 | type BlockEventAttributeKey struct { 59 | ID uint 60 | Key string `gorm:"uniqueIndex"` 61 | } 62 | 63 | type FailedBlock struct { 64 | ID uint 65 | Height int64 `gorm:"uniqueIndex:failedchainheight"` 66 | BlockchainID uint `gorm:"uniqueIndex:failedchainheight"` 67 | Chain Chain `gorm:"foreignKey:BlockchainID"` 68 | } 69 | 70 | type FailedEventBlock struct { 71 | ID uint 72 | Height int64 `gorm:"uniqueIndex:failedchaineventheight"` 73 | BlockchainID uint `gorm:"uniqueIndex:failedchaineventheight"` 74 | Chain Chain 
`gorm:"foreignKey:BlockchainID"` 75 | } 76 | -------------------------------------------------------------------------------- /db/models/chain.go: -------------------------------------------------------------------------------- 1 | package models 2 | 3 | type Chain struct { 4 | ID uint `gorm:"primaryKey"` 5 | ChainID string `gorm:"uniqueIndex"` // e.g. osmosis-1 6 | Name string // e.g. Osmosis 7 | } 8 | -------------------------------------------------------------------------------- /db/models/denom.go: -------------------------------------------------------------------------------- 1 | package models 2 | 3 | type Denom struct { 4 | ID uint 5 | Base string `gorm:"uniqueIndex"` 6 | } 7 | -------------------------------------------------------------------------------- /db/models/parsers.go: -------------------------------------------------------------------------------- 1 | package models 2 | 3 | type BlockEventParser struct { 4 | ID uint 5 | BlockLifecyclePosition BlockLifecyclePosition `gorm:"uniqueIndex:idx_block_event_parser_identifier_lifecycle_position"` 6 | Identifier string `gorm:"uniqueIndex:idx_block_event_parser_identifier_lifecycle_position"` 7 | } 8 | 9 | type BlockEventParserError struct { 10 | ID uint 11 | BlockEventParserID uint 12 | BlockEventParser BlockEventParser 13 | BlockEventID uint 14 | BlockEvent BlockEvent 15 | Error string 16 | } 17 | 18 | type MessageParser struct { 19 | ID uint 20 | // Should the message type be added here for clarity purposes? 
21 | Identifier string `gorm:"uniqueIndex:idx_message_parser_identifier"` 22 | } 23 | 24 | type MessageParserError struct { 25 | ID uint 26 | MessageParserID uint 27 | MessageParser MessageParser 28 | MessageID uint 29 | Message Message 30 | Error string 31 | } 32 | -------------------------------------------------------------------------------- /db/models/tx.go: -------------------------------------------------------------------------------- 1 | package models 2 | 3 | import ( 4 | "github.com/shopspring/decimal" 5 | "gorm.io/gorm" 6 | "gorm.io/gorm/clause" 7 | ) 8 | 9 | type Tx struct { 10 | ID uint 11 | Hash string `gorm:"uniqueIndex"` 12 | Code uint32 13 | BlockID uint 14 | Block Block 15 | Memo string 16 | SignerAddresses []Address `gorm:"many2many:tx_signer_addresses;"` 17 | Fees []Fee 18 | } 19 | 20 | type FailedTx struct { 21 | ID uint 22 | Hash string `gorm:"uniqueIndex"` 23 | BlockID uint 24 | Block Block 25 | } 26 | 27 | type Fee struct { 28 | ID uint `gorm:"primaryKey"` 29 | TxID uint `gorm:"uniqueIndex:txDenomFee"` 30 | Amount decimal.Decimal `gorm:"type:decimal(78,0);"` 31 | DenominationID uint `gorm:"uniqueIndex:txDenomFee"` 32 | Denomination Denom `gorm:"foreignKey:DenominationID"` 33 | PayerAddressID uint `gorm:"index:idx_payer_addr"` 34 | PayerAddress Address `gorm:"foreignKey:PayerAddressID"` 35 | } 36 | 37 | // This lifecycle function ensures the on conflict statement is added for Fees which are associated to Txes by the Gorm slice association method for has_many 38 | func (b *Fee) BeforeCreate(tx *gorm.DB) (err error) { 39 | tx.Statement.AddClause(clause.OnConflict{ 40 | Columns: []clause.Column{{Name: "tx_id"}, {Name: "denomination_id"}}, 41 | DoUpdates: clause.AssignmentColumns([]string{"amount"}), 42 | }) 43 | return nil 44 | } 45 | 46 | type MessageType struct { 47 | ID uint `gorm:"primaryKey"` 48 | MessageType string `gorm:"uniqueIndex;not null"` 49 | } 50 | 51 | type Message struct { 52 | ID uint 53 | TxID uint 
`gorm:"uniqueIndex:messageIndex,priority:1"` 54 | Tx Tx 55 | MessageTypeID uint `gorm:"foreignKey:MessageTypeID,index:idx_txid_typeid"` 56 | MessageType MessageType 57 | MessageIndex int `gorm:"uniqueIndex:messageIndex,priority:2"` 58 | MessageBytes []byte 59 | } 60 | 61 | type FailedMessage struct { 62 | ID uint 63 | MessageIndex int 64 | TxID uint 65 | Tx Tx 66 | } 67 | 68 | type MessageEvent struct { 69 | ID uint 70 | // These fields uniquely identify every message event 71 | // Index refers to the position of the event in the message event array 72 | Index uint64 `gorm:"uniqueIndex:messageEventIndex,priority:2"` 73 | MessageID uint `gorm:"uniqueIndex:messageEventIndex,priority:1"` 74 | Message Message 75 | MessageEventTypeID uint 76 | MessageEventType MessageEventType 77 | } 78 | 79 | type MessageEventType struct { 80 | ID uint 81 | Type string `gorm:"uniqueIndex"` 82 | } 83 | 84 | type MessageEventAttribute struct { 85 | ID uint 86 | MessageEvent MessageEvent 87 | MessageEventID uint `gorm:"uniqueIndex:messageAttributeIndex,priority:1"` 88 | Value string 89 | Index uint64 `gorm:"uniqueIndex:messageAttributeIndex,priority:2"` 90 | // Keys are limited to a smallish subset of string values set by the Cosmos SDK and external modules 91 | // Save DB space by storing the key as a foreign key 92 | MessageEventAttributeKeyID uint 93 | MessageEventAttributeKey MessageEventAttributeKey 94 | } 95 | 96 | type MessageEventAttributeKey struct { 97 | ID uint 98 | Key string `gorm:"uniqueIndex"` 99 | } 100 | -------------------------------------------------------------------------------- /db/parsers.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "github.com/DefiantLabs/cosmos-indexer/db/models" 5 | "gorm.io/gorm" 6 | ) 7 | 8 | func FindOrCreateCustomBlockEventParsers(db *gorm.DB, parsers map[string]models.BlockEventParser) error { 9 | err := db.Transaction(func(dbTransaction *gorm.DB) error { 10 | for 
key := range parsers { 11 | currParser := parsers[key] 12 | res := dbTransaction.FirstOrCreate(&currParser, &currParser) 13 | 14 | if res.Error != nil { 15 | return res.Error 16 | } 17 | parsers[key] = currParser 18 | } 19 | return nil 20 | }) 21 | return err 22 | } 23 | 24 | func FindOrCreateCustomMessageParsers(db *gorm.DB, parsers map[string]models.MessageParser) error { 25 | err := db.Transaction(func(dbTransaction *gorm.DB) error { 26 | for key := range parsers { 27 | currParser := parsers[key] 28 | res := dbTransaction.FirstOrCreate(&currParser, &currParser) 29 | 30 | if res.Error != nil { 31 | return res.Error 32 | } 33 | parsers[key] = currParser 34 | } 35 | return nil 36 | }) 37 | return err 38 | } 39 | 40 | func CreateBlockEventParserError(db *gorm.DB, blockEvent models.BlockEvent, parser models.BlockEventParser, parserError error) error { 41 | err := db.Transaction(func(dbTransaction *gorm.DB) error { 42 | res := dbTransaction.Create(&models.BlockEventParserError{ 43 | BlockEventParserID: parser.ID, 44 | BlockEventID: blockEvent.ID, 45 | Error: parserError.Error(), 46 | }) 47 | return res.Error 48 | }) 49 | return err 50 | } 51 | 52 | func DeleteCustomBlockEventParserError(db *gorm.DB, blockEvent models.BlockEvent, parser models.BlockEventParser) error { 53 | err := db.Transaction(func(dbTransaction *gorm.DB) error { 54 | parserError := models.BlockEventParserError{ 55 | BlockEventParserID: parser.ID, 56 | BlockEventID: blockEvent.ID, 57 | } 58 | res := dbTransaction.Where(&parserError).Delete(&parserError) 59 | return res.Error 60 | }) 61 | return err 62 | } 63 | 64 | func CreateMessageParserError(db *gorm.DB, message models.Message, parser models.MessageParser, parserError error) error { 65 | err := db.Transaction(func(dbTransaction *gorm.DB) error { 66 | res := dbTransaction.Create(&models.MessageParserError{ 67 | Error: parserError.Error(), 68 | MessageParserID: parser.ID, 69 | MessageID: message.ID, 70 | }) 71 | return res.Error 72 | }) 73 | return err 74 | } 75 | 76 | func
DeleteCustomMessageParserError(db *gorm.DB, message models.Message, parser models.MessageParser) error { 77 | err := db.Transaction(func(dbTransaction *gorm.DB) error { 78 | parserError := models.MessageParserError{ 79 | MessageParserID: parser.ID, 80 | MessageID: message.ID, 81 | } 82 | res := db.Where(&parserError).Delete(&parserError) 83 | return res.Error 84 | }) 85 | return err 86 | } 87 | -------------------------------------------------------------------------------- /db/utils.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "errors" 5 | 6 | "github.com/DefiantLabs/cosmos-indexer/db/models" 7 | "gorm.io/gorm" 8 | ) 9 | 10 | func FindOrCreateDenomByBase(db *gorm.DB, base string) (models.Denom, error) { 11 | if base == "" { 12 | return models.Denom{}, errors.New("base is required") 13 | } 14 | 15 | denom := models.Denom{ 16 | Base: base, 17 | } 18 | err := db.Where(&denom).FirstOrCreate(&denom).Error 19 | return denom, err 20 | } 21 | 22 | func FindOrCreateAddressByAddress(db *gorm.DB, address string) (models.Address, error) { 23 | if address == "" { 24 | return models.Address{}, errors.New("address is required") 25 | } 26 | 27 | addr := models.Address{ 28 | Address: address, 29 | } 30 | err := db.Where(&addr).FirstOrCreate(&addr).Error 31 | return addr, err 32 | } 33 | 34 | func GetChains(db *gorm.DB) ([]models.Chain, error) { 35 | var chains []models.Chain 36 | if err := db.Find(&chains).Error; err != nil { 37 | return nil, err 38 | } 39 | return chains, nil 40 | } 41 | -------------------------------------------------------------------------------- /docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "3.9" 2 | 3 | services: 4 | postgres: 5 | restart: "unless-stopped" 6 | image: postgres:15-alpine 7 | stop_grace_period: 1m 8 | volumes: 9 | - /etc/localtime:/etc/localtime:ro 10 | - postgres:/var/lib/postgresql/data 11 | 
environment: 12 | - POSTGRES_PASSWORD=${POSTGRES_PASSWORD} 13 | - POSTGRES_USER=${POSTGRES_USER} 14 | - POSTGRES_DB=${POSTGRES_DB} 15 | ports: 16 | - 5432:5432/tcp 17 | healthcheck: 18 | test: ["CMD", "nc", "-z", "-v", "localhost", "5432"] 19 | interval: 5s 20 | timeout: 5s 21 | retries: 5 22 | networks: 23 | default: 24 | aliases: 25 | - cosmos-indexer 26 | 27 | indexer: 28 | restart: "no" 29 | build: 30 | dockerfile: Dockerfile 31 | user: cosmos-indexer 32 | stop_grace_period: 10s 33 | depends_on: 34 | postgres: 35 | condition: service_healthy 36 | links: 37 | - postgres 38 | networks: 39 | default: 40 | aliases: 41 | - cosmos-indexer 42 | environment: 43 | - EXTENDED_ARGS="" 44 | - LOG_LEVEL="info" 45 | command: 46 | - /bin/sh 47 | - -c 48 | - | 49 | cosmos-indexer index \ 50 | --log.pretty=${PRETTY_LOG} \ 51 | --log.level=${LOG_LEVEL} \ 52 | --base.index-transactions=${INDEX_TRANSACTIONS} \ 53 | --base.index-block-events=${INDEX_BLOCK_EVENTS} \ 54 | --base.start-block=${START_BLOCK} \ 55 | --base.end-block=${END_BLOCK} \ 56 | --base.throttling=${THROTTLING} \ 57 | --base.rpc-workers=${RPC_WORKERS} \ 58 | --base.reindex=${REINDEX} \ 59 | --base.reattempt-failed-blocks=false \ 60 | --probe.rpc=${RPC_URL} \ 61 | --probe.account-prefix=${ACCOUNT_PREFIX} \ 62 | --probe.chain-id=${CHAIN_ID} \ 63 | --probe.chain-name=${CHAIN_NAME} \ 64 | --database.host=postgres \ 65 | --database.database=${POSTGRES_DB} \ 66 | --database.user=${POSTGRES_USER} \ 67 | --database.password=${POSTGRES_PASSWORD} \ 68 | ${EXTENDED_ARGS} 69 | 70 | volumes: 71 | postgres: 72 | -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | # Cosmos Indexer Docs 2 | 3 | The documentation found here is in-repo documentation intended for use in documenting and explaining workflows. 
4 | 5 | * [Quickstart](./quickstart.md) - Get up and running with the indexer quickly 6 | * [Usage](./usage/README.md) - Basic and advanced usage of the indexer 7 | * [Reference](./reference/README.md) - Reference documentation on how the indexer works and how to use the codebase as an SDK 8 | 9 | You may find more information in the [Cosmos Indexer Wiki](https://github.com/DefiantLabs/cosmos-indexer/wiki). 10 | -------------------------------------------------------------------------------- /docs/quickstart.md: -------------------------------------------------------------------------------- 1 | # Quickstart 2 | 3 | This guide will help you get up and running with the Cosmos Indexer quickly. The Cosmos Indexer is a tool for indexing and querying data from Cosmos SDK based blockchains. 4 | 5 | ## Installation 6 | 7 | Download the latest release from the [Releases](https://github.com/DefiantLabs/cosmos-indexer/releases) page. 8 | 9 | ## Configuration 10 | 11 | The Cosmos Indexer uses a `.toml` configuration file to set up the indexer. The configuration file is used to set up the database connection, the chain configuration, and the indexer configuration. 12 | 13 | 1. Take the configuration file example (config.toml.example) from the root of the repository and copy it to a new file named `config.toml`. 14 | 2. Edit the `config.toml` file to match your database connection and chain configuration. 15 | 3. Save the `config.toml` file in a location that the indexer can access. 16 | 17 | ## Running the Indexer 18 | 19 | The indexer can be run using the following command: 20 | 21 | ```bash 22 | cosmos-indexer index --config /path/to/config.toml 23 | ``` 24 | 25 | The indexer will start and begin indexing blocks from the chain. The indexer will continue to run based on the configuration values passed in for start and end blocks. 
26 | 27 | ## Important Configuration Values 28 | 29 | The following configuration values are important to understand when setting up the indexer: 30 | 31 | - `database.*` - The database connection configuration 32 | - `probe.*` - The probe configuration, used to determine the RPC connection to the chain 33 | - `base.start-block` - The block to start indexing from 34 | - `base.end-block` - The block to end indexing at 35 | - `base.index-transactions` - Whether to index transactions 36 | - `base.index-block-events` - Whether to index block events 37 | -------------------------------------------------------------------------------- /docs/reference/README.md: -------------------------------------------------------------------------------- 1 | # Reference 2 | 3 | This sections provides reference documentation on how the codebase works. It also provides documentation on how to use the `cosmos-indexer` codebase as an SDK to build a custom indexer. 4 | 5 | ## Application Workflow 6 | 7 | * [`index` Command](./application_workflow/index_command.md) - The main command that starts the application built using the [cobra](https://cobra.dev/) framework 8 | * [Application Workflow](./application_workflow/application_workflow.md) - The multi-processing workflow used by the application 9 | 10 | ## Default Data Indexing 11 | 12 | The application indexes data into a default shape. 
The following sections provide details on the datasets that are pulled from the blockchain and the default data indexing: 13 | 14 | * [Block Indexed Data](./default_data_indexing/block_indexed_data.md) - The shape of the data for blocks and how the application indexes it 15 | * [Block Events Indexed Data](./default_data_indexing/block_events_indexed_data.md) - The shape of the data for block events and how the application indexes it 16 | * [Transactions Indexed Data](./default_data_indexing/transactions_indexed_data.md) - The shape of the data for transactions and how the application indexes it 17 | 18 | ## Custom Data Indexing 19 | 20 | The application allows for custom data indexing by providing developer access to the underlying types used by the indexer. The following sections provide details on how to use the `cosmos-indexer` codebase as an SDK to build a custom indexer: 21 | 22 | * [Indexer Type](./custom_data_indexing/indexer_type.md) - The main controller for indexer behavior and how to modify it 23 | * [Indexer SDK and Custom Parsers](./custom_data_indexing/indexer_sdk_and_custom_parsers.md) - Reference documentation on custom parsers and how to register them 24 | * [Walkthrough](./custom_data_indexing/custom_indexer_walkthrough.md) - A walkthrough of a real world example of creating a custom indexer 25 | * [Examples](./custom_data_indexing/custom_indexer_examples.md) - An explanation of the examples provided in the codebase [examples](https://github.com/DefiantLabs/cosmos-indexer/tree/main/examples) directory 26 | 27 | ## Custom Cosmos Module Extensions 28 | 29 | The application allows for extending the supported transaction message types by providing developer access to the underlying types used by the indexer. This allows developers to bring in custom cosmos modules into the indexer, either through the usage of custom AppModuleBasic implementations with chain-specific message types or through the usage of registering custom message types in the indexer. 
30 | 31 | Depending on certain factors, such as the version of the Cosmos SDK the custom chain module is based on, developers may need to implement custom message types to be able to decode the transaction messages found on the chain. 32 | 33 | The following sections provide details on how to use the `cosmos-indexer` codebase as an SDK to extend the supported transaction message types: 34 | 35 | * [Custom Message Type Registration](./custom_cosmos_module_extensions/custom_message_type_registration.md) - Reference documentation on how to register custom message types in the indexer 36 | * [Cosmos Indexer Modules](./custom_cosmos_module_extensions/cosmos_indexer_modules.md) - Reference documentation on the strategy for modules provided by the `cosmos-indexer-modules` package for extending the supported transaction message types 37 | * [Probe Codec Walkthrough](./custom_cosmos_module_extensions/probe_codec_walkthrough.md) - Reference documentation on the probe package and its codec for decoding JSON RPC responses and their Protobuf encoded Transaction Messages 38 | -------------------------------------------------------------------------------- /docs/reference/application_workflow/application_workflow.md: -------------------------------------------------------------------------------- 1 | # Application Workflow 2 | 3 | The application uses a multi-processing workflow to achieve the following: 4 | 5 | 1. Splitting of concerns along different data processing pipelines 6 | 2. Easier to follow application execution 7 | 8 | ## System Workflow Diagram 9 | 10 | ![](images/workflow.png) 11 | 12 | ## Block Enqueue 13 | 14 | The Block Enqueue worker's only responsibility is to write blocks for processing along a block height channel. Creating this as a separate worker has the following intentions: 15 | 16 | 1. Handling complex logic for what blocks to enqueue - allows for more fine-grained control over which blocks get indexed 17 | 2. 
Managing the rate of block enqueue - allows throttling the entire application to slow down block processing if needed

The block enqueue functionality is currently customizable in a number of ways. There are built-in block enqueue functions that are driven by configuration options passed in the command line or config files. However, block enqueue is entirely overridable with new functionality for custom block enqueue functions.

## RPC Workers

The RPC Workers are responsible for gathering raw data from the RPC nodes based on the current application configuration requirements. The application allows configuring a number of RPC Workers in parallel, which will allow the data for multiple blocks to be gathered at the same time. Isolating RPC requests to this set of workers has the following intentions:

1. External network requests are the biggest application bottleneck; running them concurrently on a number of blocks at once can reduce this pressure
2. The number of concurrent workers can be increased/decreased based on how many requests the application should be making at the same time
3. Gathering of raw data in one location for later parsing

## Parser Worker

The parser worker is responsible for taking the raw, on-chain data from the RPC Workers and transforming it into application-specific types. This is used in particular to transform the raw RPC data into the database types for later database indexing. The parser worker handles database-specific data transform requirements. It also handles filtering mechanisms for reducing the size of the dataset for indexing based on configuration requirements.

## DB Worker

The database worker is responsible for inserting the parsed application types into the database. It is responsible for building up the data associations according to the data schema defined by the application.
36 | -------------------------------------------------------------------------------- /docs/reference/application_workflow/index_command.md: -------------------------------------------------------------------------------- 1 | # Index Command 2 | 3 | The `index` command is the main command that starts the indexer workflow. It is built using the [cobra](https://cobra.dev/) framework. 4 | 5 | The command can be configured using CLI flags or a passed in `.toml` configuration file. See the [Configuration](../usage/configuration.md) documentation for more information on how to configure the indexer. 6 | 7 | The command has the following workflow, implemented through the `root` parent command and the `index` child command: 8 | 9 | 1. The program is started with the first command being `index` 10 | 2. Cobra initialization functions run first 11 | 1. The configuration file parser function is called by the cobra `OnInitialize` function 12 | 2. The `index` command's PreRunE function is called, which calls the `setupIndex` function in the `cmd/index.go` file 13 | 1. This function is responsible for loading configuration values, validating the configuration values and database initializing connections 14 | 3. The `index` command's Run function is called, which calls the `index` function in the `cmd/index.go` file 15 | 1. This function is responsible for starting the indexer workflow, see the [Indexer Workflow](application_workflow.md) documentation for more information 16 | -------------------------------------------------------------------------------- /docs/reference/custom_cosmos_module_extensions/cosmos_indexer_modules.md: -------------------------------------------------------------------------------- 1 | # Cosmos Indexer Modules 2 | 3 | The [`cosmos-indexer-modules`](https://github.com/DefiantLabs/cosmos-indexer-modules) package provides a set of modules that extend the supported transaction message types in the Cosmos Indexer. 
These modules are used to extend the supported transaction message types in the Cosmos Indexer by providing custom message types for Cosmos SDK modules that are not part of the base Cosmos SDK. 4 | 5 | ## Strategy 6 | 7 | The `cosmos-indexer-modules` package provides a set of packages that allow access to a type URL mapping for custom message types. These mappings can be used to register custom message types with the codec in the Cosmos Indexer. 8 | 9 | The types defined in the subpackages must fit the Cosmos SDK `Msg` interface, which is used to define the transaction message types in the Cosmos SDK. These types are protobuf messages that are used to define the transaction messages that are sent to the blockchain and returned in the blockchain responses. 10 | 11 | The `cosmos-indexer-modules` package includes full `Msg` implementations for various (and growing) Cosmos SDK modules. This is achieved by generating the protobuf message types for the modules and implementing the `Msg` interface for each message type. These are then provided in a module-specific type URL mapping that can be used to register the custom message types with the codec in the Cosmos Indexer. 12 | 13 | ## Usage 14 | 15 | The following shows usage of how one of the `cosmos-indexer-modules` packages can be used to extend the supported transaction message types in the Cosmos Indexer. The `cosmos-indexer-modules` package contains a [`block-sdk`](https://github.com/DefiantLabs/cosmos-indexer-modules/tree/main/block-sdk) subpackage that provides a set of custom message types for the [Skip MEV `blocksdk`](https://github.com/skip-mev/block-sdk) module. 16 | 17 | The `block-sdk` package defines a [`GetBlockSDKTypeMap` function](https://github.com/DefiantLabs/cosmos-indexer-modules/blob/main/block-sdk/msg_types.go#L17-L26) that returns a map of type URLs to the custom message types for the `blocksdk` module. This map can be used to register the custom message types with the codec in the Cosmos Indexer. 
The underlying types have been generated using protobuf definitions for the `blocksdk` module. 18 | 19 | These can be passed to the `RegisterCustomMsgTypesByTypeURLs` method in the `Indexer` type in the Cosmos Indexer to register the custom message types with the codec. This allows the Cosmos Indexer to decode and encode the custom message types to Go types at runtime. 20 | 21 | ```go 22 | package main 23 | 24 | import ( 25 | "log" 26 | 27 | blockSDKModules "github.com/DefiantLabs/cosmos-indexer-modules/block-sdk" 28 | "github.com/DefiantLabs/cosmos-indexer/cmd" 29 | ) 30 | 31 | func main() { 32 | indexer := cmd.GetBuiltinIndexer() 33 | 34 | indexer.RegisterCustomMsgTypesByTypeURLs(blockSDKModules.GetBlockSDKTypeMap()) 35 | 36 | err := cmd.Execute() 37 | if err != nil { 38 | log.Fatalf("Failed to execute. Err: %v", err) 39 | } 40 | } 41 | ``` 42 | 43 | By providing the custom message types for the `blocksdk` module, the Cosmos Indexer can now decode custom message types to Go types at runtime. This allows the Cosmos Indexer to extend the supported transaction message types and index the custom message types for the `blocksdk` module. 44 | 45 | This example can be found in the `cosmos-indexer-modules` [examples/block-sdk-indexer](https://github.com/DefiantLabs/cosmos-indexer/tree/d020840f44775bf1680765867d54338592ac3caa/examples/block-sdk-indexer) codebase. This example also provides an example `filter.json` file for indexing only the `blocksdk` module messages, which conforms to the filter file creation requirements documented in the [Filtering](../../usage/filtering.md) doc. 
46 | -------------------------------------------------------------------------------- /docs/reference/custom_cosmos_module_extensions/custom_message_type_registration.md: -------------------------------------------------------------------------------- 1 | # Custom Message Type Registration 2 | 3 | The Cosmos Indexer comes with a built-in codec that is used to decode JSON RPC responses and their Protobuf encoded transaction messages. The codec is used to decode the response into Go types where appropriate. By default, the codec is set up to handle the base Cosmos SDK modules. 4 | 5 | However, the codec provides a way to register custom message types with the codec, allowing for decoding and encoding of custom message types to Go types at runtime by type URL. 6 | 7 | The two main ways to register custom message types with the codec are: 8 | 9 | 1. Using the Cosmos SDK `AppModuleBasics` interface to register an entire Cosmos SDK module with the codec by providing the module's `AppModuleBasic` implementation to the `Indexer` type before application execution 10 | 2. Using the custom message type URL tied to an underlying type to register custom message types with the codec 11 | 12 | These methods are described in detail below. 13 | 14 | ## Module Registration using AppModuleBasics 15 | 16 | In normal usage of the Cosmos SDK, message types are registered with the codec using the `AppModuleBasics` interface. The `RegisterInterfaces` method of the `module.BasicManager` interface is used to register custom message types with the codec. This is how the base Cosmos SDK modules are provided in the `probe` package and are used by the Indexer by default. 17 | 18 | This is done by: 19 | 20 | 1. Pulling the base Cosmos SDK modules into the `probe` client package and providing them in the `DefaultModuleBasics` variable, as can be seen in the [probe client package config.go file](https://github.com/DefiantLabs/probe/blob/main/client/config.go#L30-L31) 21 | 2. 
During `probe` client creation, using these module basics to register the base Cosmos SDK modules with the `probe` `ChainClientConfig` type, as can be seen in the [cosmos-indexer probe probe.go file](https://github.com/DefiantLabs/cosmos-indexer/blob/main/probe/probe.go#L26-L27) 22 | 3. These module basics then have thier module-specific interfaces registered with the codec during `probe` client creation, as can be seen in the [probe client encoding.go file](https://github.com/DefiantLabs/probe/blob/main/client/encoding.go#L30) `MakeCodec` function. 23 | 24 | The list of AppModuleBasics registered to the probe client can be extended to include new modules. The `Indexer` type provides a `RegisterCustomModuleBasics` in the [indexer package types.go file](https://github.com/DefiantLabs/cosmos-indexer/blob/main/indexer/registration.go#L14-L16) method that registers custom module basics with the indexer. This provides the `probe` client with the ability to register module-specific message types with the codec. 25 | 26 | The main difficulty with this approach is that it requires the developer to have access to the module's `AppModuleBasic` implementation. This is not always possible, especially when dealing with custom modules that are not part of the base Cosmos SDK. For example, the following list of reasons, amongst others, may prevent the developer from using the `AppModuleBasic` interface: 27 | 28 | 1. The module is not part of the base Cosmos SDK and does not use the exact version of the Cosmos SDK that the `cosmos-indexer` package is built on. 29 | 2. The module is not open source and the developer does not have access to the module's `AppModuleBasic` implementation for registration. 30 | 31 | In these cases, the developer can use the custom message type URL registration method instead. 32 | 33 | ## Custom Message Type Registration using Type URL 34 | 35 | This is useful for extending the supported transaction message types in the Cosmos Indexer. 
For instance, the `RegisterCustomTypeURL` function in the [client codec types package interface_registry.go file](https://github.com/DefiantLabs/probe/blob/main/client/codec/types/interface_registry.go) can be used to register custom message types with the codec. 36 | 37 | This is exactly how the Cosmos Indexer extends the supported transaction message types. The `Indexer` provides a [`RegisterCustomMsgTypesByTypeURLs`]((https://github.com/DefiantLabs/cosmos-indexer/blob/main/indexer/registration.go#L18-L19)) method that registers custom message types with the indexer. During application setup, custom message types are registered with the `probe` package codec during `ChainClient` creation. This process is handled in the setup in the following manner: 38 | 39 | 1. During the setup of the Indexer in the [cosmos-indexer/cmd package index.go file](https://github.com/DefiantLabs/cosmos-indexer/blob/main/cmd/index.go#L200-L201), the `GetProbeClient` function is called with the registered custom message types. 40 | 2. The `GetProbeClient` function in the [cosmos-indexer/probe package probe.go file](https://github.com/DefiantLabs/cosmos-indexer/blob/main/probe/probe.go#L10) creates a `ChainClientConfig` with the custom message types registered 41 | 3. The `ChainClientConfig` is passed to the `NewChainClient` function in the [probe/client package client.go file](https://github.com/DefiantLabs/probe/blob/main/client/client.go#L28) 42 | 4. The `ChainClient` is created with the custom message types registered with the codec during the `MakeCodec` function in the [probe client encoding.go file](https://github.com/DefiantLabs/probe/blob/main/client/encoding.go#L30) `MakeCodec` function. 
43 | -------------------------------------------------------------------------------- /docs/reference/custom_cosmos_module_extensions/probe_codec_walkthrough.md: -------------------------------------------------------------------------------- 1 | # Probe Codec Walkthrough 2 | 3 | The Cosmos Indexer uses the [probe](https://github.com/DefiantLabs/probe) package for Cosmos SDK protobuf codec management, RPC client generation, and blockchain RPC data querying/processing. 4 | 5 | ## Cosmos SDK Codec Management 6 | 7 | The Cosmos SDK uses Protobuf for encoding and decoding transaction messages, as well as other blockchain data returned in RPC responses. The `probe` package provides a codec for decoding JSON RPC responses and their Protobuf encoded transaction messages. Types are registered with the codec to allow for decoding and encoding of Protobuf messages to Go types at runtime. 8 | 9 | This allows data to be passed from the blockchain to the `probe` package in a format that can be decoded into Go types, allowing for easy processing and indexing of blockchain data. 10 | 11 | ## RPC Client Generation 12 | 13 | The `probe` package also provides a client generator that can be used to generate a client for a specific blockchain. The client provides methods for querying the blockchain for data, such as blocks, transactions, and events. 14 | 15 | The `ChainClient` type defined in the [client package client.go file](https://github.com/DefiantLabs/probe/blob/main/client/client.go) is used to generate the client for a specific blockchain. The client is generated using the `NewChainClient` function, which takes a `ChainClientConfig` type as an argument that contains the configuration for the client, such as RPC endpoint, chain ID and others. 
16 | 17 | ## Blockchain RPC Data Querying/Processing 18 | 19 | The `ChainClient` type is attached to a `Query` type defined in the [query package query.go file](https://github.com/DefiantLabs/probe/blob/main/query/query.go) that provides methods for querying the blockchain for data. During JSON RPC response decoding, the codec is used to decode the response into Go types where appropriate. 20 | 21 | ## Probe Interface Registry 22 | 23 | The `probe` package provides an interface registry that allows for registering custom message types with the codec. This registry provides methods for registering custom message types with the codec, allowing for decoding and encoding of custom message types to Go types at runtime by type URL. 24 | 25 | There are two main ways to register custom message types with the codec, using the Cosmos SDK `AppModuleBasics` interface to register an entire Cosmos SDK module with the codec, or by using the custom message type URL tied to an underlying type. See the [Custom Message Type Registration](./custom_message_type_registration.md) docs for more details. 26 | -------------------------------------------------------------------------------- /docs/reference/custom_data_indexing/custom_indexer_examples.md: -------------------------------------------------------------------------------- 1 | # Custom Indexer Examples 2 | 3 | The `cosmos-indexer` codebase provides a number of examples in the [examples](https://github.com/DefiantLabs/cosmos-indexer/tree/main/examples) directory. These examples are intended to provide a starting point for building custom indexers using the `cosmos-indexer` codebase as an Indexer SDK. 4 | 5 | ## IBC Patterns Example 6 | 7 | The IBC Patterns example demonstrates how to build a custom indexer that indexes IBC packets and acknowledgements. This example indexer is the subject of the [Custom Indexer Walkthrough](./custom_indexer_walkthrough.md) documentation, see that document for a detailed explanation of how it works. 
8 | 9 | ## Governance Patterns Example 10 | 11 | The Governance Patterns example demonstrates how to build a custom indexer that indexes governance proposals and votes. 12 | 13 | It takes message data from the `cosmos-sdk/x/gov` module and indexes it into a database. The example indexer listens for `MsgSubmitProposal` and `MsgVote` messages and indexes them into a custom model. 14 | 15 | The example also implements a filter mechanism to filter out message types that are not of interest to this indexer. This significantly reduces the amount of data that needs to be indexed. 16 | 17 | ## Validator Delegations Patterns Example 18 | 19 | The Validator Delegations Patterns example demonstrates how to build a custom indexer that indexes validator delegations and undelegations. 20 | 21 | It takes message data from the `cosmos-sdk/x/staking` module and indexes it into a database. The example indexer listens for `MsgDelegate` and `MsgUndelegate` messages and indexes them into a custom model. 22 | 23 | The example also implements a filter mechanism to filter out message types that are not of interest to this indexer. This significantly reduces the amount of data that needs to be indexed. 24 | -------------------------------------------------------------------------------- /docs/reference/custom_data_indexing/indexer_sdk_and_custom_parsers.md: -------------------------------------------------------------------------------- 1 | # Indexer SDK and Custom Parsers 2 | 3 | The `cosmos-indexer` relies on the `Indexer` type from the `indexer` package to control the behavior of the indexer. The `Indexer` type is instantiated in the `index` command and is available to the application through a getter function in the `cmd` package. 4 | 5 | ## Getter 6 | 7 | The `cmd` package provides a `GetBuiltinIndexer() *indexerPackage.Indexer` function that returns the `Indexer` instance. This allows for certain overrides or calling functions available on the instance. 
8 | 9 | ```go 10 | indexer := cmd.GetBuiltinIndexer() 11 | ``` 12 | 13 | Certain changes made to the indexer type will be persisted when calling the `index` command 14 | 15 | ## Custom Type Registration 16 | 17 | The `Indexer` type provides registration functions that will modify the behavior of the indexer. The following registration functions are available on the `Indexer` type in the [registration.go file](https://github.com/DefiantLabs/cosmos-indexer/blob/30f689fc4914f41cb5b7599a9e6ef730d71a7c3d/indexer/registration.go) in the `indexer` package: 18 | 19 | 1. `RegisterCustomModuleBasics` - Registers custom module basics for the chain, used for injecting custom Cosmos SDK modules into the Codec for the chain to allow RPC parsing of custom module transaction messages 20 | 2. `RegisterMessageTypeFilter` - Registers a message type filter for the chain, used for filtering out transaction messages that should not be indexed. Allows SDK access to the UX-provided message type filter described in the [filtering](../usage/filtering.md) documentation 21 | 3. `RegisterCustomModels` - Registers custom models into the application's database schema. These will be migrated into the database when the application starts. Used for custom data storage. 22 | 4. `RegisterCustomBeginBlockEventParser` - Registers a custom begin block event parser for the chain, used for parsing custom begin block events into custom data types 23 | 5. `RegisterCustomEndBlockEventParser` - Registers a custom end block event parser for the chain, used for parsing custom end block events into custom data types 24 | 6. `RegisterCustomMessageParser` - Registers a custom message parser for the chain, used for parsing custom transaction messages into custom data types 25 | 26 | When these functions are called before the `index` command is executed, the custom behavior will be persisted in the indexer instance. 
During the application workflow, the indexer will call custom parsers during data processing and database insertion steps. 27 | 28 | ## Custom Parser Interfaces 29 | 30 | The `cosmos-indexer` application provides interfaces for custom parsers to implement. These interfaces are used by the indexer to call custom parsing functions during the indexing workflow. You can find the definitions of the interfaces in the [parsers package](https://github.com/DefiantLabs/cosmos-indexer/tree/main/parsers).There are 2 types of custom parser interfaces available in the application: 31 | 32 | 1. `BlockEventParser` - Used for parsing block events into custom data types 33 | 2. `MessageParser` - Used for parsing transaction messages into custom data types 34 | 35 | These are highly generalized interfaces with a reliance on type wrappers and Go `any` types to transport the parsed dataset along the workflow. 36 | 37 | SDK developer users should implement these interfaces in their custom parsers to ensure that the indexer can call the custom parsing functions during the indexing workflow. 38 | 39 | Each of the custom parser registration functions in the `Indexer` type will take a custom parser that implements one of these interfaces and a unique identifier. The custom parser will be called during the indexing workflow to parse the data into custom data types and insert it into the database. 40 | -------------------------------------------------------------------------------- /docs/reference/custom_data_indexing/indexer_type.md: -------------------------------------------------------------------------------- 1 | # Indexer Type 2 | 3 | The `Indexer` type is the main controller for the indexer behavior. It is responsible for managing the indexer workflow and the underlying components that make up the indexer. 
4 | 5 | For full implementation details, see the [indexer package](https://github.com/DefiantLabs/cosmos-indexer/tree/main/indexer) 6 | 7 | ## Indexer Type and `index` Command Instantiation 8 | 9 | The `Indexer` type contains the following notable elements: 10 | 11 | 1. A database connection 12 | 2. A chain RPC client from the Probe [client package](https://github.com/DefiantLabs/probe/tree/main/client) 13 | 3. A Block Enqueue function that handles passing block heights to be indexed to the processors 14 | 4. Filter configurations according to the chain's [filter](../usage/filtering.md) configuration 15 | 5. Custom Parser types for block events and transaction messages 16 | 17 | The `index` command in the application instantiates the `Indexer` type at application runtime and ensures that it is properly configured before starting the indexing workflow. 18 | 19 | There are some built-in behaviors that should not be overriden in the `Indexer` type. These are handled by the setup function called in the `index` command. Examples are: 20 | 21 | 1. Database connection based on the configuration 22 | 2. Chain RPC client based on the configuration 23 | 24 | However, behavior of the `Indexer` type that can be modified will be noted here. 25 | 26 | ## Getting the Indexer Instance 27 | 28 | The `Indexer` type is instantiated in the `index` command and is available to the application as a `cmd` package global variable. 29 | 30 | The `cmd` package provides a `GetBuiltinIndexer() *indexerPackage.Indexer` function that returns this instance, allowing for overrides or calling functions available on the instance. 31 | 32 | ## Block Enqueue Function - Design and Modification 33 | 34 | The Block Enqueue function is responsible for passing blocks to be indexed to the processors. It is the main entrypoint of the indexer workflow. Its only responsibility is to determine the next block height to be processed and pass it to the processors. 
35 | 36 | For this reason, the `Indexer` type allows for overriding the default Block Enqueue functions based on developer requirements. This can be done by modifying the `EnqueueBlock` function in the `Indexer` instance that is available to the `index` command. 37 | 38 | The `BlockEnqueueFunction` function signature is as follows: 39 | 40 | ```go 41 | func(chan *core.EnqueueData) error 42 | ``` 43 | 44 | The function takes a channel of `core.EnqueueData` and returns an error. The `core.EnqueueData` type is a struct that contains the block height to be indexed and what data should be pulled from the RPC node during RPC requests. 45 | 46 | ```go 47 | type EnqueueData struct { 48 | Height int64 49 | IndexBlockEvents bool 50 | IndexTransactions bool 51 | } 52 | ``` 53 | 54 | The `Height` field is the block height to be indexed. The `IndexBlockEvents` and `IndexTransactions` fields are flags that determine if block events and transactions should be indexed for the block. 55 | 56 | At runtime, the `BlockEnqueueFunction` is called with a channel of `core.EnqueueData` that is used to pass blocks to be indexed by the processors. 57 | 58 | This allows for various developer overrides of which blocks should be processed during the indexing workflow. 59 | 60 | For examples of in-application block enqueue functions see the [core package block_enqueue.go](https://github.com/DefiantLabs/cosmos-indexer/blob/main/core/block_enqueue.go) file. The functions in this package return closures that define highly customized block enqueue functions. These are the built-in block enqueue functions that can be triggered by various configuration variables. 61 | 62 | ## DB Instance - Gorm Database Connection to PostgreSQL and Modification 63 | 64 | The application relies on the [Gorm](https://gorm.io/docs/) ORM library for interacting with the underlying PostgreSQL database. The `Indexer` type contains a `DB` field that is a pointer to the Gorm database connection. 
65 | 66 | During `index` command setup, the application will connect to the database based on the passed in configuration. The `DB` field is then set on the `Indexer` instance. 67 | 68 | However, if a customized Gorm instance is desired, the application will respect `DB` field overrides on the `Indexer` instance if it is not `nil` before setup runs. 69 | 70 | ## Chain RPC Client - Probe Client Connection 71 | 72 | The application relies on the Probe [client package](https://github.com/DefiantLabs/probe/tree/main/client) for interacting with the chain's RPC node. The `Indexer` type contains a `Client` field that is a pointer to the Probe client. 73 | 74 | The client package provides functionality that uses built-in Cosmos SDK functionality to make requests to the chain's RPC for raw blockchain data. 75 | 76 | ## Post Indexing Workflow 77 | 78 | The `Indexer` type contains a `PostIndexCustomMessage` function that is called after the indexing workflow is complete. This function is responsible for any post-processing that should be done after the indexer has finished indexing blocks. 79 | 80 | This function can be overridden by the developer to add custom post-processing logic. Leaving the function nil will result in no post-processing being done. 81 | 82 | The dataset passed to the function is a `core.PostIndexData` struct that contains the following datasets: 83 | 84 | 1. The Indexer Config 85 | 2. The DB connection 86 | 3. Whether the indexer is in Dry Run mode 87 | 4. The entire indexed dataset, including custom message parser datasets (if they were filled out during single indexing, they will contain their respective data) 88 | 5. 
Trackers for the custom message parsers by Identifier 89 | -------------------------------------------------------------------------------- /docs/reference/default_data_indexing/block_events_indexed_data.md: -------------------------------------------------------------------------------- 1 | # Block Events Indexed Data 2 | 3 | The application indexes Block BeginBlocker and EndBlocker events into a well-structured data shape. In this section, you will find an overview of what Block BeginBlocker and EndBlocker events are and how they are indexed by the application. 4 | 5 | ## Anatomy of a Block, BeginBlock and EndBlock Events 6 | 7 | ### Blocks 8 | 9 | In Cosmos, every block has the following workflow (amongst other execution steps): 10 | 11 | 1. BeginBlocker - Execution of application-specific logic before transactions execute 12 | 2. Transaction Execution 13 | 3. EndBlocker - Execution of application-specific logic after transactions execute 14 | 15 | The execution of the application-specific logic in BeginBlocker and EndBlocker steps results in events being emitted that are found in the block results dataset like so: 16 | 17 | ```json 18 | { 19 | "result": { 20 | "begin_block_events": [...] 21 | "end_block_events": [...] 22 | } 23 | } 24 | ``` 25 | 26 | These events can contain very useful information that can be used by indexers to track application execution. 27 | 28 | ### Block Events 29 | 30 | Block events have the following data shape: 31 | 32 | ```json 33 | { 34 | "type": "", 35 | "attributes": [ 36 | { 37 | "key": "", 38 | "value": "", 39 | "index": true 40 | }, 41 | ... 42 | ] 43 | } 44 | ``` 45 | 46 | Each block event has a type, indicated by the top level `type` field. They also have any number of attributes which contain the arbitrary key/value pairs of data points associated with the event. 
47 | 48 | For a concrete example of a block event: 49 | 50 | ``` 51 | { 52 | "type": "coin_received", 53 | "attributes": [ 54 | { 55 | "key": "receiver", 56 | "value": "cosmos1m3h30wlvsf8llruxtpukdvsy0km2kum8g38c8q", 57 | "index": true 58 | }, 59 | { 60 | "key": "amount", 61 | "value": "8987882uatom", 62 | "index": true 63 | } 64 | ] 65 | } 66 | ``` 67 | 68 | ### Block Event Windows 69 | 70 | It is important to keep the following in mind: 71 | 72 | 1. A single execution of an action in a block can emit multiple events 73 | 2. Block events are stored in an array in the block results dataset 74 | 75 | This means that a single action may be found to require associating multiple events in a row to figure out exactly what happened. For instance, the following set of events seem to be associated to the same action (taken from an actual block on Cosmoshub - `19,744,082`): 76 | 77 | ``` 78 | { 79 | "type": "coin_received", 80 | "attributes": [ 81 | { 82 | "key": "receiver", 83 | "value": "cosmos1m3h30wlvsf8llruxtpukdvsy0km2kum8g38c8q", 84 | "index": true 85 | }, 86 | { 87 | "key": "amount", 88 | "value": "8987882uatom", 89 | "index": true 90 | } 91 | ] 92 | }, 93 | { 94 | "type": "coinbase", 95 | "attributes": [ 96 | { 97 | "key": "minter", 98 | "value": "cosmos1m3h30wlvsf8llruxtpukdvsy0km2kum8g38c8q", 99 | "index": true 100 | }, 101 | { 102 | "key": "amount", 103 | "value": "8987882uatom", 104 | "index": true 105 | } 106 | ] 107 | }, 108 | { 109 | "type": "coin_spent", 110 | "attributes": [ 111 | { 112 | "key": "spender", 113 | "value": "cosmos1m3h30wlvsf8llruxtpukdvsy0km2kum8g38c8q", 114 | "index": true 115 | }, 116 | { 117 | "key": "amount", 118 | "value": "8987882uatom", 119 | "index": true 120 | } 121 | ] 122 | } 123 | ``` 124 | 125 | The application applies the term **Block Event Windows** to this concept. 126 | 127 | ## Indexing Block Events 128 | 129 | The application indexes block events into a well-structured data shape. 
For implementation details, see the [block.go](https://github.com/DefiantLabs/cosmos-indexer/blob/main/db/models/block.go) file in the models package. 130 | 131 | The indexed dataset has the following general overview: 132 | 133 | 1. Block BeginBlocker events are indexed per Block 134 | 2. Block EndBlocker events are indexed per Block 135 | 3. Block Events are indexed per Lifecycle Position (BeginBlocker or EndBlocker) 136 | 4. Block Event Attributes are indexed per Block Event 137 | 138 | See the below database diagram for complete details on how the data is structured and what relationships exist between the different entities. 139 | 140 | ![Block Events Indexed Data Diagram](images/block-events-db.png) 141 | -------------------------------------------------------------------------------- /docs/reference/default_data_indexing/block_indexed_data.md: -------------------------------------------------------------------------------- 1 | # Block Indexed Data 2 | 3 | The application indexes block data into a well-structured data shape. In this section, you will find an overview of what blocks are and how they are indexed by the application. 4 | 5 | ## Anatomy of a Block 6 | 7 | In Cosmos, every block contains a standard set of data that may be of interest to indexers. The block data shape that is of interest to the current iteration of Cosmos Indexer is as follows: 8 | 9 | ```json 10 | { 11 | "header": { 12 | "height": "", 13 | "time": "", 14 | "proposer_address": "
" 15 | }, 16 | ... 17 | } 18 | ``` 19 | 20 | Each block has a header that contains the block height, the time the block was committed, and the address of the block proposer. This data is useful for tracking the progress of the blockchain and the actors that are participating in the network. 21 | 22 | ## Indexing Block Data 23 | 24 | ### Block Model Use Case 25 | 26 | The Block database model is used in 2 general ways: 27 | 28 | 1. Indexing the block data from the blockchain 29 | 2. Tracking the workflow of the application as it indexes blocks 30 | 31 | For this reason, the block model contains a few, small complexities that enable the application to track the progress of the blockchain and the application's workflow in one location. See the [block.go](ttps://github.com/DefiantLabs/cosmos-indexer/blob/main/db/models/block.go) for more information. 32 | 33 | ### Chain Model Extension 34 | 35 | For maximum extensibility, the application has included a Chain model. This model is referenced in the Block model and is used to indicate which Chain the block is for. 36 | 37 | This allows the indexer to track multiple chains in one database and to query blocks by chain. 38 | 39 | ### Block Model Dataset 40 | 41 | The indexed dataset has the following general overview: 42 | 43 | 1. Block data is indexed with the following data: 44 | - `height`: The height of the block 45 | - `time`: The time the block was committed 46 | - `proposer_address`: The address of the block proposer 47 | 2. Application Block processing workflow is tracked with the following data: 48 | - `tx_indexed`: A boolean indicating if the block has been indexed for transactions 49 | - `block_events_indexed`: A boolean indicating if the block has been indexed for events 50 | 51 | See the below database diagram for complete details on how the data is structured and what relationships exist between the different entities. 
52 | 53 | ![Block Indexed Data Diagram](images/block-db.png) 54 | -------------------------------------------------------------------------------- /docs/reference/default_data_indexing/transactions_indexed_data.md: -------------------------------------------------------------------------------- 1 | # Transactions Indexed Data 2 | 3 | The application indexes Transactions and the Messages that are executed in them into a well-structured data shape. In this section, you will find an overview of what Transactions and Messages are and how they are indexed by the application. 4 | 5 | ## Anatomy of a Transaction and its Messages 6 | 7 | ### Transactions 8 | 9 | In Cosmos, every block has a list of transactions that are executed. Each transaction has any number of messages attached that define the actions that are executed in the transaction. 10 | 11 | When transactions for a block are requested through RPC, the returned dataset has the following shape (from the GetTxsEvent RPC service endpoint): 12 | 13 | ```json 14 | { 15 | "txs": [ 16 | "body": { 17 | "messages": [ 18 | { 19 | "type_url": "", 20 | "value": "" 21 | }, 22 | ... 23 | ] 24 | } 25 | ], 26 | "tx_responses": [ 27 | { 28 | "code": "", 29 | "logs": [], 30 | }, 31 | ... 32 | ] 33 | } 34 | ``` 35 | 36 | Each item in the `txs` array is a transaction that was executed in the block. Each transaction has a `body` field that contains the messages that were executed in the transaction. 37 | 38 | The `tx_responses` array contains the response data for each transaction. The `code` field contains the response code for the transaction and the `logs` field contains the event logs that were emitted during the transaction execution. 39 | 40 | ### Transaction Messages 41 | 42 | Transaction messages have the following data shape: 43 | 44 | ```json 45 | { 46 | "type_url": "", 47 | "value": "" 48 | } 49 | ``` 50 | 51 | Each message has a `type_url` field that indicates the type of message that was executed. 
The `value` field contains the protobuf encoded message, which contains message-specific data. 52 | 53 | ### Message Events and Attributes 54 | 55 | When a message is executed, it emits events that are logged in the transaction response. Each event has a set of attributes that provide additional context about the event. 56 | 57 | The event data shape is as follows: 58 | 59 | ```json 60 | { 61 | "type": "", 62 | "attributes": [ 63 | { 64 | "key": "", 65 | "value": "" 66 | }, 67 | ... 68 | ] 69 | } 70 | ``` 71 | 72 | ## Indexing Transactions and Messages 73 | 74 | The application indexes transactions and their messages into a well-structured data shape. For implementation details, see the [transactions.go](https://github.com/DefiantLabs/cosmos-indexer/blob/main/db/models/tx.go) file in the models package. 75 | 76 | The indexed dataset has the following general overview: 77 | 78 | 1. Transactions are indexed per Block 79 | 1. Transaction Fees are indexed per Transaction 80 | 2. Transaction Signers are indexed per Transaction 81 | 2. Messages are indexed per Transaction 82 | 1. Each message is indexed with the following data: 83 | - `type_url`: The type of message that was executed 84 | - `value`: The protobuf encoded message 85 | 3. Message Events are indexed per Message 86 | 4. Message Event Attributes are indexed per Message Event 87 | 88 | See the below database diagram for complete details on how the data is structured and what relationships exist between the different entities. 
89 | 90 | ![Transactions Indexed Data Diagram](images/tx-db.png) 91 | -------------------------------------------------------------------------------- /docs/reference/images/block-db.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bryanlabs/cosmos-indexer-sdk/8d95bd817714fad49404b47eebe4d5dccf59dff7/docs/reference/images/block-db.png -------------------------------------------------------------------------------- /docs/reference/images/block-events-db.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bryanlabs/cosmos-indexer-sdk/8d95bd817714fad49404b47eebe4d5dccf59dff7/docs/reference/images/block-events-db.png -------------------------------------------------------------------------------- /docs/reference/images/tx-db.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bryanlabs/cosmos-indexer-sdk/8d95bd817714fad49404b47eebe4d5dccf59dff7/docs/reference/images/tx-db.png -------------------------------------------------------------------------------- /docs/reference/images/workflow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bryanlabs/cosmos-indexer-sdk/8d95bd817714fad49404b47eebe4d5dccf59dff7/docs/reference/images/workflow.png -------------------------------------------------------------------------------- /docs/usage/README.md: -------------------------------------------------------------------------------- 1 | # Overview 2 | 3 | This section provides documentation on basic indexer usage. It will provide a high-level introduction. 
4 | 5 | * [Installation](installation.md) - How to get the application installed into your environment 6 | * [Configuration](configuration.md) - How to best configure the application to suit your needs 7 | * [Indexing](indexing.md) - How to spin up the indexer 8 | * [Filtering](filtering.md) - How to reduce the size of the indexed dataset to fit your requirements 9 | -------------------------------------------------------------------------------- /docs/usage/configuration.md: -------------------------------------------------------------------------------- 1 | # Configuration 2 | 3 | The application provides extensive configuration options to the `index` command that modify the behavior of the application. 4 | 5 | You can run `cosmos-indexer index --help` to get a full list of flag usages, or read on here for a detailed explanation of all the flags. 6 | 7 | ## Config 8 | 9 | All of the following configuration flags can be created in a `.toml` config file and passed to the application that way. See [config.toml.example](https://github.com/DefiantLabs/cosmos-indexer/blob/main/config.toml.example) for an example that will require further setup. 10 | 11 | - **Configuration File** 12 | - Description: config file location. 13 | - Flag: `--config` 14 | - Default Value: `""` 15 | - Note: default is `/config.toml` 16 | 17 | ## Base Settings - Main 18 | 19 | The main base settings are the most important to understand and set. 20 | 21 | - **Start Block** 22 | - Description: Block to start indexing at. 23 | - Flag: `--base.start-block` 24 | - Default Value: `0` 25 | - Note: Use `-1` to resume from the highest block indexed. 26 | 27 | - **End Block** 28 | - Description: Block to stop indexing at. 29 | - Flag: `--base.end-block` 30 | - Default Value: `-1` 31 | - Note: Use `-1` to index indefinitely. 32 | 33 | - **Block Input File** 34 | - Description: A file location containing a JSON list of block heights to index. This flag will override start and end block flags. 
35 | - Flag: `--base.block-input-file` 36 | - Default Value: `""` 37 | 38 | - **Reindex** 39 | - Description: If true, this will re-attempt to index blocks that have already been indexed. 40 | - Flag: `--base.reindex` 41 | - Default Value: `false` 42 | 43 | - **Reattempt Failed Blocks** 44 | - Description: Re-enqueue failed blocks for reattempts at startup. 45 | - Flag: `--base.reattempt-failed-blocks` 46 | - Default Value: `false` 47 | 48 | - **Reindex Message Type** 49 | - Description: A Cosmos message type URL. When set, the block enqueue method will reindex all blocks between start and end block that contain this message type. 50 | - Flag: `--base.reindex-message-type` 51 | - Default Value: `""` 52 | 53 | - **Block Enqueue Throttle Delay** 54 | - Description: Block enqueue throttle delay. 55 | - Flag: `--base.throttling` 56 | - Default Value: `0.5` 57 | 58 | ## Base Indexing 59 | 60 | These flags indicate what will be indexed during the main indexing loop. 61 | 62 | - **Transaction Indexing Enabled** 63 | - Description: Enable transaction indexing. 64 | - Flag: `--base.index-transactions` 65 | - Default Value: `false` 66 | 67 | - **Block Event Indexing Enabled** 68 | - Description: Enable block beginblocker and endblocker event indexing. 69 | - Flag: `--base.index-block-events` 70 | - Default Value: `false` 71 | 72 | ## Filter Configurations 73 | 74 | - **Filter File** 75 | - Description: Path to a file containing a JSON config of block event and message type filters to apply to beginblocker events, endblocker events, and TX messages. See [Filtering](./filtering.md) for how to create filters. 76 | - Flag: `--base.filter-file` 77 | - Default Value: `""` 78 | 79 | ## Other Base Settings 80 | 81 | - **Dry** 82 | - Description: Index the chain but don't insert data in the DB. 83 | - Flag: `--base.dry` 84 | - Default Value: `false` 85 | 86 | - **RPC Workers** 87 | - Description: The number of concurrent RPC request workers to spin up. 
88 | - Flag: `--base.rpc-workers` 89 | - Default Value: `1` 90 | 91 | - **Wait For Chain** 92 | - Description: Wait for chain to be in sync. 93 | - Flag: `--base.wait-for-chain` 94 | - Default Value: `false` 95 | 96 | - **Wait For Chain Delay** 97 | - Description: Seconds to wait between each check for the node to catch up to the chain. 98 | - Flag: `--base.wait-for-chain-delay` 99 | - Default Value: `10` 100 | 101 | - **Block Timer** 102 | - Description: Print out how long it takes to process this many blocks. 103 | - Flag: `--base.block-timer` 104 | - Default Value: `10000` 105 | 106 | - **Exit When Caught Up** 107 | - Description: Gets the latest block at runtime and exits when this block has been reached. 108 | - Flag: `--base.exit-when-caught-up` 109 | - Default Value: `false` 110 | 111 | - **Request Retry Attempts** 112 | - Description: Number of RPC query retries to make. 113 | - Flag: `--base.request-retry-attempts` 114 | - Default Value: `0` 115 | 116 | - **Request Retry Max Wait** 117 | - Description: Max retry incremental backoff wait time in seconds. 118 | - Flag: `--base.request-retry-max-wait` 119 | - Default Value: `30` 120 | 121 | ## Flags 122 | 123 | Extended flags that modify how the indexer handles parsed datasets. 124 | 125 | - **Index Tx Message Raw** 126 | - Description: If true, this will index the raw message bytes. This will significantly increase the size of the database. 127 | - Flag: `--flags.index-tx-message-raw` 128 | - Default Value: `false` 129 | 130 | - **Block Events Base64 Encoded** 131 | - Description: If true, decode the block event attributes and keys as base64. Some versions of CometBFT encode the block event attributes and keys as base64 in the response from RPC. 132 | - Flag: `--flags.block-events-base64-encoded` 133 | - Default Value: `false` 134 | 135 | ### Logging Configuration 136 | 137 | - **Log Level** 138 | - Description: Log level. 
139 | - Flag: `--log.level` 140 | - Default Value: `info` 141 | 142 | - **Pretty Logs** 143 | - Description: Enable pretty logs. 144 | - Flag: `--log.pretty` 145 | - Default Value: `false` 146 | 147 | - **Log Path** 148 | - Description: Log path. Default is `$HOME/.cosmos-indexer/logs.txt`. 149 | - Flag: `--log.path` 150 | - Default Value: `""` 151 | 152 | ### Database Configuration 153 | 154 | - **Database Host** 155 | - Description: Database host. 156 | - Flag: `--database.host` 157 | - Default Value: `""` 158 | 159 | - **Database Port** 160 | - Description: Database port. 161 | - Flag: `--database.port` 162 | - Default Value: `5432` 163 | 164 | - **Database Name** 165 | - Description: Database name. 166 | - Flag: `--database.database` 167 | - Default Value: `""` 168 | 169 | - **Database User** 170 | - Description: Database user. 171 | - Flag: `--database.user` 172 | - Default Value: `""` 173 | 174 | - **Database Password** 175 | - Description: Database password. 176 | - Flag: `--database.password` 177 | - Default Value: `""` 178 | 179 | - **Database Log Level** 180 | - Description: Database log level. 181 | - Flag: `--database.log-level` 182 | - Default Value: `""` 183 | 184 | ### Probe Configuration 185 | 186 | These flags modify the behavior of the usage of the [probe](https://github.com/DefiantLabs/probe) package, which is the main way the application uses to get data from the RPC server. 187 | 188 | - **Node RPC Endpoint** 189 | - Description: Node RPC endpoint. 190 | - Flag: `--probe.rpc` 191 | - Default Value: `""` 192 | 193 | - **Probe Account Prefix** 194 | - Description: Probe account prefix. 195 | - Flag: `--probe.account-prefix` 196 | - Default Value: `""` 197 | 198 | - **Probe Chain ID** 199 | - Description: Probe chain ID. 200 | - Flag: `--probe.chain-id` 201 | - Default Value: `""` 202 | 203 | - **Probe Chain Name** 204 | - Description: Probe chain name. 
205 | - Flag: `--probe.chain-name` 206 | - Default Value: `""` 207 | -------------------------------------------------------------------------------- /docs/usage/indexing.md: -------------------------------------------------------------------------------- 1 | # Indexing 2 | 3 | The indexer is built as a single binary to be run against an RPC server to retrieve block and transaction data. This section describes how to run the indexer and what to expect from its behavior. 4 | 5 | ## Running the indexer 6 | 7 | ### Basic Usage 8 | 9 | The most basic way to run the indexer is like so: 10 | 11 | `cosmos-indexer index` 12 | 13 | The application will do the following: 14 | 15 | 1. Look for a config file in the current working directory and use the flags specified in that file (see [Configuration](./configuration.md) for details) 16 | 2. Connect to the database 17 | 3. Begin the block enqueue process 18 | 4. As blocks are enqueued: 19 | 1. Blocks are picked up by RPC request workers 20 | 2. RPC request workers get the data from the blockchain 21 | 3. A processing worker picks up the RPC data and turns it into app-specific data types 22 | 4. App specific data types are picked up by a database worker and inserted into the database 23 | 24 | ### Providing Flags 25 | 26 | Flags can be passed to the indexer on the CLI or through a configuration `.toml` file. Either: 27 | 28 | 1. Provide a `config.toml` file in CWD or at a path specified with the `--config` flag that defines all required flags 29 | 2. Specify all flags that you want to override the default values for at the CLI 30 | 31 | Indexer CLI flags are scoped to a generalized structure using `.` syntax to improve clarity. For example, base level flags are specified at the `base` scope like `base.start-block` for the block to start indexing from. 32 | 33 | **Note**: CLI Flags will take precedence over flags provided in the config file. 
34 | 35 | ### Docker and docker-compose 36 | 37 | The application provides a basic, light-weight Dockerfile and docker-compose setup for using the application. 38 | 39 | After building the application docker configurations as detailed in [Installation](./installation.md), the application can be run in the following manner: 40 | 41 | Docker: 42 | ```bash 43 | docker run -it cosmos-indexer index 44 | ``` 45 | 46 | docker-compose: 47 | 48 | 1. Fill out the `.env.example` file and change its name to `.env` (or provide the environment variables according to the [Docker Compose docs](https://docs.docker.com/compose/environment-variables/set-environment-variables/#use-the-environment-attribute)). 49 | 2. Bring up the docker-compose: 50 | ``` 51 | docker-compose up 52 | ``` 53 | 54 | ## Advanced Usage 55 | 56 | The application behavior can be changed in different ways based on flags and provided configurations. Some examples follow: 57 | 58 | ### Block List File 59 | 60 | A set of specific blocks can be indexed explicitly by providing a block input file. 61 | 62 | 1. Create a file of block heights like so: 63 | * block-heights.json: 64 | 65 | ```json 66 | [ 67 | 1, 68 | 2, 69 | 3 70 | ] 71 | ``` 72 | 2. Provide the block input file to the application with the `--base.block-input-file` flag 73 | ``` 74 | cosmos-indexer index --config="" --base.block-input-file="block-heights.json" 75 | ``` 76 | 77 | All flags specific to which blocks to index will be ignored and the indexer will only index the blocks in the file and then exit. 78 | 79 | ### Message Type Reindexing 80 | 81 | It can be useful to enqueue blocks that have a specific message type in the transactions of the block. This behavior has been built directly into a flag. 
82 | 83 | Start the indexer with the message type URL passed to the `--base.reindex-message-type` flag, e.g.: 84 | 85 | ``` 86 | cosmos-indexer index --config="" --base.reindex-message-type="/cosmos.bank.v1beta1.MsgSend" 87 | ``` 88 | 89 | The indexer will do the following: 90 | 91 | 1. Find all blocks in the database that have Transactions that contain the specified message type 92 | 2. Pass these blocks through the block enqueue process to the indexer workflow 93 | 3. Reindex all data for the blocks found 94 | 95 | ### Indexer Application SDK - Customized Indexing Parsers and Datasets 96 | 97 | Advanced users/golang application developers may wish to extend the application to fit their app-specific needs beyond the built-in use-cases presented by the base application. To support this, the cosmos-indexer developers have developed ways to inject custom parsers and models into the application workflow by extending the golang application into a new binary. 98 | 99 | The application provides extensive customization methods to insert custom behavior into the main indexing loop, such as: 100 | 101 | 1. Registering custom models 102 | 2. Turning block events or Transaction Messages into custom models 103 | 3. Inserting custom models into the application 104 | 105 | For examples see the [examples/](https://github.com/DefiantLabs/cosmos-indexer/tree/main/examples) subfolder in the repository. 106 | 107 | For reference documentation on how to customize the application code to fit your needs, see the [reference](../reference/README.md) documentation. 108 | -------------------------------------------------------------------------------- /docs/usage/installation.md: -------------------------------------------------------------------------------- 1 | # Installation 2 | 3 | The indexer provides multiple installation methods based on preference. 
4 | 5 | ## Releases 6 | 7 | Visit the [Releases](https://github.com/DefiantLabs/cosmos-indexer/releases) page to keep up to date with the latest releases. 8 | 9 | ## Building from Source 10 | 11 | You may install the application by building from source. 12 | 13 | Prerequisites: 14 | 15 | 1. make 16 | 2. Go 1.19+ 17 | 3. The repository downloaded 18 | 19 | 20 | From the root of the codebase run: 21 | 22 | ``` 23 | make install 24 | ``` 25 | 26 | Run the following to ensure the installation is available: 27 | 28 | ``` 29 | cosmos-indexer --help 30 | ``` 31 | 32 | ## Dockerfile 33 | 34 | The root of the codebase provides a Dockerfile for building a light-weight image that contains the installation of the indexer. 35 | 36 | Prerequisites: 37 | 38 | 1. Docker 39 | 2. (Optional) docker-compose 40 | 41 | 42 | From the root of the codebase run: 43 | 44 | ``` 45 | docker build -t cosmos-indexer . 46 | ``` 47 | 48 | Run the following to ensure the docker build was successful: 49 | 50 | ``` 51 | docker run -it cosmos-indexer cosmos-indexer --help 52 | ``` 53 | 54 | Optionally, the repo has provided a generalized `docker-compose.yaml` file that makes use of: 55 | 56 | 1. A PostgreSQL container with a volume for the database 57 | 2. 
An indexer service that uses the repo Dockerfile and a set of required environment variables at runtime 58 | 59 | From the root of the codebase run: 60 | 61 | ``` 62 | docker-compose build 63 | ``` 64 | -------------------------------------------------------------------------------- /examples/block-sdk-indexer/.gitignore: -------------------------------------------------------------------------------- 1 | terra-classic-indexer 2 | -------------------------------------------------------------------------------- /examples/block-sdk-indexer/demo.sql: -------------------------------------------------------------------------------- 1 | SELECT b.height, tx.hash, met.message_type from txes tx 2 | JOIN messages me on me.tx_id = tx.id 3 | JOIN blocks b on b.id = tx.block_id 4 | JOIN message_types met on met.id = me.message_type_id; 5 | -------------------------------------------------------------------------------- /examples/block-sdk-indexer/filter.json: -------------------------------------------------------------------------------- 1 | { 2 | "message_type_filters": [ 3 | { 4 | "type": "message_type_regex", 5 | "message_type_regex": ".*osmosis.*", 6 | "should_ignore": true 7 | }, 8 | { 9 | "type": "message_type_regex", 10 | "message_type_regex": ".*cosmos.*", 11 | "should_ignore": true 12 | }, 13 | { 14 | "type": "message_type", 15 | "message_type": "/sdk.auction.v1.MsgAuctionBid" 16 | } 17 | ] 18 | } 19 | -------------------------------------------------------------------------------- /examples/block-sdk-indexer/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "log" 5 | 6 | blockSDKModules "github.com/DefiantLabs/cosmos-indexer-modules/block-sdk" 7 | "github.com/DefiantLabs/cosmos-indexer/cmd" 8 | ) 9 | 10 | func main() { 11 | indexer := cmd.GetBuiltinIndexer() 12 | 13 | indexer.RegisterCustomMsgTypesByTypeURLs(blockSDKModules.GetBlockSDKTypeMap()) 14 | 15 | err := cmd.Execute() 16 | if err != 
nil { 17 | log.Fatalf("Failed to execute. Err: %v", err) 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /examples/gov-voting-patterns/demo-indexed-txes.sql: -------------------------------------------------------------------------------- 1 | SELECT b.height, tx.hash, met.message_type from txes tx 2 | JOIN messages me on me.tx_id = tx.id 3 | JOIN blocks b on b.id = tx.block_id 4 | JOIN message_types met on met.id = me.message_type_id; 5 | -------------------------------------------------------------------------------- /examples/gov-voting-patterns/demo.sql: -------------------------------------------------------------------------------- 1 | select b.height, a.address, p.proposal_id, CASE 2 | WHEN vo.option = 1 THEN 'yes' 3 | WHEN vo.option = 2 THEN 'abstain' 4 | WHEN vo.option = 3 THEN 'no' 5 | WHEN vo.option = 4 THEN 'veto' 6 | ELSE 'empty' END as vote 7 | FROM votes vo 8 | JOIN messages me on me.id = vo.msg_id 9 | JOIN proposals p on p.id = vo.proposal_id 10 | JOIN txes tx on tx.id = me.tx_id 11 | JOIN blocks b on b.id = tx.block_id 12 | JOIN message_types met on met.id = me.message_type_id 13 | JOIN addresses a on vo.address_id = a.id; 14 | -------------------------------------------------------------------------------- /examples/terra-classic-indexer/.gitignore: -------------------------------------------------------------------------------- 1 | terra-classic-indexer 2 | -------------------------------------------------------------------------------- /examples/terra-classic-indexer/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "log" 5 | 6 | terraModules "github.com/DefiantLabs/cosmos-indexer-modules/terra-classic" 7 | "github.com/DefiantLabs/cosmos-indexer/cmd" 8 | ) 9 | 10 | func main() { 11 | indexer := cmd.GetBuiltinIndexer() 12 | 13 | indexer.RegisterCustomMsgTypesByTypeURLs(terraModules.GetTerraClassicTypeMap()) 14 | 15 | 
err := cmd.Execute() 16 | if err != nil { 17 | log.Fatalf("Failed to execute. Err: %v", err) 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /filter/block_event_filter_registry.go: -------------------------------------------------------------------------------- 1 | package filter 2 | 3 | type StaticBlockEventFilterRegistry struct { 4 | BlockEventFilters []BlockEventFilter 5 | RollingWindowEventFilters []RollingWindowBlockEventFilter 6 | } 7 | 8 | func (r *StaticBlockEventFilterRegistry) RegisterBlockEventFilter(filter BlockEventFilter) { 9 | r.BlockEventFilters = append(r.BlockEventFilters, filter) 10 | } 11 | 12 | func (r *StaticBlockEventFilterRegistry) RegisterRollingWindowBlockEventFilter(filter RollingWindowBlockEventFilter) { 13 | r.RollingWindowEventFilters = append(r.RollingWindowEventFilters, filter) 14 | } 15 | 16 | func (r *StaticBlockEventFilterRegistry) NumFilters() int { 17 | return len(r.BlockEventFilters) + len(r.RollingWindowEventFilters) 18 | } 19 | -------------------------------------------------------------------------------- /filter/message_filter.go: -------------------------------------------------------------------------------- 1 | package filter 2 | 3 | import ( 4 | "github.com/DefiantLabs/cosmos-indexer/cosmos/modules/tx" 5 | "github.com/cosmos/cosmos-sdk/types" 6 | ) 7 | 8 | type MessageFilter interface { 9 | ShouldIndex(types.Msg, tx.LogMessage) bool 10 | } 11 | -------------------------------------------------------------------------------- /filter/message_type_filters.go: -------------------------------------------------------------------------------- 1 | package filter 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "regexp" 7 | ) 8 | 9 | type MessageTypeFilter interface { 10 | MessageTypeMatches(MessageTypeData) (bool, error) 11 | Ignore() bool 12 | Valid() (bool, error) 13 | } 14 | 15 | type MessageTypeData struct { 16 | MessageType string 17 | } 18 | 19 | type 
DefaultMessageTypeFilter struct { 20 | MessageType string `json:"message_type"` 21 | } 22 | 23 | type MessageTypeRegexFilter struct { 24 | MessageTypeRegexPattern string `json:"message_type_regex"` 25 | messageTypeRegex *regexp.Regexp 26 | ShouldIgnore bool `json:"should_ignore"` 27 | } 28 | 29 | func (f DefaultMessageTypeFilter) MessageTypeMatches(messageTypeData MessageTypeData) (bool, error) { 30 | return messageTypeData.MessageType == f.MessageType, nil 31 | } 32 | 33 | func (f MessageTypeRegexFilter) MessageTypeMatches(messageTypeData MessageTypeData) (bool, error) { 34 | return f.messageTypeRegex.MatchString(messageTypeData.MessageType), nil 35 | } 36 | 37 | func (f DefaultMessageTypeFilter) Ignore() bool { 38 | return false 39 | } 40 | 41 | func (f DefaultMessageTypeFilter) Valid() (bool, error) { 42 | if f.MessageType != "" { 43 | return true, nil 44 | } 45 | 46 | return false, errors.New("MessageType must be set") 47 | } 48 | 49 | func (f MessageTypeRegexFilter) Valid() (bool, error) { 50 | if f.messageTypeRegex != nil && f.MessageTypeRegexPattern != "" { 51 | return true, nil 52 | } 53 | 54 | return false, errors.New("MessageTypeRegexPattern must be set") 55 | } 56 | 57 | func (f MessageTypeRegexFilter) Ignore() bool { 58 | return f.ShouldIgnore 59 | } 60 | 61 | func NewRegexMessageTypeFilter(messageTypeRegexPattern string, shouldIgnore bool) (MessageTypeRegexFilter, error) { 62 | messageTypeRegex, err := regexp.Compile(messageTypeRegexPattern) 63 | if err != nil { 64 | return MessageTypeRegexFilter{}, fmt.Errorf("error compiling message type regex: %s", err) 65 | } 66 | 67 | return MessageTypeRegexFilter{ 68 | MessageTypeRegexPattern: messageTypeRegexPattern, 69 | messageTypeRegex: messageTypeRegex, 70 | ShouldIgnore: shouldIgnore, 71 | }, nil 72 | } 73 | -------------------------------------------------------------------------------- /filter/static_block_event_filters.go: -------------------------------------------------------------------------------- 1 
| package filter 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "regexp" 7 | 8 | "github.com/DefiantLabs/cosmos-indexer/db/models" 9 | ) 10 | 11 | type EventData struct { 12 | Event models.BlockEvent 13 | Attributes []models.BlockEventAttribute 14 | } 15 | 16 | type BlockEventFilter interface { 17 | EventMatches(EventData) (bool, error) 18 | IncludeMatch() bool 19 | Valid() (bool, error) 20 | } 21 | 22 | type DefaultBlockEventTypeFilter struct { 23 | EventType string `json:"event_type"` 24 | Inclusive bool `json:"inclusive"` 25 | } 26 | 27 | func (f DefaultBlockEventTypeFilter) EventMatches(eventData EventData) (bool, error) { 28 | return eventData.Event.BlockEventType.Type == f.EventType, nil 29 | } 30 | 31 | func (f DefaultBlockEventTypeFilter) IncludeMatch() bool { 32 | return f.Inclusive 33 | } 34 | 35 | func (f DefaultBlockEventTypeFilter) Valid() (bool, error) { 36 | if f.EventType != "" { 37 | return true, nil 38 | } 39 | 40 | return false, errors.New("EventType must be set") 41 | } 42 | 43 | type RegexBlockEventTypeFilter struct { 44 | EventTypeRegexPattern string `json:"event_type_regex"` 45 | eventTypeRegex *regexp.Regexp 46 | Inclusive bool `json:"inclusive"` 47 | } 48 | 49 | func (f RegexBlockEventTypeFilter) EventMatches(eventData EventData) (bool, error) { 50 | return f.eventTypeRegex.MatchString(eventData.Event.BlockEventType.Type), nil 51 | } 52 | 53 | func (f RegexBlockEventTypeFilter) IncludeMatch() bool { 54 | return f.Inclusive 55 | } 56 | 57 | func (f RegexBlockEventTypeFilter) Valid() (bool, error) { 58 | if f.eventTypeRegex != nil && f.EventTypeRegexPattern != "" { 59 | return true, nil 60 | } 61 | 62 | return false, errors.New("EventTypeRegexPattern must be set") 63 | } 64 | 65 | type DefaultBlockEventTypeAndAttributeValueFilter struct { 66 | EventType string `json:"event_type"` 67 | AttributeKey string `json:"attribute_key"` 68 | AttributeValue string `json:"attribute_value"` 69 | Inclusive bool `json:"inclusive"` 70 | } 71 | 72 | func (f 
DefaultBlockEventTypeAndAttributeValueFilter) EventMatches(eventData EventData) (bool, error) { 73 | if eventData.Event.BlockEventType.Type != f.EventType { 74 | return false, nil 75 | } 76 | 77 | for _, attr := range eventData.Attributes { 78 | if attr.BlockEventAttributeKey.Key == f.AttributeKey && attr.Value == f.AttributeValue { 79 | return true, nil 80 | } 81 | } 82 | 83 | return false, nil 84 | } 85 | 86 | func (f DefaultBlockEventTypeAndAttributeValueFilter) IncludeMatch() bool { 87 | return f.Inclusive 88 | } 89 | 90 | func (f DefaultBlockEventTypeAndAttributeValueFilter) Valid() (bool, error) { 91 | if f.EventType != "" && f.AttributeKey != "" && f.AttributeValue != "" { 92 | return true, nil 93 | } 94 | 95 | return false, errors.New("EventType, AttributeKey and AttributeValue must be set") 96 | } 97 | 98 | type RollingWindowBlockEventFilter interface { 99 | EventsMatch([]EventData) (bool, error) 100 | RollingWindowLength() int 101 | IncludeMatches() bool 102 | Valid() (bool, error) 103 | } 104 | 105 | type DefaultRollingWindowBlockEventFilter struct { 106 | EventPatterns []BlockEventFilter 107 | includeMatches bool 108 | } 109 | 110 | func (f DefaultRollingWindowBlockEventFilter) EventsMatch(eventData []EventData) (bool, error) { 111 | if len(eventData) < f.RollingWindowLength() { 112 | return false, nil 113 | } 114 | 115 | for i, pattern := range f.EventPatterns { 116 | patternMatches, err := pattern.EventMatches(eventData[i]) 117 | if !patternMatches || err != nil { 118 | return false, err 119 | } 120 | } 121 | 122 | return true, nil 123 | } 124 | 125 | func (f DefaultRollingWindowBlockEventFilter) IncludeMatches() bool { 126 | return f.includeMatches 127 | } 128 | 129 | func (f DefaultRollingWindowBlockEventFilter) RollingWindowLength() int { 130 | return len(f.EventPatterns) 131 | } 132 | 133 | func (f DefaultRollingWindowBlockEventFilter) Valid() (bool, error) { 134 | if len(f.EventPatterns) == 0 { 135 | return false, errors.New("eventPatterns must 
be set") 136 | } 137 | 138 | for index, pattern := range f.EventPatterns { 139 | valid, err := pattern.Valid() 140 | if !valid || err != nil { 141 | return false, fmt.Errorf("error parsing eventPatterns at index %d: %s", index, err) 142 | } 143 | } 144 | 145 | return true, nil 146 | } 147 | 148 | func NewDefaultBlockEventTypeFilter(eventType string, inclusive bool) BlockEventFilter { 149 | return &DefaultBlockEventTypeFilter{EventType: eventType, Inclusive: inclusive} 150 | } 151 | 152 | func NewDefaultBlockEventTypeAndAttributeValueFilter(eventType string, attributeKey string, attributeValue string, inclusive bool) BlockEventFilter { 153 | return &DefaultBlockEventTypeAndAttributeValueFilter{EventType: eventType, AttributeKey: attributeKey, AttributeValue: attributeValue, Inclusive: inclusive} 154 | } 155 | 156 | func NewRegexBlockEventFilter(eventTypeRegex string, inclusive bool) (BlockEventFilter, error) { 157 | re, err := regexp.Compile(eventTypeRegex) 158 | if err != nil { 159 | return nil, err 160 | } 161 | return &RegexBlockEventTypeFilter{EventTypeRegexPattern: eventTypeRegex, eventTypeRegex: re, Inclusive: inclusive}, nil 162 | } 163 | 164 | func NewDefaultRollingWindowBlockEventFilter(eventPatterns []BlockEventFilter, includeMatches bool) RollingWindowBlockEventFilter { 165 | return &DefaultRollingWindowBlockEventFilter{EventPatterns: eventPatterns, includeMatches: includeMatches} 166 | } 167 | -------------------------------------------------------------------------------- /indexer/db.go: -------------------------------------------------------------------------------- 1 | package indexer 2 | 3 | import ( 4 | "fmt" 5 | "sync" 6 | "time" 7 | 8 | "github.com/DefiantLabs/cosmos-indexer/config" 9 | dbTypes "github.com/DefiantLabs/cosmos-indexer/db" 10 | ) 11 | 12 | // doDBUpdates will read the data out of the db data chan that had been processed by the workers 13 | // if this is a dry run, we will simply empty the channel and track progress 14 | // otherwise 
we will index the data in the DB. 15 | // it will also read rewards data and index that. 16 | func (indexer *Indexer) DoDBUpdates(wg *sync.WaitGroup, txDataChan chan *DBData, blockEventsDataChan chan *BlockEventsDBData, dbChainID uint) { 17 | blocksProcessed := 0 18 | dbWrites := 0 19 | dbReattempts := 0 20 | timeStart := time.Now() 21 | defer wg.Done() 22 | 23 | for { 24 | // break out of loop once all channels are fully consumed 25 | if txDataChan == nil && blockEventsDataChan == nil { 26 | config.Log.Info("DB updates complete") 27 | break 28 | } 29 | 30 | select { 31 | // read tx data from the data chan 32 | case data, ok := <-txDataChan: 33 | if !ok { 34 | txDataChan = nil 35 | continue 36 | } 37 | dbWrites++ 38 | // While debugging we'll sometimes want to turn off INSERTS to the DB 39 | // Note that this does not turn off certain reads or DB connections. 40 | indexedBlock := data.block 41 | indexedDataset := data.txDBWrappers 42 | 43 | if !indexer.DryRun { 44 | var err error 45 | config.Log.Info(fmt.Sprintf("Indexing %v TXs from block %d", len(data.txDBWrappers), data.block.Height)) 46 | indexedBlock, indexedDataset, err = dbTypes.IndexNewBlock(indexer.DB, data.block, data.txDBWrappers, *indexer.Config) 47 | if err != nil { 48 | // Do a single reattempt on failure 49 | dbReattempts++ 50 | _, _, err = dbTypes.IndexNewBlock(indexer.DB, data.block, data.txDBWrappers, *indexer.Config) 51 | if err != nil { 52 | config.Log.Fatal(fmt.Sprintf("Error indexing block %v.", data.block.Height), err) 53 | } 54 | } 55 | 56 | err = dbTypes.IndexCustomMessages(*indexer.Config, indexer.DB, indexer.DryRun, indexedDataset, indexer.CustomMessageParserTrackers) 57 | if err != nil { 58 | config.Log.Fatal(fmt.Sprintf("Error indexing custom messages for block %d", data.block.Height), err) 59 | } 60 | 61 | config.Log.Info(fmt.Sprintf("Finished indexing %v TXs from block %d", len(data.txDBWrappers), data.block.Height)) 62 | } else { 63 | config.Log.Info(fmt.Sprintf("Processing block %d 
(dry run, block data will not be stored in DB).", data.block.Height)) 64 | } 65 | 66 | if indexer.PostIndexCustomMessageFunction != nil { 67 | config.Log.Info(fmt.Sprintf("Running PostIndexCustomMessageFunction for block %d", data.block.Height)) 68 | 69 | dataset := &PostIndexCustomMessageDataset{ 70 | Config: *indexer.Config, 71 | DB: indexer.DB, 72 | DryRun: indexer.DryRun, 73 | IndexedDataset: &indexedDataset, 74 | MessageParser: indexer.CustomMessageParserTrackers, 75 | IndexedBlock: indexedBlock, 76 | } 77 | 78 | err := indexer.PostIndexCustomMessageFunction(dataset) 79 | if err != nil { 80 | config.Log.Fatal(fmt.Sprintf("Error running PostIndexCustomMessageFunction for block %d", data.block.Height), err) 81 | } 82 | } 83 | 84 | // Just measuring how many blocks/second we can process 85 | if indexer.Config.Base.BlockTimer > 0 { 86 | blocksProcessed++ 87 | if blocksProcessed%int(indexer.Config.Base.BlockTimer) == 0 { 88 | totalTime := time.Since(timeStart) 89 | config.Log.Info(fmt.Sprintf("Processing %d blocks took %f seconds. 
%d total blocks have been processed.\n", indexer.Config.Base.BlockTimer, totalTime.Seconds(), blocksProcessed)) 90 | timeStart = time.Now() 91 | } 92 | if float64(dbReattempts)/float64(dbWrites) > .1 { 93 | config.Log.Fatalf("More than 10%% of the last %v DB writes have failed.", dbWrites) 94 | } 95 | } 96 | case eventData, ok := <-blockEventsDataChan: 97 | if !ok { 98 | blockEventsDataChan = nil 99 | continue 100 | } 101 | dbWrites++ 102 | numEvents := len(eventData.blockDBWrapper.BeginBlockEvents) + len(eventData.blockDBWrapper.EndBlockEvents) 103 | config.Log.Info(fmt.Sprintf("Indexing %v Block Events from block %d", numEvents, eventData.blockDBWrapper.Block.Height)) 104 | identifierLoggingString := fmt.Sprintf("block %d", eventData.blockDBWrapper.Block.Height) 105 | 106 | indexedDataset, err := dbTypes.IndexBlockEvents(indexer.DB, indexer.DryRun, eventData.blockDBWrapper, identifierLoggingString) 107 | if err != nil { 108 | config.Log.Fatal(fmt.Sprintf("Error indexing block events for %s.", identifierLoggingString), err) 109 | } 110 | 111 | err = dbTypes.IndexCustomBlockEvents(*indexer.Config, indexer.DB, indexer.DryRun, indexedDataset, identifierLoggingString, indexer.CustomBeginBlockParserTrackers, indexer.CustomEndBlockParserTrackers) 112 | if err != nil { 113 | config.Log.Fatal(fmt.Sprintf("Error indexing custom block events for %s.", identifierLoggingString), err) 114 | } 115 | 116 | config.Log.Info(fmt.Sprintf("Finished indexing %v Block Events from block %d", numEvents, eventData.blockDBWrapper.Block.Height)) 117 | } 118 | } 119 | } 120 | -------------------------------------------------------------------------------- /indexer/process.go: -------------------------------------------------------------------------------- 1 | package indexer 2 | 3 | import ( 4 | "sync" 5 | 6 | "github.com/DefiantLabs/cosmos-indexer/config" 7 | "github.com/DefiantLabs/cosmos-indexer/core" 8 | dbTypes "github.com/DefiantLabs/cosmos-indexer/db" 9 | ) 10 | 11 | // This function 
is responsible for processing raw RPC data into app-usable types. It handles both block events and transactions. 12 | // It parses each dataset according to the application configuration requirements and passes the data to the channels that handle the parsed data. 13 | func (indexer *Indexer) ProcessBlocks(wg *sync.WaitGroup, failedBlockHandler core.FailedBlockHandler, blockRPCWorkerChan chan core.IndexerBlockEventData, blockEventsDataChan chan *BlockEventsDBData, txDataChan chan *DBData, chainID uint, blockEventFilterRegistry BlockEventFilterRegistries) { 14 | defer close(blockEventsDataChan) 15 | defer close(txDataChan) 16 | defer wg.Done() 17 | 18 | for blockData := range blockRPCWorkerChan { 19 | currentHeight := blockData.BlockData.Block.Height 20 | config.Log.Infof("Parsing data for block %d", currentHeight) 21 | 22 | block, err := core.ProcessBlock(blockData.BlockData, blockData.BlockResultsData, chainID) 23 | if err != nil { 24 | config.Log.Error("ProcessBlock: unhandled error", err) 25 | failedBlockHandler(currentHeight, core.UnprocessableTxError, err) 26 | err := dbTypes.UpsertFailedBlock(indexer.DB, currentHeight, indexer.Config.Probe.ChainID, indexer.Config.Probe.ChainName) 27 | if err != nil { 28 | config.Log.Fatal("Failed to insert failed block", err) 29 | } 30 | continue 31 | } 32 | 33 | if blockData.IndexBlockEvents && !blockData.BlockEventRequestsFailed { 34 | config.Log.Info("Parsing block events") 35 | blockDBWrapper, err := core.ProcessRPCBlockResults(*indexer.Config, block, blockData.BlockResultsData, indexer.CustomBeginBlockEventParserRegistry, indexer.CustomEndBlockEventParserRegistry) 36 | if err != nil { 37 | config.Log.Errorf("Failed to process block events during block %d event processing, adding to failed block events table", currentHeight) 38 | failedBlockHandler(currentHeight, core.FailedBlockEventHandling, err) 39 | err := dbTypes.UpsertFailedEventBlock(indexer.DB, currentHeight, indexer.Config.Probe.ChainID, 
indexer.Config.Probe.ChainName) 40 | if err != nil { 41 | config.Log.Fatal("Failed to insert failed block event", err) 42 | } 43 | } else { 44 | config.Log.Infof("Finished parsing block event data for block %d", currentHeight) 45 | 46 | var beginBlockFilterError error 47 | var endBlockFilterError error 48 | if blockEventFilterRegistry.BeginBlockEventFilterRegistry != nil && blockEventFilterRegistry.BeginBlockEventFilterRegistry.NumFilters() > 0 { 49 | blockDBWrapper.BeginBlockEvents, beginBlockFilterError = core.FilterRPCBlockEvents(blockDBWrapper.BeginBlockEvents, *blockEventFilterRegistry.BeginBlockEventFilterRegistry) 50 | } 51 | 52 | if blockEventFilterRegistry.EndBlockEventFilterRegistry != nil && blockEventFilterRegistry.EndBlockEventFilterRegistry.NumFilters() > 0 { 53 | blockDBWrapper.EndBlockEvents, endBlockFilterError = core.FilterRPCBlockEvents(blockDBWrapper.EndBlockEvents, *blockEventFilterRegistry.EndBlockEventFilterRegistry) 54 | } 55 | 56 | if beginBlockFilterError == nil && endBlockFilterError == nil { 57 | blockEventsDataChan <- &BlockEventsDBData{ 58 | blockDBWrapper: blockDBWrapper, 59 | } 60 | } else { 61 | config.Log.Errorf("Failed to filter block events during block %d event processing, adding to failed block events table. Begin blocker filter error %s. 
End blocker filter error %s", currentHeight, beginBlockFilterError, endBlockFilterError) 62 | failedBlockHandler(currentHeight, core.FailedBlockEventHandling, err) 63 | err := dbTypes.UpsertFailedEventBlock(indexer.DB, currentHeight, indexer.Config.Probe.ChainID, indexer.Config.Probe.ChainName) 64 | if err != nil { 65 | config.Log.Fatal("Failed to insert failed block event", err) 66 | } 67 | } 68 | } 69 | } 70 | 71 | if blockData.IndexTransactions && !blockData.TxRequestsFailed { 72 | config.Log.Info("Parsing transactions") 73 | var txDBWrappers []dbTypes.TxDBWrapper 74 | var err error 75 | 76 | if blockData.GetTxsResponse != nil { 77 | config.Log.Debug("Processing TXs from RPC TX Search response") 78 | txDBWrappers, _, err = core.ProcessRPCTXs(indexer.Config, indexer.DB, indexer.ChainClient, indexer.MessageTypeFilters, indexer.MessageFilters, blockData.GetTxsResponse, indexer.CustomMessageParserRegistry) 79 | } else if blockData.BlockResultsData != nil { 80 | config.Log.Debug("Processing TXs from BlockResults search response") 81 | txDBWrappers, _, err = core.ProcessRPCBlockByHeightTXs(indexer.Config, indexer.DB, indexer.ChainClient, indexer.MessageTypeFilters, indexer.MessageFilters, blockData.BlockData, blockData.BlockResultsData, indexer.CustomMessageParserRegistry) 82 | } 83 | 84 | if err != nil { 85 | config.Log.Error("ProcessRpcTxs: unhandled error", err) 86 | failedBlockHandler(currentHeight, core.UnprocessableTxError, err) 87 | err := dbTypes.UpsertFailedBlock(indexer.DB, currentHeight, indexer.Config.Probe.ChainID, indexer.Config.Probe.ChainName) 88 | if err != nil { 89 | config.Log.Fatal("Failed to insert failed block", err) 90 | } 91 | } else { 92 | txDataChan <- &DBData{ 93 | txDBWrappers: txDBWrappers, 94 | block: block, 95 | } 96 | } 97 | 98 | } 99 | } 100 | } 101 | -------------------------------------------------------------------------------- /indexer/registration.go: -------------------------------------------------------------------------------- 
1 | package indexer 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/DefiantLabs/cosmos-indexer/config" 7 | "github.com/DefiantLabs/cosmos-indexer/db/models" 8 | "github.com/DefiantLabs/cosmos-indexer/filter" 9 | "github.com/DefiantLabs/cosmos-indexer/parsers" 10 | sdkTypes "github.com/cosmos/cosmos-sdk/types" 11 | "github.com/cosmos/cosmos-sdk/types/module" 12 | ) 13 | 14 | func (indexer *Indexer) RegisterCustomModuleBasics(basics []module.AppModuleBasic) { 15 | indexer.CustomModuleBasics = append(indexer.CustomModuleBasics, basics...) 16 | } 17 | 18 | func (indexer *Indexer) RegisterCustomMsgTypesByTypeURLs(customMessageTypeURLSToTypes map[string]sdkTypes.Msg) error { 19 | if indexer.CustomMsgTypeRegistry == nil { 20 | indexer.CustomMsgTypeRegistry = make(map[string]sdkTypes.Msg) 21 | } 22 | 23 | for url, msg := range customMessageTypeURLSToTypes { 24 | if _, ok := indexer.CustomMsgTypeRegistry[url]; ok { 25 | return fmt.Errorf("found duplicate message type with URL \"%s\", message types must be uniquely identified", url) 26 | } 27 | indexer.CustomMsgTypeRegistry[url] = msg 28 | } 29 | 30 | return nil 31 | } 32 | 33 | func (indexer *Indexer) RegisterMessageTypeFilter(filter filter.MessageTypeFilter) { 34 | indexer.MessageTypeFilters = append(indexer.MessageTypeFilters, filter) 35 | } 36 | 37 | func (indexer *Indexer) RegisterMessageFilter(filter filter.MessageFilter) { 38 | indexer.MessageFilters = append(indexer.MessageFilters, filter) 39 | } 40 | 41 | func (indexer *Indexer) RegisterCustomModels(models []any) { 42 | indexer.CustomModels = append(indexer.CustomModels, models...) 
43 | } 44 | 45 | func (indexer *Indexer) RegisterCustomBeginBlockEventParser(eventKey string, parser parsers.BlockEventParser) { 46 | var err error 47 | indexer.CustomBeginBlockEventParserRegistry, indexer.CustomBeginBlockParserTrackers, err = customBlockEventRegistration( 48 | indexer.CustomBeginBlockEventParserRegistry, 49 | indexer.CustomBeginBlockParserTrackers, 50 | eventKey, 51 | parser, 52 | models.BeginBlockEvent, 53 | ) 54 | if err != nil { 55 | config.Log.Fatal("Error registering BeginBlock custom parser", err) 56 | } 57 | } 58 | 59 | func (indexer *Indexer) RegisterCustomEndBlockEventParser(eventKey string, parser parsers.BlockEventParser) { 60 | var err error 61 | indexer.CustomEndBlockEventParserRegistry, indexer.CustomEndBlockParserTrackers, err = customBlockEventRegistration( 62 | indexer.CustomEndBlockEventParserRegistry, 63 | indexer.CustomEndBlockParserTrackers, 64 | eventKey, 65 | parser, 66 | models.EndBlockEvent, 67 | ) 68 | if err != nil { 69 | config.Log.Fatal("Error registering EndBlock custom parser", err) 70 | } 71 | } 72 | 73 | func (indexer *Indexer) RegisterCustomMessageParser(messageKey string, parser parsers.MessageParser) { 74 | if indexer.CustomMessageParserRegistry == nil { 75 | indexer.CustomMessageParserRegistry = make(map[string][]parsers.MessageParser) 76 | } 77 | 78 | if indexer.CustomMessageParserTrackers == nil { 79 | indexer.CustomMessageParserTrackers = make(map[string]models.MessageParser) 80 | } 81 | 82 | indexer.CustomMessageParserRegistry[messageKey] = append(indexer.CustomMessageParserRegistry[messageKey], parser) 83 | 84 | if _, ok := indexer.CustomMessageParserTrackers[parser.Identifier()]; ok { 85 | config.Log.Fatalf("Found duplicate message parser with identifier \"%s\", parsers must be uniquely identified", parser.Identifier()) 86 | } 87 | 88 | indexer.CustomMessageParserTrackers[parser.Identifier()] = models.MessageParser{ 89 | Identifier: parser.Identifier(), 90 | } 91 | } 92 | 93 | func 
customBlockEventRegistration(registry map[string][]parsers.BlockEventParser, tracker map[string]models.BlockEventParser, eventKey string, parser parsers.BlockEventParser, lifecycleValue models.BlockLifecyclePosition) (map[string][]parsers.BlockEventParser, map[string]models.BlockEventParser, error) { 94 | if registry == nil { 95 | registry = make(map[string][]parsers.BlockEventParser) 96 | } 97 | 98 | if tracker == nil { 99 | tracker = make(map[string]models.BlockEventParser) 100 | } 101 | 102 | registry[eventKey] = append(registry[eventKey], parser) 103 | 104 | if _, ok := tracker[parser.Identifier()]; ok { 105 | return registry, tracker, fmt.Errorf("found duplicate block event parser with identifier \"%s\", parsers must be uniquely identified", parser.Identifier()) 106 | } 107 | 108 | tracker[parser.Identifier()] = models.BlockEventParser{ 109 | Identifier: parser.Identifier(), 110 | BlockLifecyclePosition: lifecycleValue, 111 | } 112 | return registry, tracker, nil 113 | } 114 | -------------------------------------------------------------------------------- /indexer/types.go: -------------------------------------------------------------------------------- 1 | package indexer 2 | 3 | import ( 4 | "github.com/DefiantLabs/cosmos-indexer/config" 5 | "github.com/DefiantLabs/cosmos-indexer/core" 6 | dbTypes "github.com/DefiantLabs/cosmos-indexer/db" 7 | "github.com/DefiantLabs/cosmos-indexer/db/models" 8 | "github.com/DefiantLabs/cosmos-indexer/filter" 9 | "github.com/DefiantLabs/cosmos-indexer/parsers" 10 | "github.com/DefiantLabs/probe/client" 11 | sdkTypes "github.com/cosmos/cosmos-sdk/types" 12 | "github.com/cosmos/cosmos-sdk/types/module" 13 | "gorm.io/gorm" 14 | ) 15 | 16 | // DB is not safe to add here just yet, since the index command in cmd/ defers a close of the DB connection 17 | // Maybe the defer should be removed? 
18 | type PostSetupDataset struct { 19 | Config *config.IndexConfig 20 | DryRun bool 21 | ChainClient *client.ChainClient 22 | } 23 | 24 | type PostSetupCustomDataset struct { 25 | Config config.IndexConfig 26 | ChainClient *client.ChainClient 27 | DB *gorm.DB 28 | } 29 | 30 | type PostIndexCustomMessageDataset struct { 31 | Config config.IndexConfig 32 | DB *gorm.DB 33 | DryRun bool 34 | IndexedDataset *[]dbTypes.TxDBWrapper 35 | MessageParser map[string]models.MessageParser 36 | IndexedBlock models.Block 37 | } 38 | 39 | type PreExitCustomDataset struct { 40 | Config config.IndexConfig 41 | DB *gorm.DB 42 | DryRun bool 43 | } 44 | 45 | type Indexer struct { 46 | Config *config.IndexConfig 47 | DryRun bool 48 | DB *gorm.DB 49 | ChainClient *client.ChainClient 50 | BlockEnqueueFunction func(chan *core.EnqueueData) error 51 | CustomModuleBasics []module.AppModuleBasic // Used for extending the AppModuleBasics registered in the probe ChainClient 52 | BlockEventFilterRegistries BlockEventFilterRegistries 53 | MessageTypeFilters []filter.MessageTypeFilter 54 | MessageFilters []filter.MessageFilter 55 | CustomMsgTypeRegistry map[string]sdkTypes.Msg 56 | CustomBeginBlockEventParserRegistry map[string][]parsers.BlockEventParser // Used for associating parsers to block event types in BeginBlock events 57 | CustomEndBlockEventParserRegistry map[string][]parsers.BlockEventParser // Used for associating parsers to block event types in EndBlock events 58 | CustomBeginBlockParserTrackers map[string]models.BlockEventParser // Used for tracking block event parsers in the database 59 | CustomEndBlockParserTrackers map[string]models.BlockEventParser // Used for tracking block event parsers in the database 60 | CustomMessageParserRegistry map[string][]parsers.MessageParser // Used for associating parsers to message types 61 | CustomMessageParserTrackers map[string]models.MessageParser // Used for tracking message parsers in the database 62 | CustomModels []any 63 | 
PostIndexCustomMessageFunction func(*PostIndexCustomMessageDataset) error // Called post indexing of the custom messages with the indexed dataset, useful for custom indexing on the whole dataset or for additional processing 64 | PostSetupCustomFunction func(PostSetupCustomDataset) error // Called post setup of the indexer, useful for custom indexing on the whole dataset or for additional processing 65 | PostSetupDatasetChannel chan *PostSetupDataset // passes configured indexer data to any reader 66 | PreExitCustomFunction func(*PreExitCustomDataset) error // Called post indexing of the custom messages with the indexed dataset, useful for custom indexing on the whole dataset or for additional processing 67 | } 68 | 69 | type BlockEventFilterRegistries struct { 70 | BeginBlockEventFilterRegistry *filter.StaticBlockEventFilterRegistry 71 | EndBlockEventFilterRegistry *filter.StaticBlockEventFilterRegistry 72 | } 73 | 74 | type DBData struct { 75 | txDBWrappers []dbTypes.TxDBWrapper 76 | block models.Block 77 | } 78 | 79 | type BlockEventsDBData struct { 80 | blockDBWrapper *dbTypes.BlockDBWrapper 81 | } 82 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "log" 5 | 6 | "github.com/DefiantLabs/cosmos-indexer/cmd" 7 | ) 8 | 9 | func main() { 10 | // simplest main as recommended by the Cobra package 11 | err := cmd.Execute() 12 | if err != nil { 13 | log.Fatalf("Failed to execute. 
Err: %v", err) 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /notes/DatabaseSetup.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bryanlabs/cosmos-indexer-sdk/8d95bd817714fad49404b47eebe4d5dccf59dff7/notes/DatabaseSetup.png -------------------------------------------------------------------------------- /notes/delete-db.sql: -------------------------------------------------------------------------------- 1 | -- Below will delete the table and the rows... 2 | drop table if exists message_types cascade; 3 | drop table if exists failed_blocks cascade; 4 | drop table if exists failed_event_blocks cascade; 5 | drop table if exists taxable_tx cascade; 6 | drop table if exists chains cascade; 7 | drop table if exists denom_unit_aliases cascade; 8 | drop table if exists messages cascade; 9 | drop table if exists txes cascade; 10 | drop table if exists tx_signer_addresses cascade; 11 | drop table if exists taxable_event cascade; 12 | drop table if exists messages cascade; 13 | drop table if exists addresses cascade; 14 | drop table if exists blocks cascade; 15 | drop table if exists simple_denoms cascade; 16 | drop table if exists denom_units cascade; 17 | drop table if exists denoms cascade; 18 | drop table if exists fees cascade; 19 | drop table if exists block_events cascade; 20 | drop table if exists epoches cascade; 21 | drop table if exists block_event_attribute_keys cascade; 22 | drop table if exists block_event_attributes cascade; 23 | drop table if exists block_event_types cascade; 24 | drop table if exists failed_messages cascade; 25 | drop table if exists message_events cascade; 26 | drop table if exists message_event_types cascade; 27 | drop table if exists message_event_attributes cascade; 28 | drop table if exists message_event_attribute_keys cascade; 29 | drop table if exists block_event_parsers cascade; 30 | drop table if exists 
block_event_parser_errors cascade; 31 | drop table if exists message_parsers cascade; 32 | drop table if exists message_parser_errors cascade; 33 | -------------------------------------------------------------------------------- /notes/message_types.txt: -------------------------------------------------------------------------------- 1 | The following are message types as reported by Spew library which prints unknown go structs etc. 2 | 3 | (map[string]interface {}) (len=4) { 4 | (string) (len=10) "to_address": (string) (len=43) "juno1m2hg5t7n8f6kzh8kmh98phenk8a4xp5wyuz34y", 5 | (string) (len=6) "amount": ([]interface {}) (len=1 cap=1) { 6 | (map[string]interface {}) (len=2) { 7 | (string) (len=6) "amount": (string) (len=5) "80085", 8 | (string) (len=5) "denom": (string) (len=6) "ustake" 9 | } 10 | }, 11 | (string) (len=5) "@type": (string) (len=28) "/cosmos.bank.v1beta1.MsgSend", 12 | (string) (len=12) "from_address": (string) (len=43) "juno16g2rahf5846rxzp3fwlswy08fz8ccuwk03k57y" 13 | } 14 | 15 | ... 16 | 17 | (map[string]interface {}) (len=4) { 18 | (string) (len=5) "@type": (string) (len=35) "/cosmos.staking.v1beta1.MsgDelegate", 19 | (string) (len=17) "delegator_address": (string) (len=43) "juno1m2hg5t7n8f6kzh8kmh98phenk8a4xp5wyuz34y", 20 | (string) (len=17) "validator_address": (string) (len=50) "junovaloper128taw6wkhfq29u83lmh5qyfv8nff6h0wtrgrta", 21 | (string) (len=6) "amount": (map[string]interface {}) (len=2) { 22 | (string) (len=6) "amount": (string) (len=5) "50000", 23 | (string) (len=5) "denom": (string) (len=6) "ustake" 24 | } 25 | } 26 | 27 | ... 
28 | 29 | (map[string]interface {}) (len=4) { 30 | (string) (len=6) "amount": (map[string]interface {}) (len=2) { 31 | (string) (len=5) "denom": (string) (len=6) "ustake", 32 | (string) (len=6) "amount": (string) (len=2) "85" 33 | }, 34 | (string) (len=5) "@type": (string) (len=35) "/cosmos.staking.v1beta1.MsgDelegate", 35 | (string) (len=17) "delegator_address": (string) (len=43) "juno1m2hg5t7n8f6kzh8kmh98phenk8a4xp5wyuz34y", 36 | (string) (len=17) "validator_address": (string) (len=50) "junovaloper128taw6wkhfq29u83lmh5qyfv8nff6h0wtrgrta" 37 | } 38 | 39 | ... 40 | 41 | (map[string]interface {}) (len=3) { 42 | (string) (len=5) "@type": (string) (len=55) "/cosmos.distribution.v1beta1.MsgWithdrawDelegatorReward", 43 | (string) (len=17) "delegator_address": (string) (len=43) "juno128taw6wkhfq29u83lmh5qyfv8nff6h0w577vsy", 44 | (string) (len=17) "validator_address": (string) (len=50) "junovaloper128taw6wkhfq29u83lmh5qyfv8nff6h0wtrgrta" 45 | } 46 | 47 | ... 48 | 49 | (map[string]interface {}) (len=2) { 50 | (string) (len=5) "@type": (string) (len=59) "/cosmos.distribution.v1beta1.MsgWithdrawValidatorCommission", 51 | (string) (len=17) "validator_address": (string) (len=50) "junovaloper128taw6wkhfq29u83lmh5qyfv8nff6h0wtrgrta" 52 | } 53 | 54 | ... 55 | 56 | (map[string]interface {}) (len=3) { 57 | (string) (len=5) "@type": (string) (len=55) "/cosmos.distribution.v1beta1.MsgWithdrawDelegatorReward", 58 | (string) (len=17) "delegator_address": (string) (len=43) "juno128taw6wkhfq29u83lmh5qyfv8nff6h0w577vsy", 59 | (string) (len=17) "validator_address": (string) (len=50) "junovaloper128taw6wkhfq29u83lmh5qyfv8nff6h0wtrgrta" 60 | } 61 | 62 | ... 63 | 64 | (map[string]interface {}) (len=2) { 65 | (string) (len=5) "@type": (string) (len=59) "/cosmos.distribution.v1beta1.MsgWithdrawValidatorCommission", 66 | (string) (len=17) "validator_address": (string) (len=50) "junovaloper128taw6wkhfq29u83lmh5qyfv8nff6h0wtrgrta" 67 | } 68 | 69 | ... 
70 | 71 | 72 | (map[string]interface {}) (len=3) { 73 | (string) (len=5) "@type": (string) (len=55) "/cosmos.distribution.v1beta1.MsgWithdrawDelegatorReward", 74 | (string) (len=17) "delegator_address": (string) (len=43) "juno128taw6wkhfq29u83lmh5qyfv8nff6h0w577vsy", 75 | (string) (len=17) "validator_address": (string) (len=50) "junovaloper128taw6wkhfq29u83lmh5qyfv8nff6h0wtrgrta" 76 | } 77 | 78 | ... 79 | 80 | (map[string]interface {}) (len=2) { 81 | (string) (len=5) "@type": (string) (len=59) "/cosmos.distribution.v1beta1.MsgWithdrawValidatorCommission", 82 | (string) (len=17) "validator_address": (string) (len=50) "junovaloper128taw6wkhfq29u83lmh5qyfv8nff6h0wtrgrta" 83 | } 84 | 85 | ... 86 | -------------------------------------------------------------------------------- /parsers/block_events.go: -------------------------------------------------------------------------------- 1 | package parsers 2 | 3 | import ( 4 | "github.com/DefiantLabs/cosmos-indexer/config" 5 | "github.com/DefiantLabs/cosmos-indexer/db/models" 6 | abci "github.com/cometbft/cometbft/abci/types" 7 | "gorm.io/gorm" 8 | ) 9 | 10 | type BlockEventParser interface { 11 | Identifier() string 12 | ParseBlockEvent(abci.Event, config.IndexConfig) (*any, error) 13 | IndexBlockEvent(*any, *gorm.DB, models.Block, models.BlockEvent, []models.BlockEventAttribute, config.IndexConfig) error 14 | } 15 | 16 | type BlockEventParsedData struct { 17 | Data *any 18 | Error error 19 | Parser *BlockEventParser 20 | } 21 | -------------------------------------------------------------------------------- /parsers/messages.go: -------------------------------------------------------------------------------- 1 | package parsers 2 | 3 | import ( 4 | "github.com/DefiantLabs/cosmos-indexer/config" 5 | txtypes "github.com/DefiantLabs/cosmos-indexer/cosmos/modules/tx" 6 | "github.com/DefiantLabs/cosmos-indexer/db/models" 7 | sdkTypes "github.com/cosmos/cosmos-sdk/types" 8 | "gorm.io/gorm" 9 | ) 10 | 11 | // Intermediate type 
for the database inserted message datasets 12 | // Is there a way to remove this? It may require a one-to-many mapping of the message events + attributes instead of the belongs-to 13 | type MessageEventWithAttributes struct { 14 | Event models.MessageEvent 15 | Attributes []models.MessageEventAttribute 16 | } 17 | 18 | type MessageParser interface { 19 | Identifier() string 20 | ParseMessage(sdkTypes.Msg, *txtypes.LogMessage, config.IndexConfig) (*any, error) 21 | IndexMessage(*any, *gorm.DB, models.Message, []MessageEventWithAttributes, config.IndexConfig) error 22 | } 23 | 24 | type MessageParsedData struct { 25 | Data *any 26 | Error error 27 | Parser *MessageParser 28 | } 29 | -------------------------------------------------------------------------------- /probe/probe.go: -------------------------------------------------------------------------------- 1 | package probe 2 | 3 | import ( 4 | "github.com/DefiantLabs/cosmos-indexer/config" 5 | probeClient "github.com/DefiantLabs/probe/client" 6 | sdkTypes "github.com/cosmos/cosmos-sdk/types" 7 | "github.com/cosmos/cosmos-sdk/types/module" 8 | ) 9 | 10 | func GetProbeClient(conf config.Probe, appModuleBasicsExtensions []module.AppModuleBasic, customMsgTypeRegistry map[string]sdkTypes.Msg) (*probeClient.ChainClient, error) { 11 | return probeClient.NewChainClient(GetProbeConfig(conf, true, appModuleBasicsExtensions, customMsgTypeRegistry), "", nil, nil) 12 | } 13 | 14 | // Will include the protos provided by the Probe package for Osmosis module interfaces 15 | func IncludeOsmosisInterfaces(client *probeClient.ChainClient) { 16 | probeClient.RegisterOsmosisInterfaces(client.Codec.InterfaceRegistry) 17 | } 18 | 19 | // Will include the protos provided by the Probe package for Tendermint Liquidity module interfaces 20 | func IncludeTendermintInterfaces(client *probeClient.ChainClient) { 21 | probeClient.RegisterTendermintLiquidityInterfaces(client.Codec.Amino, client.Codec.InterfaceRegistry) 22 | } 23 | 24 | func 
GetProbeConfig(conf config.Probe, debug bool, appModuleBasicsExtensions []module.AppModuleBasic, customMsgTypeRegistry map[string]sdkTypes.Msg) *probeClient.ChainClientConfig { 25 | moduleBasics := []module.AppModuleBasic{} 26 | moduleBasics = append(moduleBasics, probeClient.DefaultModuleBasics...) 27 | moduleBasics = append(moduleBasics, appModuleBasicsExtensions...) 28 | 29 | return &probeClient.ChainClientConfig{ 30 | Key: "default", 31 | ChainID: conf.ChainID, 32 | RPCAddr: conf.RPC, 33 | AccountPrefix: conf.AccountPrefix, 34 | KeyringBackend: "test", 35 | Debug: debug, 36 | Timeout: "30s", 37 | OutputFormat: "json", 38 | Modules: moduleBasics, 39 | CustomMsgTypeRegistry: customMsgTypeRegistry, 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /rpc/blocks.go: -------------------------------------------------------------------------------- 1 | package rpc 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "errors" 7 | "fmt" 8 | "io" 9 | "math" 10 | "net/http" 11 | "net/url" 12 | "reflect" 13 | "time" 14 | 15 | "github.com/DefiantLabs/cosmos-indexer/config" 16 | abci "github.com/cometbft/cometbft/abci/types" 17 | tmjson "github.com/cometbft/cometbft/libs/json" 18 | cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" 19 | jsonrpc "github.com/cometbft/cometbft/rpc/jsonrpc/client" 20 | types "github.com/cometbft/cometbft/rpc/jsonrpc/types" 21 | ) 22 | 23 | func argsToURLValues(args map[string]interface{}) (url.Values, error) { 24 | values := make(url.Values) 25 | if len(args) == 0 { 26 | return values, nil 27 | } 28 | 29 | err := argsToJSON(args) 30 | if err != nil { 31 | return nil, err 32 | } 33 | 34 | for key, val := range args { 35 | values.Set(key, val.(string)) 36 | } 37 | 38 | return values, nil 39 | } 40 | 41 | func argsToJSON(args map[string]interface{}) error { 42 | for k, v := range args { 43 | rt := reflect.TypeOf(v) 44 | isByteSlice := rt.Kind() == reflect.Slice && rt.Elem().Kind() == 
reflect.Uint8 45 | if isByteSlice { 46 | bytes := reflect.ValueOf(v).Bytes() 47 | args[k] = fmt.Sprintf("0x%X", bytes) 48 | continue 49 | } 50 | 51 | data, err := tmjson.Marshal(v) 52 | if err != nil { 53 | return err 54 | } 55 | args[k] = string(data) 56 | } 57 | return nil 58 | } 59 | 60 | func (c *URIClient) DoHTTPGet(ctx context.Context, method string, params map[string]interface{}, result interface{}) (interface{}, error) { 61 | values, err := argsToURLValues(params) 62 | if err != nil { 63 | return nil, fmt.Errorf("failed to encode params: %w", err) 64 | } 65 | 66 | req, err := http.NewRequestWithContext(ctx, http.MethodGet, c.Address+"/"+method, nil) 67 | if err != nil { 68 | return nil, fmt.Errorf("error creating new request: %w", err) 69 | } 70 | 71 | req.URL.RawQuery = values.Encode() 72 | // fmt.Printf("Query string: %s\n", values.Encode()) 73 | 74 | // req.Header.Set("Content-Type", "application/x-www-form-urlencoded") 75 | if c.AuthHeader != "" { 76 | req.Header.Add("Authorization", c.AuthHeader) 77 | } 78 | 79 | resp, err := c.Client.Do(req) 80 | if err != nil { 81 | return nil, fmt.Errorf("get: %w", err) 82 | } 83 | defer resp.Body.Close() 84 | 85 | responseBytes, err := io.ReadAll(resp.Body) 86 | if err != nil { 87 | return nil, fmt.Errorf("read response body: %w", err) 88 | } 89 | 90 | return unmarshalResponseBytes(responseBytes, jsonrpc.URIClientRequestID, result) 91 | } 92 | 93 | type URIClient struct { 94 | Address string 95 | Client *http.Client 96 | AuthHeader string 97 | } 98 | 99 | func unmarshalResponseBytes(responseBytes []byte, expectedID types.JSONRPCIntID, result interface{}) (interface{}, error) { 100 | // Read response. If rpc/core/types is imported, the result will unmarshal 101 | // into the correct type. 
102 | response := &types.RPCResponse{} 103 | if err := json.Unmarshal(responseBytes, response); err != nil { 104 | return nil, fmt.Errorf("error unmarshalling: %w", err) 105 | } 106 | 107 | if response.Error != nil { 108 | return nil, response.Error 109 | } 110 | 111 | if err := validateAndVerifyID(response, expectedID); err != nil { 112 | return nil, fmt.Errorf("wrong ID: %w", err) 113 | } 114 | 115 | // Unmarshal the RawMessage into the result. 116 | if err := tmjson.Unmarshal(response.Result, result); err != nil { 117 | return nil, fmt.Errorf("error unmarshalling result: %w", err) 118 | } 119 | 120 | return result, nil 121 | } 122 | 123 | func validateAndVerifyID(res *types.RPCResponse, expectedID types.JSONRPCIntID) error { 124 | if err := validateResponseID(res.ID); err != nil { 125 | return err 126 | } 127 | if expectedID != res.ID.(types.JSONRPCIntID) { // validateResponseID ensured res.ID has the right type 128 | return fmt.Errorf("response ID (%d) does not match request ID (%d)", res.ID, expectedID) 129 | } 130 | return nil 131 | } 132 | 133 | func validateResponseID(id interface{}) error { 134 | if id == nil { 135 | return errors.New("no ID") 136 | } 137 | _, ok := id.(types.JSONRPCIntID) 138 | if !ok { 139 | return fmt.Errorf("expected JSONRPCIntID, but got: %T", id) 140 | } 141 | return nil 142 | } 143 | 144 | // This type **should** cover SDK v0.4x and v0.50, but updates will need to be monitored 145 | type CustomBlockResults struct { 146 | Height int64 `json:"height"` 147 | TxsResults []*abci.ResponseDeliverTx `json:"txs_results"` 148 | BeginBlockEvents []abci.Event `json:"begin_block_events"` 149 | EndBlockEvents []abci.Event `json:"end_block_events"` 150 | ValidatorUpdates []abci.ValidatorUpdate `json:"validator_updates"` 151 | ConsensusParamUpdates *cmtproto.ConsensusParams `json:"consensus_param_updates"` 152 | FinalizeBlockEvents []abci.Event `json:"finalize_block_events"` 153 | } 154 | 155 | func (c *URIClient) DoBlockResults(ctx 
context.Context, height *int64) (*CustomBlockResults, error) { 156 | result := new(CustomBlockResults) 157 | params := make(map[string]interface{}) 158 | if height != nil { 159 | params["height"] = height 160 | } 161 | 162 | _, err := c.DoHTTPGet(ctx, "block_results", params, result) 163 | if err != nil { 164 | return nil, err 165 | } 166 | 167 | return result, nil 168 | } 169 | 170 | func GetBlockResult(client URIClient, height int64) (*CustomBlockResults, error) { 171 | brctx, cancel := context.WithTimeout(context.Background(), 100*time.Second) 172 | defer cancel() 173 | 174 | bresults, err := client.DoBlockResults(brctx, &height) 175 | if err != nil { 176 | return nil, err 177 | } 178 | 179 | return bresults, nil 180 | } 181 | 182 | func GetBlockResultWithRetry(client URIClient, height int64, retryMaxAttempts int64, retryMaxWaitSeconds uint64) (*CustomBlockResults, error) { 183 | if retryMaxAttempts == 0 { 184 | return GetBlockResult(client, height) 185 | } 186 | 187 | if retryMaxWaitSeconds < 2 { 188 | retryMaxWaitSeconds = 2 189 | } 190 | 191 | var attempts int64 192 | maxRetryTime := time.Duration(retryMaxWaitSeconds) * time.Second 193 | if maxRetryTime < 0 { 194 | config.Log.Warn("Detected maxRetryTime overflow, setting time to sane maximum of 30s") 195 | maxRetryTime = 30 * time.Second 196 | } 197 | 198 | currentBackoffDuration, maxReached := GetBackoffDurationForAttempts(attempts, maxRetryTime) 199 | 200 | for { 201 | resp, err := GetBlockResult(client, height) 202 | attempts++ 203 | if err != nil && (retryMaxAttempts < 0 || (attempts <= retryMaxAttempts)) { 204 | config.Log.Error("Error getting RPC response, backing off and trying again", err) 205 | config.Log.Debugf("Attempt %d with wait time %+v", attempts, currentBackoffDuration) 206 | time.Sleep(currentBackoffDuration) 207 | 208 | // guard against overflow 209 | if !maxReached { 210 | currentBackoffDuration, maxReached = GetBackoffDurationForAttempts(attempts, maxRetryTime) 211 | } 212 | 213 | } else 
{ 214 | if err != nil { 215 | config.Log.Error("Error getting RPC response, reached max retry attempts") 216 | } 217 | return resp, err 218 | } 219 | } 220 | } 221 | 222 | func GetBackoffDurationForAttempts(numAttempts int64, maxRetryTime time.Duration) (time.Duration, bool) { 223 | backoffBase := 1.5 224 | backoffDuration := time.Duration(math.Pow(backoffBase, float64(numAttempts)) * float64(time.Second)) 225 | 226 | maxReached := false 227 | if backoffDuration > maxRetryTime || backoffDuration < 0 { 228 | maxReached = true 229 | backoffDuration = maxRetryTime 230 | } 231 | 232 | return backoffDuration, maxReached 233 | } 234 | -------------------------------------------------------------------------------- /rpc/requests.go: -------------------------------------------------------------------------------- 1 | package rpc 2 | 3 | import ( 4 | "time" 5 | 6 | coretypes "github.com/cometbft/cometbft/rpc/core/types" 7 | 8 | "github.com/DefiantLabs/cosmos-indexer/config" 9 | probeClient "github.com/DefiantLabs/probe/client" 10 | probeQuery "github.com/DefiantLabs/probe/query" 11 | "github.com/cosmos/cosmos-sdk/types/query" 12 | txTypes "github.com/cosmos/cosmos-sdk/types/tx" 13 | ) 14 | 15 | // GetBlockTimestamp 16 | func GetBlock(cl *probeClient.ChainClient, height int64) (*coretypes.ResultBlock, error) { 17 | options := probeQuery.QueryOptions{Height: height} 18 | query := probeQuery.Query{Client: cl, Options: &options} 19 | resp, err := query.Block() 20 | if err != nil { 21 | return nil, err 22 | } 23 | 24 | return resp, nil 25 | } 26 | 27 | // GetTxsByBlockHeight makes a request to the Cosmos RPC API and returns all the transactions for a specific block 28 | func GetTxsByBlockHeight(cl *probeClient.ChainClient, height int64) (*txTypes.GetTxsEventResponse, error) { 29 | pg := query.PageRequest{Limit: 100} 30 | options := probeQuery.QueryOptions{Height: height, Pagination: &pg} 31 | query := probeQuery.Query{Client: cl, Options: &options} 32 | resp, err := 
query.TxByHeight(cl.Codec) 33 | if err != nil { 34 | return nil, err 35 | } 36 | 37 | // handle pagination if needed 38 | if resp != nil && resp.Pagination != nil { 39 | // if there are more total objects than we have so far, keep going 40 | for resp.Pagination.Total > uint64(len(resp.Txs)) { 41 | query.Options.Pagination.Offset = uint64(len(resp.Txs)) 42 | chunkResp, err := query.TxByHeight(cl.Codec) 43 | if err != nil { 44 | return nil, err 45 | } 46 | resp.Txs = append(resp.Txs, chunkResp.Txs...) 47 | resp.TxResponses = append(resp.TxResponses, chunkResp.TxResponses...) 48 | } 49 | } 50 | 51 | return resp, nil 52 | } 53 | 54 | // IsCatchingUp true if the node is catching up to the chain, false otherwise 55 | func IsCatchingUp(cl *probeClient.ChainClient) (bool, error) { 56 | query := probeQuery.Query{Client: cl, Options: &probeQuery.QueryOptions{}} 57 | ctx, cancel := query.GetQueryContext() 58 | defer cancel() 59 | 60 | resStatus, err := query.Client.RPCClient.Status(ctx) 61 | if err != nil { 62 | return false, err 63 | } 64 | return resStatus.SyncInfo.CatchingUp, nil 65 | } 66 | 67 | func GetLatestBlockHeight(cl *probeClient.ChainClient) (int64, error) { 68 | query := probeQuery.Query{Client: cl, Options: &probeQuery.QueryOptions{}} 69 | ctx, cancel := query.GetQueryContext() 70 | defer cancel() 71 | 72 | resStatus, err := query.Client.RPCClient.Status(ctx) 73 | if err != nil { 74 | return 0, err 75 | } 76 | return resStatus.SyncInfo.LatestBlockHeight, nil 77 | } 78 | 79 | func GetLatestBlockHeightWithRetry(cl *probeClient.ChainClient, retryMaxAttempts int64, retryMaxWaitSeconds uint64) (int64, error) { 80 | if retryMaxAttempts == 0 { 81 | return GetLatestBlockHeight(cl) 82 | } 83 | 84 | if retryMaxWaitSeconds < 2 { 85 | retryMaxWaitSeconds = 2 86 | } 87 | 88 | var attempts int64 89 | maxRetryTime := time.Duration(retryMaxWaitSeconds) * time.Second 90 | if maxRetryTime < 0 { 91 | config.Log.Warn("Detected maxRetryTime overflow, setting time to sane maximum of 
30s") 92 | maxRetryTime = 30 * time.Second 93 | } 94 | 95 | currentBackoffDuration, maxReached := GetBackoffDurationForAttempts(attempts, maxRetryTime) 96 | 97 | for { 98 | resp, err := GetLatestBlockHeight(cl) 99 | attempts++ 100 | if err != nil && (retryMaxAttempts < 0 || (attempts <= retryMaxAttempts)) { 101 | config.Log.Error("Error getting RPC response, backing off and trying again", err) 102 | config.Log.Debugf("Attempt %d with wait time %+v", attempts, currentBackoffDuration) 103 | time.Sleep(currentBackoffDuration) 104 | 105 | // guard against overflow 106 | if !maxReached { 107 | currentBackoffDuration, maxReached = GetBackoffDurationForAttempts(attempts, maxRetryTime) 108 | } 109 | 110 | } else { 111 | if err != nil { 112 | config.Log.Error("Error getting RPC response, reached max retry attempts") 113 | } 114 | return resp, err 115 | } 116 | } 117 | } 118 | 119 | func GetEarliestAndLatestBlockHeights(cl *probeClient.ChainClient) (int64, int64, error) { 120 | query := probeQuery.Query{Client: cl, Options: &probeQuery.QueryOptions{}} 121 | ctx, cancel := query.GetQueryContext() 122 | defer cancel() 123 | 124 | resStatus, err := query.Client.RPCClient.Status(ctx) 125 | if err != nil { 126 | return 0, 0, err 127 | } 128 | return resStatus.SyncInfo.EarliestBlockHeight, resStatus.SyncInfo.LatestBlockHeight, nil 129 | } 130 | -------------------------------------------------------------------------------- /setup/linux-createdb.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | ### must run this script AFTER installing postgres 3 | 4 | # switch to postgres user 5 | su - postgres 6 | # this creates a postgres user then database 7 | createuser taxapp 8 | createdb taxappdb 9 | # command line session with postgres tool 10 | psql 11 | alter user taxapp with encrypted password 'taxapptest'; 12 | grant all privileges on database taxappdb to taxapp; 13 | 14 | #now confirm the DB is there 15 | select datname from 
pg_database where datistemplate = false; 16 | 17 | 18 | # While developing you might wanna delete everything... 19 | #delete from tx_addresses; 20 | #delete from addresses; 21 | #delete from txes; 22 | #delete from blocks; 23 | #commit; 24 | -------------------------------------------------------------------------------- /setup/local-node.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # NOTE: I've already run the commands below to produce the Docker images. The only commands you'd need to run in a fresh test environment 4 | # are to import the keys with junod keys add. 5 | 6 | # ACCOUNT info: 7 | # 8 | # Validator's delegator address: "juno128taw6wkhfq29u83lmh5qyfv8nff6h0w577vsy" 9 | # Validator address: "junovaloper128taw6wkhfq29u83lmh5qyfv8nff6h0wtrgrta" 10 | # 11 | # MNEMONICS BUILT INTO THIS IMAGE ARE AS FOLLOWS: 12 | # The validator mnemonic is 13 | # wave assume sun shoe wash once unfair master actual vessel diesel actor spend swear elder once fetch spider aim shift brown artefact jump wild 14 | # The kyle test key mnemonic is 15 | # cup lend senior velvet sleep rely stock roast area color violin such urban endless strategy such more future crane cruel tone daring fly style 16 | # Juno development team's built in test key mnemonic is 17 | # clip hire initial neck maid actor venue client foam budget lock catalog sweet steak waste crater broccoli pipe steak sister coyote moment obvious choose 18 | 19 | # test key 'kyle' will be used for testing transactions that send/receive funds. use the mnemonic above 20 | junod keys add kyle --recover 21 | 22 | # this is the mnemonic of the Juno development team's test user key. it is a genesis account. 23 | junod keys add default --recover 24 | 25 | # This is the delegator address that goes with the validator. 
juno128taw6wkhfq29u83lmh5qyfv8nff6h0w577vsy 26 | junod keys add validator --recover 27 | 28 | # Launch the node in the background 29 | docker-compose up -d 30 | # Give the node time to startup in case this is first run 31 | sleep 10 32 | 33 | # send some money from the genesis key to our new key (juno1m2hg5t7n8f6kzh8kmh98phenk8a4xp5wyuz34y=the kyle key from above) 34 | junod tx bank send default juno1m2hg5t7n8f6kzh8kmh98phenk8a4xp5wyuz34y 80085ustake --chain-id testing 35 | # show balances 36 | junod query bank balances juno1m2hg5t7n8f6kzh8kmh98phenk8a4xp5wyuz34y --chain-id testing 37 | 38 | #Validator address is junovaloper128taw6wkhfq29u83lmh5qyfv8nff6h0wtrgrta in the base image kyle created 39 | junod tx staking delegate junovaloper128taw6wkhfq29u83lmh5qyfv8nff6h0wtrgrta 50000ustake --from kyle --chain-id testing 40 | junod tx staking delegate junovaloper128taw6wkhfq29u83lmh5qyfv8nff6h0wtrgrta 85ustake --from kyle --chain-id testing 41 | 42 | #some time later, collect rewards.. 43 | junod tx distribution withdraw-rewards junovaloper128taw6wkhfq29u83lmh5qyfv8nff6h0wtrgrta --commission --from validator --chain-id testing 44 | 45 | # In the genesis, in the MsgCreateValidator transaction, the delegator starts with 1000000000 and delegates 250000000 leaving 750000000. 46 | # After waiting a while and collecting rewards with the command above, you will always see > 750000000 when you run the query bank balances command. 
47 | junod query bank balances juno128taw6wkhfq29u83lmh5qyfv8nff6h0w577vsy --chain-id testing 48 | 49 | junod query distribution rewards juno1mt72y3jny20456k247tc5gf2dnat76l4ynvqwl $VALOPER_ADDRESS 50 | junod query distribution commission $VALOPER_ADDRESS 51 | junod query distribution rewards $TEST_USER_ADDRESS $VALOPER_ADDRESS 52 | -------------------------------------------------------------------------------- /setup/pull-images.sh: -------------------------------------------------------------------------------- 1 | ACCOUNT="123456789012" 2 | 3 | tee -a ~/.docker/config.json <.amazonaws.com": "ecr-login" 8 | } 9 | } 10 | EOF 11 | 12 | sudo apt update 13 | sudo apt install amazon-ecr-credential-helper 14 | docker pull $ACCOUNT.dkr.ecr.us-east-1.amazonaws.com/cosmostesting:validatorrewards-1.0 15 | -------------------------------------------------------------------------------- /setup/sample_data_1.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | sleep 10 3 | 4 | # Import Key 5 | 6 | echo $TEST_USER_WALLET_PASS | junod keys import $TEST_USER_WALLET_NAME $TEST_USER_WALLET_KEY 7 | export VALOPER_ADDRESS=$(junod keys show validator -a --bech val) 8 | export TEST_USER_ADDRESS=$(junod keys show $TEST_USER_WALLET_NAME -a) 9 | 10 | # Put Test Transactions Here 11 | # Note that the sleep command is needed sometimes after transactions 12 | 13 | # NOTE: if you read the Juno team's genesis TXs, you will see that the validator starts with 1000000000 then stakes 250000000 (1/4 of the funds). 14 | # send the test user another 1/4, leaving the validator with 500000000 unstaked. 
15 | echo "Y" | junod tx bank send validator $TEST_USER_ADDRESS 250000000ustake --chain-id testing 16 | sleep 2 17 | 18 | ## Stake almost all of the test user's funds, leaving 1000ustake in their bank 19 | echo "Y" | junod tx staking delegate $VALOPER_ADDRESS 249999000ustake --from $TEST_USER_ADDRESS --chain-id testing 20 | sleep 2 21 | 22 | ## Only stake 1000ustake of the validator's own funds, leaving the validator with 499999000 23 | echo "Y" | junod tx staking delegate $VALOPER_ADDRESS 1000ustake --from validator --chain-id testing 24 | sleep 2 25 | junod query bank balances $TEST_USER_ADDRESS 26 | -------------------------------------------------------------------------------- /tools/dump-failed-block-heights/.env.template: -------------------------------------------------------------------------------- 1 | export DB_HOST="" 2 | export DB_PORT="" 3 | export DB_NAME="" 4 | export DB_USER="" 5 | export DB_PASSWORD="" 6 | -------------------------------------------------------------------------------- /tools/dump-failed-block-heights/.gitignore: -------------------------------------------------------------------------------- 1 | .env 2 | venv 3 | output/* 4 | -------------------------------------------------------------------------------- /tools/dump-failed-block-heights/main.py: -------------------------------------------------------------------------------- 1 | import psycopg2 2 | import os 3 | import json 4 | import traceback 5 | 6 | def get_env(): 7 | ret = { 8 | "host": os.environ.get("DB_HOST", ""), 9 | "password": os.environ.get("DB_PASSWORD", ""), 10 | "user": os.environ.get("DB_USER", ""), 11 | "port": os.environ.get("DB_PORT", ""), 12 | "db_name": os.environ.get("DB_NAME", "") 13 | } 14 | 15 | if any([ret[x] == "" for x in ret]): 16 | raise Exception("Must provide env vars") 17 | 18 | return ret 19 | 20 | DUMP_FAILED_BLOCKS_QUERY = "SELECT height FROM failed_blocks ORDER BY height ASC;" 21 | 22 | if __name__ == "__main__": 23 | env = get_env() 24 | 
os.makedirs("./output", exist_ok=True) 25 | 26 | conn = psycopg2.connect(f"dbname={env['db_name']} user={env['user']} host={env['host']} password={env['password']} port={env['port']}") 27 | try: 28 | heights = [] 29 | with conn.cursor() as cur: 30 | cur.execute(DUMP_FAILED_BLOCKS_QUERY) 31 | for record in cur.fetchall(): 32 | heights.append(record[0]) 33 | 34 | json.dump(heights, open("output/failed_heights.json", 'w'), indent=4) 35 | 36 | except Exception as err: 37 | print(err) 38 | traceback.print_exc() 39 | finally: 40 | conn.close() 41 | -------------------------------------------------------------------------------- /tools/dump-failed-block-heights/requirements.txt: -------------------------------------------------------------------------------- 1 | psycopg2==2.9.7 2 | -------------------------------------------------------------------------------- /util/utils.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "math/big" 5 | 6 | "github.com/shopspring/decimal" 7 | ) 8 | 9 | func ToNumeric(i *big.Int) decimal.Decimal { 10 | num := decimal.NewFromBigInt(i, 0) 11 | return num 12 | } 13 | 14 | // StrNotSet will return true if the string value provided is empty 15 | func StrNotSet(value string) bool { 16 | return len(value) == 0 17 | } 18 | 19 | func RemoveDuplicatesFromUint64Slice(sliceList []uint64) []uint64 { 20 | allKeys := make(map[uint64]bool) 21 | list := []uint64{} 22 | for _, item := range sliceList { 23 | if _, value := allKeys[item]; !value { 24 | allKeys[item] = true 25 | list = append(list, item) 26 | } 27 | } 28 | return list 29 | } 30 | --------------------------------------------------------------------------------