├── .dockerignore ├── .env.example ├── .env.terraform.example ├── .github ├── ISSUE_TEMPLATE │ ├── bug.yml │ └── feature_request.yml ├── codeowners ├── pull_request_template.md └── workflows │ ├── dispatch_deploy.yml │ ├── dispatch_publish.yml │ ├── dispatch_validate.yml │ ├── event_intake.yml │ ├── event_pr.yml │ ├── event_release.yml │ ├── sub-cd.yml │ ├── sub-providers.yml │ ├── sub-validate.yml │ └── sub-yttrium-integration.yml ├── .gitignore ├── .gitmodules ├── .terraformignore ├── CHANGELOG.md ├── Cargo.lock ├── Cargo.toml ├── Dockerfile ├── LICENSE ├── README.md ├── SUPPORTED_CHAINS.md ├── assets └── names_dictionary.txt ├── build.rs ├── deny.toml ├── docker-compose.mock-bundler.yaml ├── docker-compose.yaml ├── funding.json ├── integration ├── .prettierrc ├── balance.test.ts ├── bundler.test.ts ├── chain_orchestrator.test.ts ├── convert.test.ts ├── fungible_price.test.ts ├── generators.test.ts ├── health.test.ts ├── history.test.ts ├── identity.test.ts ├── init.ts ├── jestconfig.integration.json ├── middlewares.test.ts ├── names.test.ts ├── onramp.test.ts ├── portfolio.test.ts ├── proxy.test.ts ├── ratelimiting.test.ts ├── sessions.test.ts ├── supported-chains.test.ts └── tsconfig.json ├── justfile ├── migrations ├── 20231209213851_init-names.sql ├── 20231209213904_init-addresses.sql └── README.md ├── package.json ├── rustfmt.toml ├── src ├── analytics │ ├── account_names_info.rs │ ├── balance_lookup_info.rs │ ├── chain_abstraction_info.rs │ ├── config.rs │ ├── history_lookup_info.rs │ ├── identity_lookup_info.rs │ ├── message_info.rs │ ├── mod.rs │ └── onramp_history_lookup_info.rs ├── database │ ├── config.rs │ ├── error.rs │ ├── helpers.rs │ ├── mod.rs │ ├── types.rs │ └── utils.rs ├── env │ ├── allnodes.rs │ ├── arbitrum.rs │ ├── aurora.rs │ ├── base.rs │ ├── binance.rs │ ├── callstatic.rs │ ├── drpc.rs │ ├── dune.rs │ ├── hiro.rs │ ├── mantle.rs │ ├── mod.rs │ ├── monad.rs │ ├── moonbeam.rs │ ├── morph.rs │ ├── near.rs │ ├── odyssey.rs │ ├── onerpc.rs │ ├── pokt.rs │ ├── publicnode.rs │ ├── quicknode.rs │ ├── server.rs │ ├── solscan.rs │ ├── sui.rs │ ├── syndica.rs │ ├── therpc.rs │ ├── unichain.rs │ ├── wemix.rs │ ├── zan.rs │ ├── zerion.rs │ ├── zksync.rs │ └── zora.rs ├── error.rs ├── handlers │ ├── balance.rs │ ├── bundler.rs │ ├── chain_agnostic │ │ ├── assets.rs │ │ ├── lifi.rs │ │ ├── mod.rs │ │ ├── nonce_manager.rs │ │ ├── route.rs │ │ └── status.rs │ ├── convert │ │ ├── allowance.rs │ │ ├── approve.rs │ │ ├── gas_price.rs │ │ ├── mod.rs │ │ ├── quotes.rs │ │ ├── tokens.rs │ │ └── transaction.rs │ ├── fungible_price.rs │ ├── generators │ │ ├── mod.rs │ │ └── onrampurl.rs │ ├── health.rs │ ├── history.rs │ ├── identity.rs │ ├── metrics.rs │ ├── mod.rs │ ├── onramp │ │ ├── mod.rs │ │ ├── multi_quotes.rs │ │ ├── options.rs │ │ ├── properties.rs │ │ ├── providers.rs │ │ ├── quotes.rs │ │ └── widget.rs │ ├── portfolio.rs │ ├── profile │ │ ├── address.rs │ │ ├── attributes.rs │ │ ├── lookup.rs │ │ ├── mod.rs │ │ ├── register.rs │ │ ├── reverse.rs │ │ └── suggestions.rs │ ├── proxy.rs │ ├── self_provider.rs │ ├── sessions │ │ ├── context.rs │ │ ├── cosign.rs │ │ ├── create.rs │ │ ├── get.rs │ │ ├── list.rs │ │ ├── mod.rs │ │ └── revoke.rs │ ├── supported_chains.rs │ ├── wallet │ │ ├── call_id.rs │ │ ├── exchanges │ │ │ ├── binance.rs │ │ │ ├── coinbase.rs │ │ │ └── mod.rs │ │ ├── get_assets.rs │ │ ├── get_calls_status.rs │ │ ├── get_exchange_buy_status.rs │ │ ├── get_exchange_url.rs │ │ ├── get_exchanges.rs │ │ ├── handler.rs │ │ ├── mod.rs │ │ ├── prepare_calls.rs │ │ ├── 
send_prepared_calls.rs │ │ └── types.rs │ └── ws_proxy.rs ├── json_rpc │ ├── mod.rs │ └── tests.rs ├── lib.rs ├── main.rs ├── metrics.rs ├── names │ ├── mod.rs │ ├── suggestions.rs │ └── utils.rs ├── profiler │ └── mod.rs ├── project │ ├── config.rs │ ├── error.rs │ ├── metrics │ │ └── mod.rs │ ├── mod.rs │ └── storage │ │ ├── config.rs │ │ └── mod.rs ├── providers │ ├── allnodes.rs │ ├── arbitrum.rs │ ├── aurora.rs │ ├── base.rs │ ├── binance.rs │ ├── bungee.rs │ ├── callstatic.rs │ ├── coinbase.rs │ ├── drpc.rs │ ├── dune.rs │ ├── hiro.rs │ ├── mantle.rs │ ├── meld.rs │ ├── mock_alto.rs │ ├── mod.rs │ ├── monad.rs │ ├── moonbeam.rs │ ├── morph.rs │ ├── near.rs │ ├── odyssey.rs │ ├── one_inch.rs │ ├── onerpc.rs │ ├── pimlico.rs │ ├── pokt.rs │ ├── publicnode.rs │ ├── quicknode.rs │ ├── solscan.rs │ ├── sui.rs │ ├── syndica.rs │ ├── tenderly.rs │ ├── therpc.rs │ ├── unichain.rs │ ├── weights.rs │ ├── wemix.rs │ ├── zan.rs │ ├── zerion.rs │ ├── zksync.rs │ └── zora.rs ├── state.rs ├── storage │ ├── error.rs │ ├── irn │ │ └── mod.rs │ ├── mod.rs │ └── redis │ │ └── mod.rs ├── test_helpers.rs ├── utils │ ├── batch_json_rpc_request.rs │ ├── build.rs │ ├── crypto.rs │ ├── erc4337.rs │ ├── erc7677.rs │ ├── mod.rs │ ├── network.rs │ ├── permissions.rs │ ├── rate_limit.rs │ ├── sessions.rs │ ├── simple_request_json.rs │ └── token_amount.rs └── ws.rs ├── terraform ├── .terraform-docs.yml ├── .terraform.lock.hcl ├── .tflint.hcl ├── README.md ├── alerting │ ├── README.md │ ├── alarms_ecs.tf │ ├── alarms_redis.tf │ ├── context.tf │ ├── main.tf │ ├── terraform.tf │ └── variables.tf ├── context.tf ├── ecs │ ├── README.md │ ├── cluster.tf │ ├── cluster_autoscaling.tf │ ├── cluster_iam.tf │ ├── cluster_logs.tf │ ├── context.tf │ ├── dns.tf │ ├── main.tf │ ├── network.tf │ ├── outputs.tf │ ├── terraform.tf │ └── variables.tf ├── inputs.tf ├── main.tf ├── monitoring │ ├── README.md │ ├── context.tf │ ├── dashboard.jsonnet │ ├── data_sources.tf │ ├── main.tf │ ├── outputs.tf │ ├── panels │ │ ├── app │ │ │ ├── handlers_latency.libsonnet │ │ │ └── handlers_rate.libsonnet │ │ ├── balance │ │ │ ├── provider_retries.libsonnet │ │ │ ├── requests_distribution_evm.libsonnet │ │ │ └── requests_distribution_solana.libsonnet │ │ ├── chain_abstraction │ │ │ ├── gas_estimation.libsonnet │ │ │ ├── insufficient_funds.libsonnet │ │ │ ├── no_bridging.libsonnet │ │ │ ├── no_routes.libsonnet │ │ │ └── response_types_rate.libsonnet │ │ ├── ecs │ │ │ ├── availability.libsonnet │ │ │ ├── cpu.libsonnet │ │ │ └── memory.libsonnet │ │ ├── history │ │ │ ├── availability.libsonnet │ │ │ ├── latency.libsonnet │ │ │ └── requests.libsonnet │ │ ├── identity │ │ │ ├── availability.libsonnet │ │ │ ├── cache.libsonnet │ │ │ ├── latency.libsonnet │ │ │ ├── requests.libsonnet │ │ │ └── usage.libsonnet │ │ ├── irn │ │ │ └── latency.libsonnet │ │ ├── lb │ │ │ ├── active_connections.libsonnet │ │ │ ├── error_4xx.libsonnet │ │ │ ├── error_5xx.libsonnet │ │ │ ├── healthy_hosts.libsonnet │ │ │ ├── latency.libsonnet │ │ │ └── requests.libsonnet │ │ ├── names │ │ │ └── registered.libsonnet │ │ ├── non_rpc │ │ │ ├── cache_latency.libsonnet │ │ │ └── endpoints_latency.libsonnet │ │ ├── panels.libsonnet │ │ ├── projects │ │ │ ├── cache_latency.libsonnet │ │ │ ├── fetch_latency.libsonnet │ │ │ ├── quota_limited_projects.libsonnet │ │ │ └── rejected_projects.libsonnet │ │ ├── proxy │ │ │ ├── calls.libsonnet │ │ │ ├── chains_unavailability.libsonnet │ │ │ ├── errors_non_provider.libsonnet │ │ │ ├── errors_provider.libsonnet │ │ │ ├── http_codes.libsonnet │ 
│ │ ├── latency.libsonnet │ │ │ ├── rpc_retries.libsonnet │ │ │ └── websocket_connections.libsonnet │ │ ├── rate_limiting │ │ │ ├── counter.libsonnet │ │ │ ├── latency.libsonnet │ │ │ └── rate_limited.libsonnet │ │ ├── status │ │ │ └── provider.libsonnet │ │ ├── swaps │ │ │ └── availability.libsonnet │ │ ├── usage │ │ │ └── provider.libsonnet │ │ └── weights │ │ │ └── provider.libsonnet │ ├── terraform.tf │ └── variables.tf ├── outputs.tf ├── postgres │ ├── context.tf │ ├── main.tf │ ├── outputs.tf │ ├── password.tf │ ├── terraform.tf │ └── variables.tf ├── providers.tf ├── redis │ ├── README.md │ ├── context.tf │ ├── main.tf │ ├── outputs.tf │ ├── terraform.tf │ └── variables.tf ├── res_alerting.tf ├── res_db.tf ├── res_dns.tf ├── res_ecs.tf ├── res_monitoring.tf ├── res_network.tf ├── res_redis.tf ├── terraform.tf └── variables.tf ├── tests ├── context │ ├── mod.rs │ └── server.rs ├── functional │ ├── README.md │ ├── bundler.rs │ ├── database.rs │ ├── http │ │ ├── allnodes.rs │ │ ├── arbitrum.rs │ │ ├── aurora.rs │ │ ├── base.rs │ │ ├── binance.rs │ │ ├── drpc.rs │ │ ├── mantle.rs │ │ ├── mod.rs │ │ ├── monad.rs │ │ ├── moonbeam.rs │ │ ├── morph.rs │ │ ├── near.rs │ │ ├── odyssey.rs │ │ ├── pokt.rs │ │ ├── publicnode.rs │ │ ├── quicknode.rs │ │ ├── sui.rs │ │ ├── syndica.rs │ │ ├── unichain.rs │ │ ├── wemix.rs │ │ ├── zksync.rs │ │ └── zora.rs │ ├── metrics.rs │ ├── mod.rs │ └── websocket │ │ ├── mod.rs │ │ └── zora.rs ├── integration.rs └── utils │ └── mod.rs └── yarn.lock /.dockerignore: -------------------------------------------------------------------------------- 1 | * 2 | !crate 3 | !src 4 | !build.rs 5 | !xtask 6 | !Cargo.* 7 | !.git 8 | !bin 9 | !migrations 10 | !assets 11 | !irn 12 | -------------------------------------------------------------------------------- /.env.terraform.example: -------------------------------------------------------------------------------- 1 | export AWS_ACCESS_KEY_ID="" 2 | export AWS_SECRET_ACCESS_KEY="" 3 | export AWS_REGION="eu-central-1" 4 | 5 | export TF_VAR_pokt_project_id="" 6 | export TF_VAR_quicknode_api_tokens="" 7 | export TF_VAR_coinbase_api_key="" 8 | export TF_VAR_coinbase_app_id="" 9 | export TF_VAR_zerion_api_key="" 10 | export TF_VAR_one_inch_api_key="" 11 | export TF_VAR_pimlico_api_key="" 12 | export TF_VAR_solscan_api_v2_token="" 13 | export TF_VAR_bungee_api_key="" 14 | export TF_VAR_tenderly_api_key="" 15 | export TF_VAR_tenderly_account_id="" 16 | export TF_VAR_tenderly_project_id="" 17 | export TF_VAR_dune_api_key="" 18 | export TF_VAR_grafana_endpoint=$(aws grafana list-workspaces | jq -r '.workspaces[] | select( .tags.Env == "prod") | select( .tags.Name == "grafana-9") | .endpoint') 19 | export TF_VAR_registry_api_auth_token="" 20 | export TF_VAR_debug_secret="" 21 | 22 | export GRAFANA_AUTH="Grab one at https://$TF_VAR_grafana_endpoint" 23 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.yml: -------------------------------------------------------------------------------- 1 | name: Feature Request 2 | description: Request a new feature be added 3 | title: 'feat: ' 4 | labels: 5 | - enhancement 6 | body: 7 | - type: markdown 8 | attributes: 9 | value: | 10 | Thanks for taking the time to suggest a new feature for Bouncer! ✨ 11 | - type: checkboxes 12 | attributes: 13 | label: Is there an existing issue for this? 14 | description: Please search to see if an issue already exists for the feature you would like. 
15 | options: 16 | - label: I have searched the existing issues 17 | required: true 18 | - type: textarea 19 | attributes: 20 | label: Current Behavior 21 | description: A concise description of what you're experiencing. 22 | validations: 23 | required: true 24 | - type: textarea 25 | attributes: 26 | label: Requested Behavior 27 | description: A concise description of what you expected to happen. 28 | validations: 29 | required: true 30 | - type: textarea 31 | attributes: 32 | label: Anything else? 33 | description: | 34 | Links? References? Anything that will give us more context about the issue you are encountering! 35 | 36 | Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in. 37 | validations: 38 | required: false 39 | -------------------------------------------------------------------------------- /.github/codeowners: -------------------------------------------------------------------------------- 1 | * @chris13524 2 | * @geekbrother 3 | * @xav 4 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | # Description 2 | 3 | 8 | 9 | Resolves # (issue) 10 | 11 | ## How Has This Been Tested? 12 | 13 | 18 | 19 | 20 | 21 | ## Due Diligence 22 | 23 | * [ ] Breaking change 24 | * [ ] Requires a documentation update 25 | * [ ] Requires a e2e/integration test update 26 | -------------------------------------------------------------------------------- /.github/workflows/dispatch_publish.yml: -------------------------------------------------------------------------------- 1 | name: ⚙️ Publish 2 | run-name: "Publish: ${{ github.sha }}${{ inputs.deploy-to != 'none' && format(' ❱❱ {0}', inputs.deploy-to) || ''}}" 3 | 4 | on: 5 | workflow_dispatch: 6 | inputs: 7 | deploy-to: 8 | description: "Deploy published image to" 9 | type: choice 10 | options: 11 | - none 12 | - staging 13 | - prod 14 | default: staging 15 | required: true 16 | 17 | concurrency: deploy 18 | 19 | permissions: 20 | contents: write 21 | checks: write 22 | id-token: write 23 | packages: write 24 | 25 | jobs: 26 | ci: 27 | name: CI 28 | uses: WalletConnect/ci_workflows/.github/workflows/ci.yml@0.2.18 29 | secrets: inherit 30 | with: 31 | check-infra: false 32 | check-app: true 33 | check-udeps: false 34 | 35 | release: 36 | name: Release 37 | uses: WalletConnect/ci_workflows/.github/workflows/release.yml@0.2.18 38 | secrets: inherit 39 | with: 40 | infra-changed: false 41 | app-changed: true 42 | 43 | cd: 44 | name: CD 45 | needs: [ release ] 46 | if: ${{ inputs.deploy-to == 'staging' || inputs.deploy-to == 'prod' }} 47 | secrets: inherit 48 | uses: ./.github/workflows/sub-cd.yml 49 | with: 50 | deploy-infra: false 51 | deploy-app: true 52 | deploy-prod: ${{ inputs.deploy-to == 'prod' }} 53 | version: ${{ needs.release.outputs.version }} 54 | -------------------------------------------------------------------------------- /.github/workflows/dispatch_validate.yml: -------------------------------------------------------------------------------- 1 | name: ⚙️ Validate 2 | run-name: "Validate: ${{ github.sha }}${{ (!inputs.check-infra && !inputs.check-app) && '👀 validate nothing' || ''}}${{ inputs.check-infra && ' ✓  infra' || '' }}${{ inputs.check-app && ' ✓  app' || '' }}" 3 | on: 4 | workflow_dispatch: 5 | inputs: 6 | check-infra: 7 | description: "Validate Infra" 8 | default: true 9 | required: true 10 | type: boolean 11 | check-app: 12 
| description: "Validate App" 13 | default: true 14 | required: true 15 | type: boolean 16 | check-staging: 17 | description: "Validate Staging" 18 | default: false 19 | required: true 20 | type: boolean 21 | check-prod: 22 | description: "Validate Prod" 23 | default: false 24 | required: true 25 | type: boolean 26 | 27 | permissions: 28 | contents: read 29 | checks: write 30 | id-token: write 31 | 32 | jobs: 33 | ci: 34 | name: CI 35 | uses: WalletConnect/ci_workflows/.github/workflows/ci.yml@0.2.18 36 | secrets: inherit 37 | with: 38 | check-infra: ${{ inputs.check-infra }} 39 | check-app: ${{ inputs.check-app }} 40 | check-udeps: false 41 | 42 | validate-staging: 43 | name: Validate - Staging 44 | if: ${{ inputs.check-staging }} 45 | uses: ./.github/workflows/sub-validate.yml 46 | secrets: inherit 47 | with: 48 | stage: staging 49 | stage-url: https://staging.${{ vars.SUBDOMAIN_NAME }}.walletconnect.org 50 | 51 | validate-prod: 52 | name: Validate - Prod 53 | if: ${{ inputs.check-prod }} 54 | uses: ./.github/workflows/sub-validate.yml 55 | secrets: inherit 56 | with: 57 | stage: prod 58 | stage-url: https://${{ vars.SUBDOMAIN_NAME }}.walletconnect.org 59 | -------------------------------------------------------------------------------- /.github/workflows/event_intake.yml: -------------------------------------------------------------------------------- 1 | # This workflow moves issues to the Project board when they receive the "accepted" label 2 | # When WalletConnect Org members create issues they are automatically "accepted". 3 | # Otherwise, they need to manually receive that label during intake. 4 | name: ⚡ Intake 5 | 6 | on: 7 | issues: 8 | types: [ opened, labeled ] 9 | 10 | jobs: 11 | add-to-project: 12 | name: Add issue to board 13 | if: github.event_name == 'issues' && github.event.action == 'labeled' && github.event.label.name == 'accepted' 14 | runs-on: ubuntu-latest 15 | steps: 16 | - uses: actions/add-to-project@v0.1.0 17 | with: 18 | project-url: https://github.com/orgs/WalletConnect/projects/20 19 | github-token: ${{ secrets.ASSIGN_TO_PROJECT_GITHUB_TOKEN }} 20 | labeled: accepted 21 | label-operator: OR 22 | 23 | auto-promote: 24 | name: auto-promote 25 | if: github.event.action == 'opened' 26 | runs-on: ubuntu-latest 27 | steps: 28 | - name: Check Core Team membership 29 | uses: tspascoal/get-user-teams-membership@v1 30 | id: is-core-team 31 | with: 32 | username: ${{ github.event_name != 'pull_request' && github.event.issue.user.login || github.event.sender.login }} 33 | team: "Core Team" 34 | GITHUB_TOKEN: ${{ secrets.ASSIGN_TO_PROJECT_GITHUB_TOKEN }} 35 | - name: Print result 36 | env: 37 | CREATOR: ${{ github.event_name != 'pull_request' && github.event.issue.user.login || github.event.sender.login }} 38 | IS_TEAM_MEMBER: ${{ steps.is-core-team.outputs.isTeamMember }} 39 | run: echo "$CREATOR (Core Team Member $IS_TEAM_MEMBER) created this issue/PR" 40 | - name: Label issues 41 | if: ${{ steps.is-core-team.outputs.isTeamMember == 'true' }} 42 | uses: andymckay/labeler@e6c4322d0397f3240f0e7e30a33b5c5df2d39e90 43 | with: 44 | add-labels: "accepted" 45 | repo-token: ${{ secrets.ASSIGN_TO_PROJECT_GITHUB_TOKEN }} 46 | -------------------------------------------------------------------------------- /.github/workflows/event_release.yml: -------------------------------------------------------------------------------- 1 | name: ⚡ Release 2 | run-name: 'Release / ${{ github.event.head_commit.message }}' 3 | 4 | on: 5 | push: 6 | branches: 7 | - main 8 | - master 9 | 
paths-ignore: 10 | - 'docs/**' 11 | - 'README.md' 12 | - 'CHANGELOG.md' 13 | - 'LICENSE' 14 | - 'justfile' 15 | - 'rustfmt.toml' 16 | - '.editorconfig' 17 | - '.pre-commit-config.yaml' 18 | - '.terraformignore' 19 | - '.env.example' 20 | 21 | concurrency: deploy 22 | 23 | permissions: 24 | contents: write 25 | id-token: write 26 | packages: write 27 | checks: write 28 | 29 | jobs: 30 | paths_filter: 31 | name: Paths Filter 32 | runs-on: ubuntu-latest 33 | steps: 34 | - uses: actions/checkout@v4 35 | - uses: WalletConnect/actions/github/paths-filter/@2.2.1 36 | id: filter 37 | outputs: 38 | infra: ${{ steps.filter.outputs.infra }} 39 | app: ${{ steps.filter.outputs.app }} 40 | 41 | release: 42 | name: Release 43 | needs: [ paths_filter ] 44 | uses: WalletConnect/ci_workflows/.github/workflows/release.yml@0.2.18 45 | secrets: inherit 46 | with: 47 | task-name: ${{ vars.TASK_NAME }} 48 | infra-changed: ${{ needs.paths_filter.outputs.infra == 'true' }} 49 | app-changed: ${{ needs.paths_filter.outputs.app == 'true' }} 50 | 51 | cd: 52 | name: CD 53 | needs: [ paths_filter, release ] 54 | secrets: inherit 55 | uses: ./.github/workflows/sub-cd.yml 56 | with: 57 | deploy-infra: ${{ needs.paths_filter.outputs.infra == 'true' }} 58 | deploy-app: ${{ needs.paths_filter.outputs.app == 'true' }} 59 | deploy-prod: true 60 | version: ${{ needs.release.outputs.version }} 61 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | #--------------------------------------- 2 | # General 3 | 4 | .DS_Store 5 | .AppleDouble 6 | .LSOverride 7 | [Dd]esktop.ini 8 | 9 | #--------------------------------------- 10 | # Environment 11 | 12 | .direnv 13 | .envrc 14 | .env 15 | .env.terraform 16 | .actrc 17 | 18 | #--------------------------------------- 19 | # Editors 20 | 21 | # JetBrains 22 | .idea/ 23 | out/ 24 | .fleet 25 | *.iws 26 | 27 | # VSCode 28 | .vscode/ 29 | .history/ 30 | *.code-workspace 31 | 32 | #--------------------------------------- 33 | # Rust/Cargo 34 | 35 | # Generated by Cargo, will have compiled files and executables 36 | debug/ 37 | target/ 38 | .cargo/ 39 | 40 | # Backup files generated by rustfmt 41 | **/*.rs.bk 42 | 43 | # MSVC Windows builds of rustc generate these, which store debugging information 44 | *.pdb 45 | 46 | #--------------------------------------- 47 | # Terraform 48 | 49 | # Local .terraform directories 50 | **/.terraform/* 51 | 52 | # .tfstate files 53 | *.tfstate 54 | *.tfstate.* 55 | 56 | # Exclude all .tfvars files, which are likely to contain sensitive data, such as 57 | # password, private keys, and other secrets. These should not be part of version 58 | # control as they are data points which are potentially sensitive and subject 59 | # to change depending on the environment. 
60 | *.tfvars 61 | *.tfvars.json 62 | 63 | # Ignore override files as they are usually used to override resources locally and so are not checked in 64 | override.tf 65 | override.tf.json 66 | *_override.tf 67 | *_override.tf.json 68 | 69 | # Include override files you do wish to add to version control using negated pattern 70 | # 71 | # !example_override.tf 72 | 73 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan 74 | *tfplan* 75 | 76 | # Ignore CLI configuration files 77 | .terraformrc 78 | terraform.rc 79 | 80 | #--------------------------------------- 81 | # NPM/Yarn 82 | 83 | node_modules/ 84 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "terraform/monitoring/grafonnet-lib"] 2 | path = terraform/monitoring/grafonnet-lib 3 | url = git@github.com:WalletConnect/grafonnet-lib.git 4 | [submodule "irn"] 5 | path = irn 6 | url = git@github.com:WalletConnectFoundation/irn.git 7 | -------------------------------------------------------------------------------- /.terraformignore: -------------------------------------------------------------------------------- 1 | #--------------------------------------- 2 | # General 3 | .DS_Store 4 | .AppleDouble 5 | .LSOverride 6 | [Dd]esktop.ini 7 | .gitignore 8 | .gitmodules 9 | .pre-commit-config.yaml 10 | CHANGELOG.md 11 | LICENSE 12 | README.md 13 | .github/ 14 | ops/ 15 | Dockerfile 16 | justfile 17 | crash.log 18 | 19 | #--------------------------------------- 20 | # Rust/Cargo 21 | 22 | # Generated by Cargo, will have compiled files and executables 23 | src/ 24 | debug/ 25 | target/ 26 | build.rs 27 | cargo.lock 28 | cargo.toml 29 | rustfmt.toml 30 | 31 | # Backup files generated by rustfmt 32 | **/*.rs.bk 33 | 34 | # MSVC Windows builds of rustc generate these, which store debugging information 35 | *.pdb 36 | 37 | #--------------------------------------- 38 | # Environment 39 | .env.example 40 | .direnv 41 | .envrc 42 | 43 | #--------------------------------------- 44 | # JetBrains 45 | .idea/ 46 | out/ 47 | .fleet 48 | *.iws 49 | 50 | #--------------------------------------- 51 | # VSCode 52 | .vscode/ 53 | .history/ 54 | *.code-workspace 55 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 reown inc. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /build.rs: -------------------------------------------------------------------------------- 1 | use vergen::{vergen, Config}; 2 | 3 | fn main() -> Result<(), Box> { 4 | vergen(Config::default())?; 5 | 6 | Ok(()) 7 | } 8 | -------------------------------------------------------------------------------- /deny.toml: -------------------------------------------------------------------------------- 1 | [licenses] 2 | unused-allowed-license = "deny" 3 | allow = [ 4 | "Apache-2.0", 5 | "MIT", 6 | "CC0-1.0", 7 | "Unlicense", 8 | "Unicode-3.0", 9 | "BSD-3-Clause", 10 | "BSD-2-Clause", 11 | "BSL-1.0", 12 | "0BSD", 13 | "ISC", 14 | "MPL-2.0", 15 | "Zlib", 16 | ] 17 | 18 | exceptions = [{ name = "unicode-ident", allow = ["Unicode-DFS-2016"] }] 19 | 20 | [licenses.private] 21 | ignore = true 22 | 23 | # TODO We should be able to remove `ignore-sources` once we add `publish = false` to all the crates sourced from here. 24 | ignore-sources = [ 25 | "https://github.com/WalletConnect/utils-rs.git", 26 | "https://github.com/WalletConnect/WalletConnectRust.git" 27 | ] 28 | 29 | [[licenses.clarify]] 30 | name = "ring" 31 | expression = "ISC" 32 | license-files = [{ path = "LICENSE", hash = 0xbd0eed23 }] 33 | -------------------------------------------------------------------------------- /docker-compose.mock-bundler.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | anvil: 3 | image: ghcr.io/foundry-rs/foundry:stable 4 | restart: unless-stopped 5 | ports: ["8545:8545"] 6 | entrypoint: [ "anvil", "--fork-url", "https://sepolia.gateway.tenderly.co", "--host", "0.0.0.0", "--gas-price", "1" ] 7 | platform: linux/amd64 8 | 9 | mock-paymaster: 10 | image: ghcr.io/pimlicolabs/mock-verifying-paymaster@sha256:588629c066e5060635ac9756679b8d9049eeb412003cd96a518944dccd0b3b01 11 | restart: unless-stopped 12 | ports: ["3000:3000"] 13 | environment: 14 | - ALTO_RPC=http://alto:4337 15 | - ANVIL_RPC=http://anvil:8545 16 | 17 | alto: 18 | image: ghcr.io/pimlicolabs/mock-alto-bundler:main 19 | restart: unless-stopped 20 | ports: ["4337:4337"] 21 | environment: 22 | - ANVIL_RPC=http://anvil:8545 23 | - SKIP_DEPLOYMENTS=true 24 | -------------------------------------------------------------------------------- /docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "3.9" 2 | services: 3 | redis: 4 | image: redis:7.0 5 | ports: 6 | - "6379:6379" 7 | volumes: 8 | - redis:/data 9 | 10 | postgres: 11 | image: postgres:16 12 | environment: 13 | POSTGRES_HOST_AUTH_METHOD: trust 14 | ports: 15 | - "5432:5432" 16 | 17 | proxy: 18 | platform: linux/x86_64 19 | build: 20 | context: . 
21 | dockerfile: Dockerfile 22 | ports: 23 | - "3000:3000" 24 | environment: 25 | - RPC_PROXY_LOG_LEVEL=DEBUG 26 | - RPC_PROXY_HOST=0.0.0.0 27 | - RPC_PROXY_PORT=3000 28 | - RPC_PROXY_POKT_PROJECT_ID=${RPC_PROXY_POKT_PROJECT_ID} 29 | - RPC_PROXY_QUICKNODE_API_TOKENS=${RPC_PROXY_QUICKNODE_API_TOKENS} 30 | - RPC_PROXY_ZERION_API_KEY=${RPC_PROXY_ZERION_API_KEY} 31 | - RPC_PROXY_REGISTRY_API_URL=https://registry-prod-cf.walletconnect.org 32 | - RPC_PROXY_REGISTRY_API_AUTH_TOKEN=${RPC_PROXY_REGISTRY_API_AUTH_TOKEN} 33 | - RPC_PROXY_STORAGE_PROJECT_DATA_REDIS_ADDR_READ=redis://redis:6379/0 34 | - RPC_PROXY_STORAGE_PROJECT_DATA_REDIS_ADDR_WRITE=redis://redis:6379/0 35 | - RPC_PROXY_STORAGE_IDENTITY_CACHE_REDIS_ADDR_READ=redis://redis:6379/1 36 | - RPC_PROXY_STORAGE_IDENTITY_CACHE_REDIS_ADDR_WRITE=redis://redis:6379/1 37 | - RPC_PROXY_POSTGRES_URI=postgres://postgres@postgres:5432/postgres 38 | cap_add: 39 | - SYS_PTRACE # Enabling GDB to attach to a running process 40 | depends_on: 41 | - redis 42 | - postgres 43 | 44 | volumes: 45 | redis: 46 | -------------------------------------------------------------------------------- /funding.json: -------------------------------------------------------------------------------- 1 | { 2 | "opRetro": { 3 | "projectId": "0xa9155fd8bb7b8ef244047ad8578cabe91326a31ecb803485cc99147818fc30ca" 4 | } 5 | } 6 | 7 | -------------------------------------------------------------------------------- /integration/.prettierrc: -------------------------------------------------------------------------------- 1 | { 2 | "singleQuote": true, 3 | "semi": false, 4 | "trailingComma": "all", 5 | "tabWidth": 2, 6 | "printWidth": 80 7 | } 8 | -------------------------------------------------------------------------------- /integration/bundler.test.ts: -------------------------------------------------------------------------------- 1 | import { getTestSetup } from './init'; 2 | 3 | describe('Bundler operations', () => { 4 | const { baseUrl, projectId, httpClient } = getTestSetup(); 5 | 6 | const sepoliaChainId = 'eip155:11155111' 7 | const mainnetChainId = 'eip155:1' 8 | const method = 'eth_getUserOperationReceipt' 9 | const successOperationTxHash = '0x772b10c68cb2470259be889b97e87618a4d8fc2b21767503724a9842bc83b5de' 10 | 11 | it('unsupported method', async () => { 12 | let json_rpc = { 13 | jsonrpc: '2.0', 14 | method: 'eth_chainId', 15 | params: [successOperationTxHash], 16 | id: 1 17 | } 18 | let resp: any = await httpClient.post( 19 | `${baseUrl}/v1/bundler?projectId=${projectId}&chainId=${sepoliaChainId}`, 20 | json_rpc 21 | ) 22 | expect(resp.status).toBe(422) 23 | }) 24 | 25 | it('no receipt', async () => { 26 | let json_rpc = { 27 | jsonrpc: '2.0', 28 | method, 29 | params: [successOperationTxHash], 30 | id: 1 31 | } 32 | let resp: any = await httpClient.post( 33 | `${baseUrl}/v1/bundler?projectId=${projectId}&chainId=${mainnetChainId}`, 34 | json_rpc 35 | ) 36 | expect(resp.status).toBe(200) 37 | expect(resp.data.result).toBeNull() 38 | }) 39 | 40 | // Temporary disabling until fix the correct successOperationTxHash 41 | xit('successful receipt', async () => { 42 | let json_rpc = { 43 | jsonrpc: '2.0', 44 | method, 45 | params: [successOperationTxHash], 46 | id: 1 47 | } 48 | let resp: any = await httpClient.post( 49 | `${baseUrl}/v1/bundler?projectId=${projectId}&chainId=${sepoliaChainId}`, 50 | json_rpc 51 | ) 52 | expect(resp.status).toBe(200) 53 | expect(resp.data.result.success).toBe(true) 54 | }) 55 | }) 56 | 
-------------------------------------------------------------------------------- /integration/generators.test.ts: -------------------------------------------------------------------------------- 1 | import { getTestSetup } from './init'; 2 | 3 | describe('Generators', () => { 4 | const { baseUrl, projectId, httpClient } = getTestSetup(); 5 | 6 | it('onramp Pay SDK URL', async () => { 7 | const expected_host = 'https://pay.coinbase.com/buy/select-asset'; 8 | const address = '0x1234567890123456789012345678901234567890'; 9 | const partnerUserId = 'someUserID'; 10 | const payload = { 11 | partnerUserId, 12 | destinationWallets:[{ address }], 13 | }; 14 | let resp: any = await httpClient.post( 15 | `${baseUrl}/v1/generators/onrampurl?projectId=${projectId}`, 16 | payload 17 | ) 18 | expect(resp.status).toBe(200) 19 | expect(typeof resp.data).toBe('object') 20 | expect(typeof resp.data.url).toBe('string') 21 | expect(resp.data.url).toContain(expected_host) 22 | expect(resp.data.url).toContain(address) 23 | expect(resp.data.url).toContain(partnerUserId) 24 | }) 25 | it('onramp Pay SDK URL wrong payload', async () => { 26 | const address = '0x1234567890123456789012345678901234567890'; 27 | const partnerUserId = 'someUserID'; 28 | // Creating the wrong payload 29 | const payload = { 30 | partner: partnerUserId, 31 | someWallets:[{ address }], 32 | }; 33 | let resp: any = await httpClient.post( 34 | `${baseUrl}/v1/generators/onrampurl?projectId=${projectId}`, 35 | payload 36 | ) 37 | expect(resp.status).toBe(400) 38 | }) 39 | }) 40 | -------------------------------------------------------------------------------- /integration/health.test.ts: -------------------------------------------------------------------------------- 1 | import { getTestSetup } from './init'; 2 | 3 | describe('Health', () => { 4 | it('is healthy', async () => { 5 | const { baseUrl, projectId, httpClient } = getTestSetup(); 6 | const resp: any = await httpClient.get(`${baseUrl}/health`) 7 | 8 | expect(resp.status).toBe(200) 9 | expect(resp.data).toContain('OK v') 10 | expect(resp.data).toContain('hash:') 11 | expect(resp.data).toContain('features:') 12 | expect(resp.data).toContain('uptime:') 13 | }) 14 | }) 15 | -------------------------------------------------------------------------------- /integration/init.ts: -------------------------------------------------------------------------------- 1 | import axios from 'axios' 2 | 3 | interface TestSetup { 4 | baseUrl: string; 5 | projectId: string; 6 | httpClient: any; 7 | } 8 | 9 | export const getTestSetup = (): TestSetup => { 10 | const baseUrl = process.env.RPC_URL; 11 | if (!baseUrl) { 12 | throw new Error('RPC_URL environment variable not set'); 13 | } 14 | const projectId = process.env.PROJECT_ID; 15 | if (!projectId) { 16 | throw new Error('PROJECT_ID environment variable not set'); 17 | } 18 | const httpClient = axios.create({ 19 | validateStatus: (_status) => true, 20 | }) 21 | 22 | return { baseUrl, projectId, httpClient }; 23 | }; 24 | -------------------------------------------------------------------------------- /integration/jestconfig.integration.json: -------------------------------------------------------------------------------- 1 | { 2 | "setupFilesAfterEnv": [], 3 | "transform": { 4 | "^.+\\.(t|j)sx?$": "ts-jest" 5 | }, 6 | "testRegex": "./.*\\.test\\.ts$", 7 | "collectCoverageFrom": ["src/**/*.{ts,js}"], 8 | "testTimeout": 30000 9 | } 10 | -------------------------------------------------------------------------------- /integration/middlewares.test.ts: 
-------------------------------------------------------------------------------- 1 | import { getTestSetup } from './init'; 2 | 3 | describe('Middlewares', () => { 4 | const { baseUrl, projectId, httpClient } = getTestSetup(); 5 | 6 | it('OK response should contain x-request-id header', async () => { 7 | let resp: any = await httpClient.get( 8 | `${baseUrl}/v1/account/0x2aae531a81461f029cd55cb46703211c9227ba05/history?projectId=${projectId}`, 9 | ) 10 | expect(resp.headers).toBeDefined(); 11 | expect(resp.status).toBe(200); 12 | // Check if the header value is a valid UUIDv4 13 | const uuidv4Pattern = /^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$/i; 14 | expect(resp.headers['x-request-id']).toMatch(uuidv4Pattern); 15 | }) 16 | it('Error response should contain x-request-id header', async () => { 17 | // Wrong address request 18 | let resp: any = await httpClient.get( 19 | `${baseUrl}/v1/account/0Ff3ea39310011333095CFCcCc7c4Ad74034CABA63/history?projectId=${projectId}`, 20 | ) 21 | expect(resp.headers).toBeDefined(); 22 | expect(resp.status).toBe(400); 23 | // Check if the header value is a valid UUIDv4 24 | const uuidv4Pattern = /^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$/i; 25 | expect(resp.headers['x-request-id']).toMatch(uuidv4Pattern); 26 | }) 27 | }) 28 | -------------------------------------------------------------------------------- /integration/portfolio.test.ts: -------------------------------------------------------------------------------- 1 | import { getTestSetup } from './init'; 2 | 3 | describe('Portfolio', () => { 4 | const { baseUrl, projectId, httpClient } = getTestSetup(); 5 | 6 | it('finds portfolio items', async () => { 7 | let resp: any = await httpClient.get( 8 | `${baseUrl}/v1/account/0x2aae531a81461f029cd55cb46703211c9227ba05/portfolio?projectId=${projectId}`, 9 | ) 10 | expect(resp.status).toBe(200) 11 | const first = resp.data.data[0] 12 | expect(first.id).toBeDefined() 13 | expect(first.name).toBeDefined() 14 | expect(first.symbol).toBeDefined() 15 | }) 16 | }) 17 | -------------------------------------------------------------------------------- /integration/proxy.test.ts: -------------------------------------------------------------------------------- 1 | import { getTestSetup } from './init'; 2 | 3 | describe('Proxy', () => { 4 | const { baseUrl, projectId, httpClient } = getTestSetup(); 5 | 6 | it('Exact provider request', async () => { 7 | const providerId = 'Binance'; 8 | const chainId = "eip155:56"; 9 | const payload = { 10 | jsonrpc: "2.0", 11 | method: "eth_chainId", 12 | params: [], 13 | id: 1, 14 | }; 15 | 16 | // Allowed projectID 17 | // Only allowed projectID can make this type of request 18 | let resp: any = await httpClient.post( 19 | `${baseUrl}/v1?chainId=${chainId}&projectId=${projectId}&providerId=${providerId}`, 20 | payload 21 | ) 22 | expect(resp.status).toBe(200) 23 | expect(typeof resp.data).toBe('object') 24 | 25 | // Not allowed projectID for this request type 26 | const notAllowedProjectId = 'someprojectid'; 27 | resp = await httpClient.post( 28 | `${baseUrl}/v1?chainId=${chainId}&projectId=${notAllowedProjectId}&providerId=${providerId}`, 29 | payload 30 | ) 31 | expect(resp.status).toBe(401) 32 | }) 33 | }) 34 | -------------------------------------------------------------------------------- /integration/supported-chains.test.ts: -------------------------------------------------------------------------------- 1 | import { getTestSetup } from './init'; 2 | 3 | describe('Supported 
chains', () => { 4 | const { baseUrl, httpClient } = getTestSetup(); 5 | 6 | it('Returns Ethereum Mainnet', async () => { 7 | const resp = await httpClient.get(`${baseUrl}/v1/supported-chains`) 8 | expect(resp.status).toBe(200) 9 | expect(resp.data.http).toContain('eip155:1') 10 | expect(resp.data.http).toContain('eip155:8453') 11 | expect(resp.data.ws).toContain('eip155:1') 12 | expect(resp.data.ws).not.toContain('eip155:8453') 13 | }) 14 | }) 15 | -------------------------------------------------------------------------------- /integration/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "esModuleInterop": true 4 | } 5 | } 6 | -------------------------------------------------------------------------------- /migrations/20231209213851_init-names.sql: -------------------------------------------------------------------------------- 1 | -- Creating the hstore extension for the attributes column 2 | CREATE EXTENSION hstore; 3 | 4 | -- Initializing the names table 5 | CREATE TABLE names ( 6 | name VARCHAR(255) PRIMARY KEY, 7 | registered_at TIMESTAMPTZ NOT NULL DEFAULT now(), 8 | updated_at TIMESTAMPTZ NOT NULL DEFAULT now(), 9 | 10 | -- We are using the hstore as a key-value store for the extensible attributes list 11 | attributes hstore, 12 | 13 | -- Check for the standardized name format 14 | CONSTRAINT ens_name_standard CHECK (name ~ '^[a-z0-9.-]*$') 15 | ); 16 | -------------------------------------------------------------------------------- /migrations/20231209213904_init-addresses.sql: -------------------------------------------------------------------------------- 1 | -- List of supported blockchain namespaces 2 | CREATE TYPE namespaces AS ENUM ( 3 | 'eip155' -- Ethereum 4 | ); 5 | 6 | -- Initializing the addresses table 7 | CREATE TABLE addresses ( 8 | -- Breakdown of the CAIP-10 address format into namespace:chain_id:address 9 | namespace namespaces, 10 | /* 11 | chain_id can represent a chain id e.g. (Cosmos and cosmoshub-3 chain): 12 | cosmos:cosmoshub-3:cosmos1t2uflqwqe0fsj0shcfkrvpukewcw40yjj6hdc0 13 | chain_id can be empty e.g. (Litecoin mainnet): 14 | bip122:12a765e31ffd4059bada1e25190f6e98 15 | */ 16 | chain_id VARCHAR(255) NOT NULL, 17 | address VARCHAR(255) NOT NULL, 18 | 19 | name VARCHAR(255) REFERENCES names (name) ON DELETE CASCADE, 20 | created_at TIMESTAMPTZ NOT NULL DEFAULT now(), 21 | 22 | PRIMARY KEY (name, namespace, chain_id, address) 23 | ); 24 | 25 | -- Creating indexes for the address lookups 26 | CREATE INDEX index_cap_10_format_address 27 | ON addresses (namespace, chain_id, address); 28 | CREATE INDEX index_namespace_address 29 | ON addresses (namespace, address); 30 | CREATE INDEX index_address 31 | ON addresses (address); 32 | CREATE INDEX index_name 33 | ON addresses (name); 34 | -------------------------------------------------------------------------------- /migrations/README.md: -------------------------------------------------------------------------------- 1 | # Migrations 2 | 3 | This folder contains SQL migration scripts that are automatically run on start-up.
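As a rough, hedged sketch of what "run on start-up" typically looks like with sqlx (the pool setup, connection string, and error handling below are illustrative assumptions; the service's actual bootstrap code lives under `src/` and is not shown here):

```rust
// Illustrative only: assumes sqlx with the "postgres" and "migrate" features and a tokio runtime.
use sqlx::postgres::PgPoolOptions;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Hypothetical connection string; docker-compose.yaml passes RPC_PROXY_POSTGRES_URI to the service.
    let pool = PgPoolOptions::new()
        .max_connections(10)
        .connect("postgres://postgres@localhost:5432/postgres")
        .await?;

    // Embeds the ./migrations folder at compile time and applies any pending scripts in order.
    sqlx::migrate!("./migrations").run(&pool).await?;

    Ok(())
}
```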
4 | 5 | ## New Migration 6 | 7 | To create a new migration file, sqlx-cli must be installed: 8 | 9 | ``` 10 | cargo install sqlx-cli 11 | ``` 12 | 13 | Create a new migration with the `name`: 14 | 15 | ``` 16 | sqlx migrate add <name> 17 | ``` 18 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "blockchain-api-integration-tests", 3 | "version": "1.0.0", 4 | "description": "Integration tests for blockchain-api", 5 | "scripts": { 6 | "integration": "jest --config integration/jestconfig.integration.json --verbose", 7 | "format": "prettier --config integration/.prettierrc --write '*.{json,js}' 'integration/**/*.{js,ts}' '.github/**/*.{yml,yaml}'", 8 | "lint": "eslint --max-warnings=0 integration && prettier --config integration/.prettierrc --check '*.{json,js}' 'integration/**/*.{js,ts}'" 9 | }, 10 | "author": "reown inc. ", 11 | "license": "Apache-2.0", 12 | "eslintConfig": { 13 | "root": true, 14 | "extends": [ 15 | "typescript", 16 | "prettier" 17 | ] 18 | }, 19 | "devDependencies": { 20 | "@types/jest": "^26.0.23", 21 | "@typescript-eslint/eslint-plugin": "^4.16.1", 22 | "@typescript-eslint/parser": "^4.16.1", 23 | "axios": "^0.27.2", 24 | "eslint": "^7.21.0", 25 | "eslint-config-prettier": "^8.1.0", 26 | "eslint-config-typescript": "^3.0.0", 27 | "ethers": "^6.13", 28 | "jest": "^27.0.1", 29 | "json-canonicalize": "^1.0.6", 30 | "keccak256": "^1.0.6", 31 | "prettier": "^2.3.0", 32 | "ts-jest": "^27.0.1", 33 | "ts-loader": "^9.2.2", 34 | "typescript": "^4.3.2" 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | edition = "2021" 2 | fn_single_line = false 3 | format_code_in_doc_comments = true 4 | format_strings = true 5 | imports_layout = "HorizontalVertical" 6 | imports_granularity = "One" 7 | normalize_comments = true 8 | normalize_doc_attributes = true 9 | reorder_imports = true 10 | reorder_impl_items = true 11 | group_imports = "StdExternalCrate" 12 | use_try_shorthand = true 13 | wrap_comments = true 14 | overflow_delimited_expr = true 15 | remove_nested_parens = true 16 | reorder_modules = true 17 | unstable_features = true 18 | use_field_init_shorthand = true 19 | -------------------------------------------------------------------------------- /src/analytics/account_names_info.rs: -------------------------------------------------------------------------------- 1 | use {parquet_derive::ParquetRecordWriter, serde::Serialize, std::sync::Arc}; 2 | 3 | #[derive(Debug, Clone, Serialize, ParquetRecordWriter)] 4 | #[serde(rename_all = "camelCase")] 5 | pub struct AccountNameRegistration { 6 | pub timestamp: chrono::NaiveDateTime, 7 | 8 | pub name: String, 9 | pub owner_address: String, 10 | pub chain_id: String, 11 | 12 | pub origin: Option<String>, 13 | pub region: Option<String>, 14 | pub country: Option<Arc<str>>, 15 | pub continent: Option<Arc<str>>, 16 | 17 | // Sdk info 18 | pub sv: Option<String>, 19 | pub st: Option<String>, 20 | } 21 | 22 | impl AccountNameRegistration { 23 | #[allow(clippy::too_many_arguments)] 24 | pub fn new( 25 | name: String, 26 | owner_address: String, 27 | chain_id: String, 28 | origin: Option<String>, 29 | region: Option<Vec<String>>, 30 | country: Option<Arc<str>>, 31 | continent: Option<Arc<str>>, 32 | sv: Option<String>, 33 | st: Option<String>, 34 | ) -> Self { 35 | Self { 36 | timestamp: wc::analytics::time::now(), 37 | name, 38 | owner_address, 39 | chain_id, 40 | origin,
41 | region: region.map(|r| r.join(", ")), 42 | country, 43 | continent, 44 | sv, 45 | st, 46 | } 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /src/analytics/balance_lookup_info.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::providers::ProviderKind, parquet_derive::ParquetRecordWriter, serde::Serialize, 3 | std::sync::Arc, 4 | }; 5 | 6 | #[derive(Debug, Clone, Serialize, ParquetRecordWriter)] 7 | #[serde(rename_all = "camelCase")] 8 | pub struct BalanceLookupInfo { 9 | pub timestamp: chrono::NaiveDateTime, 10 | 11 | pub symbol: String, 12 | pub implementation_chain_id: String, 13 | pub quantity: String, 14 | pub value: f64, 15 | pub price: f64, 16 | pub currency: String, 17 | 18 | pub address: String, 19 | pub project_id: String, 20 | 21 | pub provider: String, 22 | 23 | pub origin: Option<String>, 24 | pub region: Option<String>, 25 | pub country: Option<Arc<str>>, 26 | pub continent: Option<Arc<str>>, 27 | 28 | // Sdk info 29 | pub sv: Option<String>, 30 | pub st: Option<String>, 31 | 32 | pub request_id: String, 33 | } 34 | 35 | impl BalanceLookupInfo { 36 | #[allow(clippy::too_many_arguments)] 37 | pub fn new( 38 | symbol: String, 39 | implementation_chain_id: String, 40 | quantity: String, 41 | value: f64, 42 | price: f64, 43 | currency: String, 44 | address: String, 45 | project_id: String, 46 | provider: &ProviderKind, 47 | origin: Option<String>, 48 | region: Option<Vec<String>>, 49 | country: Option<Arc<str>>, 50 | continent: Option<Arc<str>>, 51 | sv: Option<String>, 52 | st: Option<String>, 53 | request_id: String, 54 | ) -> Self { 55 | Self { 56 | timestamp: wc::analytics::time::now(), 57 | symbol, 58 | implementation_chain_id, 59 | quantity, 60 | value, 61 | price, 62 | currency, 63 | address, 64 | project_id, 65 | provider: provider.to_string(), 66 | origin, 67 | region: region.map(|r| r.join(", ")), 68 | country, 69 | continent, 70 | sv, 71 | st, 72 | request_id, 73 | } 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /src/analytics/config.rs: -------------------------------------------------------------------------------- 1 | use {serde::Deserialize, serde_piecewise_default::DeserializePiecewiseDefault}; 2 | 3 | #[derive(DeserializePiecewiseDefault, Debug, Clone, Default, PartialEq, Eq)] 4 | pub struct Config { 5 | pub s3_endpoint: Option<String>, 6 | pub export_bucket: Option<String>, 7 | } 8 | -------------------------------------------------------------------------------- /src/database/config.rs: -------------------------------------------------------------------------------- 1 | use serde::Deserialize; 2 | 3 | const DEFAULT_MAX_CONNECTIONS: u16 = 10; 4 | 5 | #[derive(Deserialize, Debug, Clone, PartialEq, Eq)] 6 | pub struct PostgresConfig { 7 | /// The database connection uri.
8 | /// postgres://postgres@localhost:5432/postgres 9 | pub uri: String, 10 | /// Maximum connections for the sqlx pool 11 | #[serde(default = "default_max_connections")] 12 | pub max_connections: u16, 13 | } 14 | 15 | fn default_max_connections() -> u16 { 16 | DEFAULT_MAX_CONNECTIONS 17 | } 18 | -------------------------------------------------------------------------------- /src/database/error.rs: -------------------------------------------------------------------------------- 1 | #[derive(Debug, thiserror::Error)] 2 | pub enum DatabaseError { 3 | #[error("sqlx error: {0}")] 4 | SqlxError(#[from] sqlx::Error), 5 | #[error("Bad argument was provided for the database helper: {0}")] 6 | BadArgument(String), 7 | #[error("Address required: {0}")] 8 | AddressRequired(String), 9 | #[error("{0:?}")] 10 | SerdeJson(#[from] serde_json::Error), 11 | } 12 | -------------------------------------------------------------------------------- /src/database/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod config; 2 | pub mod error; 3 | pub mod helpers; 4 | pub mod types; 5 | pub mod utils; 6 | -------------------------------------------------------------------------------- /src/database/types.rs: -------------------------------------------------------------------------------- 1 | use { 2 | chrono::{DateTime, Utc}, 3 | serde::{Deserialize, Serialize}, 4 | sqlx::{FromRow, Type}, 5 | std::collections::HashMap, 6 | }; 7 | 8 | /// Currently supported blockchain namespaces 9 | #[derive(Type, Serialize, Deserialize, Debug, Clone, Eq, PartialEq, Hash)] 10 | #[sqlx(type_name = "namespaces", rename_all = "lowercase")] 11 | pub enum SupportedNamespaces { 12 | /// Ethereum 13 | Eip155, 14 | } 15 | 16 | impl SupportedNamespaces { 17 | // Convert a SLIP-44 coin type to the SupportedNamespaces enum 18 | pub fn from_slip44(coin_type: u32) -> Option<Self> { 19 | match coin_type { 20 | 60 => Some(SupportedNamespaces::Eip155), 21 | _ => None, 22 | } 23 | } 24 | 25 | // Convert from the enum to the SLIP-44 coin type 26 | pub fn to_slip44(&self) -> u32 { 27 | match self { 28 | SupportedNamespaces::Eip155 => 60, 29 | } 30 | } 31 | } 32 | 33 | /// Represents the ENS name record 34 | #[derive(Debug, FromRow, Serialize, Deserialize)] 35 | pub struct Name { 36 | pub name: String, 37 | pub registered_at: DateTime<Utc>, 38 | pub updated_at: DateTime<Utc>, 39 | /// Postgres hstore data type, represented as key-value pairs for attributes 40 | pub attributes: Option<HashMap<String, Option<String>>>, 41 | } 42 | 43 | /// Represents the ENS address record 44 | #[derive(Debug, Serialize, Deserialize)] 45 | pub struct Address { 46 | pub address: String, 47 | pub created_at: Option<DateTime<Utc>>, 48 | } 49 | 50 | /// Represents the ENSIP-11 compatible addresses map 51 | pub type ENSIP11AddressesMap = HashMap<u32, Address>; 52 | 53 | /// Represents the ENS name record 54 | #[derive(FromRow, Debug, Serialize, Deserialize)] 55 | pub struct NameAndAddresses { 56 | pub name: String, 57 | pub registered_at: DateTime<Utc>, 58 | pub updated_at: DateTime<Utc>, 59 | /// Postgres hstore data type, represented as key-value pairs for attributes 60 | pub attributes: Option<HashMap<String, Option<String>>>, 61 | pub addresses: ENSIP11AddressesMap, 62 | } 63 | -------------------------------------------------------------------------------- /src/database/utils.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | pub fn hashmap_to_hstore(hashmap: &HashMap<String, String>) -> String { 4 | hashmap 5 | .iter() 6 | .map(|(key, value)| format!("\"{}\" => \"{}\"",
key, value)) 7 | .collect::<Vec<String>>() 8 | .join(", ") 9 | } 10 | -------------------------------------------------------------------------------- /src/env/allnodes.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::ProviderConfig, 3 | crate::providers::{Priority, Weight}, 4 | std::collections::HashMap, 5 | }; 6 | 7 | #[derive(Debug)] 8 | pub struct AllnodesConfig { 9 | pub supported_chains: HashMap<String, (String, Weight)>, 10 | pub supported_ws_chains: HashMap<String, (String, Weight)>, 11 | pub api_key: String, 12 | } 13 | 14 | impl AllnodesConfig { 15 | pub fn new(api_key: String) -> Self { 16 | Self { 17 | supported_chains: default_supported_chains(), 18 | supported_ws_chains: default_ws_supported_chains(), 19 | api_key, 20 | } 21 | } 22 | } 23 | 24 | impl ProviderConfig for AllnodesConfig { 25 | fn supported_chains(self) -> HashMap<String, (String, Weight)> { 26 | self.supported_chains 27 | } 28 | 29 | fn supported_ws_chains(self) -> HashMap<String, (String, Weight)> { 30 | self.supported_ws_chains 31 | } 32 | 33 | fn provider_kind(&self) -> crate::providers::ProviderKind { 34 | crate::providers::ProviderKind::Allnodes 35 | } 36 | } 37 | 38 | fn default_supported_chains() -> HashMap<String, (String, Weight)> { 39 | // Keep in-sync with SUPPORTED_CHAINS.md 40 | 41 | HashMap::from([ 42 | // Ethereum Mainnet 43 | ( 44 | "eip155:1".into(), 45 | ("eth57873".into(), Weight::new(Priority::Max).unwrap()), 46 | ), 47 | ]) 48 | } 49 | 50 | fn default_ws_supported_chains() -> HashMap<String, (String, Weight)> { 51 | // Keep in-sync with SUPPORTED_CHAINS.md 52 | 53 | HashMap::from([ 54 | // Ethereum 55 | ( 56 | "eip155:1".into(), 57 | ("eth57873".into(), Weight::new(Priority::Normal).unwrap()), 58 | ), 59 | ]) 60 | } 61 | -------------------------------------------------------------------------------- /src/env/arbitrum.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::ProviderConfig, 3 | crate::providers::{Priority, Weight}, 4 | std::collections::HashMap, 5 | }; 6 | 7 | #[derive(Debug)] 8 | pub struct ArbitrumConfig { 9 | pub supported_chains: HashMap<String, (String, Weight)>, 10 | } 11 | 12 | impl Default for ArbitrumConfig { 13 | fn default() -> Self { 14 | Self { 15 | supported_chains: default_supported_chains(), 16 | } 17 | } 18 | } 19 | 20 | impl ProviderConfig for ArbitrumConfig { 21 | fn supported_chains(self) -> HashMap<String, (String, Weight)> { 22 | self.supported_chains 23 | } 24 | 25 | fn supported_ws_chains(self) -> HashMap<String, (String, Weight)> { 26 | HashMap::new() 27 | } 28 | 29 | fn provider_kind(&self) -> crate::providers::ProviderKind { 30 | crate::providers::ProviderKind::Arbitrum 31 | } 32 | } 33 | 34 | fn default_supported_chains() -> HashMap<String, (String, Weight)> { 35 | // Keep in-sync with SUPPORTED_CHAINS.md 36 | 37 | HashMap::from([ 38 | // Arbitrum One 39 | ( 40 | "eip155:42161".into(), 41 | ( 42 | "https://arb1.arbitrum.io/rpc".into(), 43 | Weight::new(Priority::Normal).unwrap(), 44 | ), 45 | ), 46 | // Arbitrum Sepolia 47 | ( 48 | "eip155:421614".into(), 49 | ( 50 | "https://sepolia-rollup.arbitrum.io/rpc".into(), 51 | Weight::new(Priority::Normal).unwrap(), 52 | ), 53 | ), 54 | ]) 55 | } 56 | -------------------------------------------------------------------------------- /src/env/aurora.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::ProviderConfig, 3 | crate::providers::{Priority, Weight}, 4 | std::collections::HashMap, 5 | }; 6 | 7 | #[derive(Debug)] 8 | pub struct AuroraConfig { 9 | pub supported_chains: HashMap<String, (String, Weight)>, 10 | } 11 | 12 | impl Default for AuroraConfig { 13 | fn default() -> Self { 14 | Self { 15 | supported_chains:
default_supported_chains(), 16 | } 17 | } 18 | } 19 | 20 | impl ProviderConfig for AuroraConfig { 21 | fn supported_chains(self) -> HashMap<String, (String, Weight)> { 22 | self.supported_chains 23 | } 24 | 25 | fn supported_ws_chains(self) -> HashMap<String, (String, Weight)> { 26 | HashMap::new() 27 | } 28 | 29 | fn provider_kind(&self) -> crate::providers::ProviderKind { 30 | crate::providers::ProviderKind::Aurora 31 | } 32 | } 33 | 34 | fn default_supported_chains() -> HashMap<String, (String, Weight)> { 35 | // Keep in-sync with SUPPORTED_CHAINS.md 36 | 37 | HashMap::from([ 38 | // Aurora Mainnet 39 | ( 40 | "eip155:1313161554".into(), 41 | ( 42 | "https://mainnet.aurora.dev".into(), 43 | Weight::new(Priority::High).unwrap(), 44 | ), 45 | ), 46 | // Aurora Testnet 47 | ( 48 | "eip155:1313161555".into(), 49 | ( 50 | "https://testnet.aurora.dev".into(), 51 | Weight::new(Priority::High).unwrap(), 52 | ), 53 | ), 54 | ]) 55 | } 56 | -------------------------------------------------------------------------------- /src/env/base.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::ProviderConfig, 3 | crate::providers::{Priority, Weight}, 4 | std::collections::HashMap, 5 | }; 6 | 7 | #[derive(Debug)] 8 | pub struct BaseConfig { 9 | pub supported_chains: HashMap<String, (String, Weight)>, 10 | } 11 | 12 | impl Default for BaseConfig { 13 | fn default() -> Self { 14 | Self { 15 | supported_chains: default_supported_chains(), 16 | } 17 | } 18 | } 19 | 20 | impl ProviderConfig for BaseConfig { 21 | fn supported_chains(self) -> HashMap<String, (String, Weight)> { 22 | self.supported_chains 23 | } 24 | 25 | fn supported_ws_chains(self) -> HashMap<String, (String, Weight)> { 26 | HashMap::new() 27 | } 28 | 29 | fn provider_kind(&self) -> crate::providers::ProviderKind { 30 | crate::providers::ProviderKind::Base 31 | } 32 | } 33 | 34 | fn default_supported_chains() -> HashMap<String, (String, Weight)> { 35 | // Keep in-sync with SUPPORTED_CHAINS.md 36 | 37 | HashMap::from([ 38 | // Base Mainnet 39 | ( 40 | "eip155:8453".into(), 41 | ( 42 | "https://mainnet.base.org".into(), 43 | Weight::new(Priority::Normal).unwrap(), 44 | ), 45 | ), 46 | // Base Sepolia 47 | ( 48 | "eip155:84532".into(), 49 | ( 50 | "https://sepolia.base.org".into(), 51 | Weight::new(Priority::Low).unwrap(), 52 | ), 53 | ), 54 | ]) 55 | } 56 | -------------------------------------------------------------------------------- /src/env/binance.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::ProviderConfig, 3 | crate::providers::{Priority, Weight}, 4 | std::collections::HashMap, 5 | }; 6 | 7 | #[derive(Debug)] 8 | pub struct BinanceConfig { 9 | pub supported_chains: HashMap<String, (String, Weight)>, 10 | } 11 | 12 | impl ProviderConfig for BinanceConfig { 13 | fn supported_chains(self) -> HashMap<String, (String, Weight)> { 14 | self.supported_chains 15 | } 16 | 17 | fn supported_ws_chains(self) -> HashMap<String, (String, Weight)> { 18 | HashMap::new() 19 | } 20 | 21 | fn provider_kind(&self) -> crate::providers::ProviderKind { 22 | crate::providers::ProviderKind::Binance 23 | } 24 | } 25 | 26 | impl Default for BinanceConfig { 27 | fn default() -> Self { 28 | Self { 29 | supported_chains: default_supported_chains(), 30 | } 31 | } 32 | } 33 | 34 | fn default_supported_chains() -> HashMap<String, (String, Weight)> { 35 | // Keep in-sync with SUPPORTED_CHAINS.md 36 | 37 | HashMap::from([ 38 | // Binance Smart Chain Mainnet 39 | ( 40 | "eip155:56".into(), 41 | ( 42 | "https://bsc-dataseed.binance.org/".into(), 43 | Weight::new(Priority::High).unwrap(), 44 | ), 45 | ), 46 | // Binance Smart Chain Testnet 47 | ( 48 | "eip155:97".into(), 49 | ( 50 | "https://data-seed-prebsc-1-s1.binance.org:8545".into(), 51 |
Weight::new(Priority::High).unwrap(), 52 | ), 53 | ), 54 | ]) 55 | } 56 | -------------------------------------------------------------------------------- /src/env/callstatic.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::ProviderConfig, 3 | crate::providers::{Priority, Weight}, 4 | std::collections::HashMap, 5 | }; 6 | 7 | #[derive(Debug)] 8 | pub struct CallStaticConfig { 9 | pub api_key: String, 10 | pub supported_chains: HashMap, 11 | } 12 | 13 | impl CallStaticConfig { 14 | pub fn new(api_key: String) -> Self { 15 | Self { 16 | api_key, 17 | supported_chains: default_supported_chains(), 18 | } 19 | } 20 | } 21 | 22 | impl ProviderConfig for CallStaticConfig { 23 | fn supported_chains(self) -> HashMap { 24 | self.supported_chains 25 | } 26 | 27 | fn supported_ws_chains(self) -> HashMap { 28 | HashMap::new() 29 | } 30 | 31 | fn provider_kind(&self) -> crate::providers::ProviderKind { 32 | crate::providers::ProviderKind::CallStatic 33 | } 34 | } 35 | 36 | fn default_supported_chains() -> HashMap { 37 | // Keep in-sync with SUPPORTED_CHAINS.md 38 | 39 | HashMap::from([ 40 | // BSC mainnet 41 | ( 42 | "eip155:56".into(), 43 | ("bsc".into(), Weight::new(Priority::Disabled).unwrap()), 44 | ), 45 | ]) 46 | } 47 | -------------------------------------------------------------------------------- /src/env/dune.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::BalanceProviderConfig, 3 | crate::{ 4 | providers::{Priority, Weight}, 5 | utils::crypto::CaipNamespaces, 6 | }, 7 | std::collections::HashMap, 8 | }; 9 | 10 | #[derive(Debug)] 11 | pub struct DuneConfig { 12 | pub api_key: String, 13 | pub supported_namespaces: HashMap, 14 | } 15 | 16 | impl DuneConfig { 17 | pub fn new(api_key: String) -> Self { 18 | Self { 19 | api_key, 20 | supported_namespaces: default_supported_namespaces(), 21 | } 22 | } 23 | } 24 | 25 | impl BalanceProviderConfig for DuneConfig { 26 | fn supported_namespaces(self) -> HashMap { 27 | self.supported_namespaces 28 | } 29 | 30 | fn provider_kind(&self) -> crate::providers::ProviderKind { 31 | crate::providers::ProviderKind::Dune 32 | } 33 | } 34 | 35 | fn default_supported_namespaces() -> HashMap { 36 | HashMap::from([ 37 | (CaipNamespaces::Eip155, Weight::new(Priority::High).unwrap()), 38 | (CaipNamespaces::Solana, Weight::new(Priority::High).unwrap()), 39 | ]) 40 | } 41 | -------------------------------------------------------------------------------- /src/env/hiro.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::ProviderConfig, 3 | crate::providers::{Priority, Weight}, 4 | std::collections::HashMap, 5 | }; 6 | 7 | #[derive(Debug)] 8 | pub struct HiroConfig { 9 | pub supported_chains: HashMap, 10 | } 11 | 12 | impl Default for HiroConfig { 13 | fn default() -> Self { 14 | Self { 15 | supported_chains: default_supported_chains(), 16 | } 17 | } 18 | } 19 | 20 | impl ProviderConfig for HiroConfig { 21 | fn supported_chains(self) -> HashMap { 22 | self.supported_chains 23 | } 24 | 25 | fn supported_ws_chains(self) -> HashMap { 26 | HashMap::new() 27 | } 28 | 29 | fn provider_kind(&self) -> crate::providers::ProviderKind { 30 | crate::providers::ProviderKind::Hiro 31 | } 32 | } 33 | 34 | fn default_supported_chains() -> HashMap { 35 | // Keep in-sync with SUPPORTED_CHAINS.md 36 | 37 | HashMap::from([ 38 | // Stacks Mainnet 39 | ( 40 | "stacks:1".into(), 41 | ( 42 | "https://api.mainnet.hiro.so/".into(), 43 
| Weight::new(Priority::Normal).unwrap(), 44 | ), 45 | ), 46 | // Stacks Testnet 47 | ( 48 | "stacks:2147483648".into(), 49 | ( 50 | "https://api.testnet.hiro.so/".into(), 51 | Weight::new(Priority::Normal).unwrap(), 52 | ), 53 | ), 54 | ]) 55 | } 56 | -------------------------------------------------------------------------------- /src/env/mantle.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::ProviderConfig, 3 | crate::providers::{Priority, Weight}, 4 | std::collections::HashMap, 5 | }; 6 | 7 | #[derive(Debug)] 8 | pub struct MantleConfig { 9 | pub supported_chains: HashMap, 10 | } 11 | 12 | impl Default for MantleConfig { 13 | fn default() -> Self { 14 | Self { 15 | supported_chains: default_supported_chains(), 16 | } 17 | } 18 | } 19 | 20 | impl ProviderConfig for MantleConfig { 21 | fn supported_chains(self) -> HashMap { 22 | self.supported_chains 23 | } 24 | 25 | fn supported_ws_chains(self) -> HashMap { 26 | HashMap::new() 27 | } 28 | 29 | fn provider_kind(&self) -> crate::providers::ProviderKind { 30 | crate::providers::ProviderKind::Mantle 31 | } 32 | } 33 | 34 | fn default_supported_chains() -> HashMap { 35 | // Keep in-sync with SUPPORTED_CHAINS.md 36 | 37 | HashMap::from([ 38 | // Mantle mainnet 39 | ( 40 | "eip155:5000".into(), 41 | ( 42 | "https://rpc.mantle.xyz".into(), 43 | Weight::new(Priority::High).unwrap(), 44 | ), 45 | ), 46 | // Mantle Sepolia testnet 47 | ( 48 | "eip155:5003".into(), 49 | ( 50 | "https://rpc.sepolia.mantle.xyz".into(), 51 | Weight::new(Priority::High).unwrap(), 52 | ), 53 | ), 54 | ]) 55 | } 56 | -------------------------------------------------------------------------------- /src/env/monad.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::ProviderConfig, 3 | crate::providers::{Priority, Weight}, 4 | std::collections::HashMap, 5 | }; 6 | 7 | #[derive(Debug)] 8 | pub struct MonadConfig { 9 | pub supported_chains: HashMap, 10 | } 11 | 12 | impl Default for MonadConfig { 13 | fn default() -> Self { 14 | Self { 15 | supported_chains: default_supported_chains(), 16 | } 17 | } 18 | } 19 | 20 | impl ProviderConfig for MonadConfig { 21 | fn supported_chains(self) -> HashMap { 22 | self.supported_chains 23 | } 24 | 25 | fn supported_ws_chains(self) -> HashMap { 26 | HashMap::new() 27 | } 28 | 29 | fn provider_kind(&self) -> crate::providers::ProviderKind { 30 | crate::providers::ProviderKind::Monad 31 | } 32 | } 33 | 34 | fn default_supported_chains() -> HashMap { 35 | // Keep in-sync with SUPPORTED_CHAINS.md 36 | 37 | HashMap::from([ 38 | // Monad testnet 39 | ( 40 | "eip155:10143".into(), 41 | ( 42 | "https://testnet-rpc.monad.xyz/".into(), 43 | Weight::new(Priority::Normal).unwrap(), 44 | ), 45 | ), 46 | ]) 47 | } 48 | -------------------------------------------------------------------------------- /src/env/moonbeam.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::ProviderConfig, 3 | crate::providers::{Priority, Weight}, 4 | std::collections::HashMap, 5 | }; 6 | 7 | #[derive(Debug)] 8 | pub struct MoonbeamConfig { 9 | pub supported_chains: HashMap, 10 | } 11 | 12 | impl Default for MoonbeamConfig { 13 | fn default() -> Self { 14 | Self { 15 | supported_chains: default_supported_chains(), 16 | } 17 | } 18 | } 19 | 20 | impl ProviderConfig for MoonbeamConfig { 21 | fn supported_chains(self) -> HashMap { 22 | self.supported_chains 23 | } 24 | 25 | fn supported_ws_chains(self) -> HashMap { 26 
| HashMap::new() 27 | } 28 | 29 | fn provider_kind(&self) -> crate::providers::ProviderKind { 30 | crate::providers::ProviderKind::Moonbeam 31 | } 32 | } 33 | 34 | fn default_supported_chains() -> HashMap { 35 | // Keep in-sync with SUPPORTED_CHAINS.md 36 | 37 | HashMap::from([ 38 | // Moonbeam Mainnet 39 | ( 40 | "eip155:1284".into(), 41 | ( 42 | "https://rpc.api.moonbeam.network".into(), 43 | Weight::new(Priority::Normal).unwrap(), 44 | ), 45 | ), 46 | ]) 47 | } 48 | -------------------------------------------------------------------------------- /src/env/morph.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::ProviderConfig, 3 | crate::providers::{Priority, Weight}, 4 | std::collections::HashMap, 5 | }; 6 | 7 | #[derive(Debug)] 8 | pub struct MorphConfig { 9 | pub supported_chains: HashMap, 10 | } 11 | 12 | impl Default for MorphConfig { 13 | fn default() -> Self { 14 | Self { 15 | supported_chains: default_supported_chains(), 16 | } 17 | } 18 | } 19 | 20 | impl ProviderConfig for MorphConfig { 21 | fn supported_chains(self) -> HashMap { 22 | self.supported_chains 23 | } 24 | 25 | fn supported_ws_chains(self) -> HashMap { 26 | HashMap::new() 27 | } 28 | 29 | fn provider_kind(&self) -> crate::providers::ProviderKind { 30 | crate::providers::ProviderKind::Morph 31 | } 32 | } 33 | 34 | fn default_supported_chains() -> HashMap { 35 | // Keep in-sync with SUPPORTED_CHAINS.md 36 | 37 | HashMap::from([ 38 | // Morph Mainnet 39 | ( 40 | "eip155:2818".into(), 41 | ( 42 | "rpc-quicknode".into(), 43 | Weight::new(Priority::Normal).unwrap(), 44 | ), 45 | ), 46 | // Morph Holesky 47 | ( 48 | "eip155:2810".into(), 49 | ( 50 | "rpc-quicknode-holesky".into(), 51 | Weight::new(Priority::Normal).unwrap(), 52 | ), 53 | ), 54 | ]) 55 | } 56 | -------------------------------------------------------------------------------- /src/env/near.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::ProviderConfig, 3 | crate::providers::{Priority, Weight}, 4 | std::collections::HashMap, 5 | }; 6 | 7 | #[derive(Debug)] 8 | pub struct NearConfig { 9 | pub supported_chains: HashMap, 10 | } 11 | 12 | impl Default for NearConfig { 13 | fn default() -> Self { 14 | Self { 15 | supported_chains: default_supported_chains(), 16 | } 17 | } 18 | } 19 | 20 | impl ProviderConfig for NearConfig { 21 | fn supported_chains(self) -> HashMap { 22 | self.supported_chains 23 | } 24 | 25 | fn supported_ws_chains(self) -> HashMap { 26 | HashMap::new() 27 | } 28 | 29 | fn provider_kind(&self) -> crate::providers::ProviderKind { 30 | crate::providers::ProviderKind::Near 31 | } 32 | } 33 | 34 | fn default_supported_chains() -> HashMap { 35 | // Keep in-sync with SUPPORTED_CHAINS.md 36 | 37 | HashMap::from([ 38 | // Near protocol 39 | ( 40 | "near:mainnet".into(), 41 | ( 42 | "https://rpc.mainnet.near.org".into(), 43 | Weight::new(Priority::High).unwrap(), 44 | ), 45 | ), 46 | ]) 47 | } 48 | -------------------------------------------------------------------------------- /src/env/odyssey.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::ProviderConfig, 3 | crate::providers::{Priority, Weight}, 4 | std::collections::HashMap, 5 | }; 6 | 7 | #[derive(Debug)] 8 | pub struct OdysseyConfig { 9 | pub supported_chains: HashMap, 10 | } 11 | 12 | impl Default for OdysseyConfig { 13 | fn default() -> Self { 14 | Self { 15 | supported_chains: default_supported_chains(), 16 | } 17 | } 18 | } 19 
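// The env config modules all follow the same shape: a map from CAIP-2 chain ID to an
// (endpoint, Weight) tuple, with Weight::new(Priority::...) seeding the routing weight.
// Note that the entry below labels "eip155:911867" (https://odyssey.ithaca.xyz) as
// "Solana Mainnet"; that chain is the Ithaca Odyssey testnet, so the inline comment
// appears to be a copy-paste slip. A hypothetical additional entry, assuming the map
// type is HashMap<String, (String, Weight)> as the other provider configs suggest,
// would look like:
//
//     (
//         "eip155:10".into(), // hypothetical example: Optimism Mainnet
//         (
//             "https://example-rpc.invalid".into(), // placeholder endpoint
//             Weight::new(Priority::Normal).unwrap(),
//         ),
//     ),
//
// Keep SUPPORTED_CHAINS.md in sync whenever entries change, as the comments above note.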
| 20 | impl ProviderConfig for OdysseyConfig { 21 | fn supported_chains(self) -> HashMap { 22 | self.supported_chains 23 | } 24 | 25 | fn supported_ws_chains(self) -> HashMap { 26 | HashMap::new() 27 | } 28 | 29 | fn provider_kind(&self) -> crate::providers::ProviderKind { 30 | crate::providers::ProviderKind::Odyssey 31 | } 32 | } 33 | 34 | fn default_supported_chains() -> HashMap { 35 | // Keep in-sync with SUPPORTED_CHAINS.md 36 | 37 | HashMap::from([ 38 | // Solana Mainnet 39 | ( 40 | "eip155:911867".into(), 41 | ( 42 | "https://odyssey.ithaca.xyz".into(), 43 | Weight::new(Priority::Normal).unwrap(), 44 | ), 45 | ), 46 | ]) 47 | } 48 | -------------------------------------------------------------------------------- /src/env/onerpc.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::ProviderConfig, 3 | crate::providers::{Priority, Weight}, 4 | std::collections::HashMap, 5 | }; 6 | 7 | #[derive(Debug)] 8 | pub struct OneRpcConfig { 9 | pub supported_chains: HashMap, 10 | } 11 | 12 | impl Default for OneRpcConfig { 13 | fn default() -> Self { 14 | Self { 15 | supported_chains: default_supported_chains(), 16 | } 17 | } 18 | } 19 | 20 | impl ProviderConfig for OneRpcConfig { 21 | fn supported_chains(self) -> HashMap { 22 | self.supported_chains 23 | } 24 | 25 | fn supported_ws_chains(self) -> HashMap { 26 | HashMap::new() 27 | } 28 | 29 | fn provider_kind(&self) -> crate::providers::ProviderKind { 30 | crate::providers::ProviderKind::OneRpc 31 | } 32 | } 33 | 34 | fn default_supported_chains() -> HashMap { 35 | // Keep in-sync with SUPPORTED_CHAINS.md 36 | 37 | HashMap::from([ 38 | // Ethereum mainnet 39 | ( 40 | "eip155:1".into(), 41 | ("eth".into(), Weight::new(Priority::Minimal).unwrap()), 42 | ), 43 | // Arbitrum One 44 | ( 45 | "eip155:42161".into(), 46 | ("arb".into(), Weight::new(Priority::Low).unwrap()), 47 | ), 48 | // BSC 49 | ( 50 | "eip155:56".into(), 51 | ("bnb".into(), Weight::new(Priority::Low).unwrap()), 52 | ), 53 | // Polygon 54 | ( 55 | "eip155:137".into(), 56 | ("matic".into(), Weight::new(Priority::Low).unwrap()), 57 | ), 58 | // Base 59 | ( 60 | "eip155:8453".into(), 61 | ("base".into(), Weight::new(Priority::Low).unwrap()), 62 | ), 63 | // Klaytn 64 | ( 65 | "eip155:8217".into(), 66 | ("klay".into(), Weight::new(Priority::Low).unwrap()), 67 | ), 68 | ]) 69 | } 70 | -------------------------------------------------------------------------------- /src/env/server.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::utils::{self, network::NetworkInterfaceError}, 3 | serde::Deserialize, 4 | serde_piecewise_default::DeserializePiecewiseDefault, 5 | std::net::IpAddr, 6 | }; 7 | 8 | #[derive(DeserializePiecewiseDefault, Debug, Clone, PartialEq, Eq)] 9 | pub struct ServerConfig { 10 | pub host: String, 11 | pub port: u16, 12 | pub prometheus_port: u16, 13 | pub log_level: String, 14 | pub external_ip: Option, 15 | pub s3_endpoint: Option, 16 | pub blocked_countries: Vec, 17 | pub geoip_db_bucket: Option, 18 | pub geoip_db_key: Option, 19 | pub testing_project_id: Option, 20 | pub validate_project_id: bool, 21 | } 22 | 23 | impl Default for ServerConfig { 24 | fn default() -> Self { 25 | ServerConfig { 26 | host: "127.0.0.1".to_string(), 27 | port: 3000, 28 | prometheus_port: 4000, 29 | log_level: "INFO".to_string(), 30 | external_ip: None, 31 | s3_endpoint: None, 32 | blocked_countries: Vec::new(), 33 | geoip_db_bucket: None, 34 | geoip_db_key: None, 35 | 
testing_project_id: None, 36 | validate_project_id: true, 37 | } 38 | } 39 | } 40 | 41 | impl ServerConfig { 42 | pub fn external_ip(&self) -> Result { 43 | self.external_ip 44 | .map(Ok) 45 | .unwrap_or_else(utils::network::find_public_ip_addr) 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /src/env/solscan.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::BalanceProviderConfig, 3 | crate::{ 4 | providers::{Priority, Weight}, 5 | utils::crypto::CaipNamespaces, 6 | }, 7 | std::collections::HashMap, 8 | }; 9 | 10 | pub struct SolScanConfig { 11 | pub api_key: String, 12 | pub supported_namespaces: HashMap, 13 | } 14 | 15 | impl SolScanConfig { 16 | pub fn new(api_key: String) -> Self { 17 | Self { 18 | api_key, 19 | supported_namespaces: default_supported_namespaces(), 20 | } 21 | } 22 | } 23 | 24 | impl BalanceProviderConfig for SolScanConfig { 25 | fn supported_namespaces(self) -> HashMap { 26 | self.supported_namespaces 27 | } 28 | 29 | fn provider_kind(&self) -> crate::providers::ProviderKind { 30 | crate::providers::ProviderKind::SolScan 31 | } 32 | } 33 | 34 | fn default_supported_namespaces() -> HashMap { 35 | HashMap::from([(CaipNamespaces::Solana, Weight::new(Priority::Low).unwrap())]) 36 | } 37 | -------------------------------------------------------------------------------- /src/env/sui.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::ProviderConfig, 3 | crate::providers::{Priority, Weight}, 4 | std::collections::HashMap, 5 | }; 6 | 7 | #[derive(Debug)] 8 | pub struct SuiConfig { 9 | pub supported_chains: HashMap, 10 | } 11 | 12 | impl Default for SuiConfig { 13 | fn default() -> Self { 14 | Self { 15 | supported_chains: default_supported_chains(), 16 | } 17 | } 18 | } 19 | 20 | impl ProviderConfig for SuiConfig { 21 | fn supported_chains(self) -> HashMap { 22 | self.supported_chains 23 | } 24 | 25 | fn supported_ws_chains(self) -> HashMap { 26 | HashMap::new() 27 | } 28 | 29 | fn provider_kind(&self) -> crate::providers::ProviderKind { 30 | crate::providers::ProviderKind::Sui 31 | } 32 | } 33 | 34 | fn default_supported_chains() -> HashMap { 35 | // Keep in-sync with SUPPORTED_CHAINS.md 36 | 37 | HashMap::from([ 38 | // Sui mainnet 39 | ( 40 | "sui:mainnet".into(), 41 | ( 42 | "https://fullnode.mainnet.sui.io".into(), 43 | Weight::new(Priority::Normal).unwrap(), 44 | ), 45 | ), 46 | // Sui testnet 47 | ( 48 | "sui:testnet".into(), 49 | ( 50 | "https://fullnode.testnet.sui.io".into(), 51 | Weight::new(Priority::Normal).unwrap(), 52 | ), 53 | ), 54 | // Sui devnet 55 | ( 56 | "sui:devnet".into(), 57 | ( 58 | "https://fullnode.devnet.sui.io".into(), 59 | Weight::new(Priority::Normal).unwrap(), 60 | ), 61 | ), 62 | ]) 63 | } 64 | -------------------------------------------------------------------------------- /src/env/unichain.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::ProviderConfig, 3 | crate::providers::{Priority, Weight}, 4 | std::collections::HashMap, 5 | }; 6 | 7 | #[derive(Debug)] 8 | pub struct UnichainConfig { 9 | pub supported_chains: HashMap, 10 | } 11 | 12 | impl Default for UnichainConfig { 13 | fn default() -> Self { 14 | Self { 15 | supported_chains: default_supported_chains(), 16 | } 17 | } 18 | } 19 | 20 | impl ProviderConfig for UnichainConfig { 21 | fn supported_chains(self) -> HashMap { 22 | self.supported_chains 23 | } 24 | 25 | fn 
supported_ws_chains(self) -> HashMap { 26 | HashMap::new() 27 | } 28 | 29 | fn provider_kind(&self) -> crate::providers::ProviderKind { 30 | crate::providers::ProviderKind::Unichain 31 | } 32 | } 33 | 34 | fn default_supported_chains() -> HashMap { 35 | // Keep in-sync with SUPPORTED_CHAINS.md 36 | 37 | HashMap::from([ 38 | // Unichain Sepolia 39 | ( 40 | "eip155:1301".into(), 41 | ( 42 | "https://sepolia.unichain.org".into(), 43 | Weight::new(Priority::Normal).unwrap(), 44 | ), 45 | ), 46 | ]) 47 | } 48 | -------------------------------------------------------------------------------- /src/env/wemix.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::ProviderConfig, 3 | crate::providers::{Priority, Weight}, 4 | std::collections::HashMap, 5 | }; 6 | 7 | #[derive(Debug)] 8 | pub struct WemixConfig { 9 | pub supported_chains: HashMap, 10 | } 11 | 12 | impl Default for WemixConfig { 13 | fn default() -> Self { 14 | Self { 15 | supported_chains: default_supported_chains(), 16 | } 17 | } 18 | } 19 | 20 | impl ProviderConfig for WemixConfig { 21 | fn supported_chains(self) -> HashMap { 22 | self.supported_chains 23 | } 24 | 25 | fn supported_ws_chains(self) -> HashMap { 26 | HashMap::new() 27 | } 28 | 29 | fn provider_kind(&self) -> crate::providers::ProviderKind { 30 | crate::providers::ProviderKind::Wemix 31 | } 32 | } 33 | 34 | fn default_supported_chains() -> HashMap { 35 | // Keep in-sync with SUPPORTED_CHAINS.md 36 | 37 | HashMap::from([ 38 | // Wemix Mainnet 39 | ( 40 | "eip155:1111".into(), 41 | ( 42 | "https://api.wemix.com/".into(), 43 | Weight::new(Priority::Normal).unwrap(), 44 | ), 45 | ), 46 | // Wemix Testnet 47 | ( 48 | "eip155:1112".into(), 49 | ( 50 | "https://api.test.wemix.com".into(), 51 | Weight::new(Priority::Normal).unwrap(), 52 | ), 53 | ), 54 | ]) 55 | } 56 | -------------------------------------------------------------------------------- /src/env/zerion.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::BalanceProviderConfig, 3 | crate::{ 4 | providers::{Priority, Weight}, 5 | utils::crypto::CaipNamespaces, 6 | }, 7 | std::collections::HashMap, 8 | }; 9 | 10 | #[derive(Debug)] 11 | pub struct ZerionConfig { 12 | pub api_key: String, 13 | pub supported_namespaces: HashMap, 14 | } 15 | 16 | impl ZerionConfig { 17 | pub fn new(api_key: String) -> Self { 18 | Self { 19 | api_key, 20 | supported_namespaces: default_supported_namespaces(), 21 | } 22 | } 23 | } 24 | 25 | impl BalanceProviderConfig for ZerionConfig { 26 | fn supported_namespaces(self) -> HashMap { 27 | self.supported_namespaces 28 | } 29 | 30 | fn provider_kind(&self) -> crate::providers::ProviderKind { 31 | crate::providers::ProviderKind::Zerion 32 | } 33 | } 34 | 35 | fn default_supported_namespaces() -> HashMap { 36 | HashMap::from([( 37 | CaipNamespaces::Eip155, 38 | Weight::new(Priority::Minimal).unwrap(), 39 | )]) 40 | } 41 | -------------------------------------------------------------------------------- /src/env/zksync.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::ProviderConfig, 3 | crate::providers::{Priority, Weight}, 4 | std::collections::HashMap, 5 | }; 6 | 7 | #[derive(Debug)] 8 | pub struct ZKSyncConfig { 9 | pub supported_chains: HashMap, 10 | } 11 | 12 | impl Default for ZKSyncConfig { 13 | fn default() -> Self { 14 | Self { 15 | supported_chains: default_supported_chains(), 16 | } 17 | } 18 | } 19 | 20 | impl ProviderConfig 
for ZKSyncConfig { 21 | fn supported_chains(self) -> HashMap { 22 | self.supported_chains 23 | } 24 | 25 | fn supported_ws_chains(self) -> HashMap { 26 | HashMap::new() 27 | } 28 | 29 | fn provider_kind(&self) -> crate::providers::ProviderKind { 30 | crate::providers::ProviderKind::ZKSync 31 | } 32 | } 33 | 34 | fn default_supported_chains() -> HashMap { 35 | // Keep in-sync with SUPPORTED_CHAINS.md 36 | 37 | HashMap::from([ 38 | // zkSync Sepolia Testnet 39 | ( 40 | "eip155:300".into(), 41 | ( 42 | "https://sepolia.era.zksync.dev".into(), 43 | Weight::new(Priority::Normal).unwrap(), 44 | ), 45 | ), 46 | // zkSync Mainnet 47 | ( 48 | "eip155:324".into(), 49 | ( 50 | "https://mainnet.era.zksync.io".into(), 51 | Weight::new(Priority::Normal).unwrap(), 52 | ), 53 | ), 54 | ]) 55 | } 56 | -------------------------------------------------------------------------------- /src/env/zora.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::ProviderConfig, 3 | crate::providers::{Priority, Weight}, 4 | std::collections::HashMap, 5 | }; 6 | 7 | #[derive(Debug)] 8 | pub struct ZoraConfig { 9 | pub supported_chains: HashMap, 10 | pub supported_ws_chains: HashMap, 11 | } 12 | 13 | impl Default for ZoraConfig { 14 | fn default() -> Self { 15 | Self { 16 | supported_chains: default_supported_chains(), 17 | supported_ws_chains: default_ws_supported_chains(), 18 | } 19 | } 20 | } 21 | 22 | impl ProviderConfig for ZoraConfig { 23 | fn supported_chains(self) -> HashMap { 24 | self.supported_chains 25 | } 26 | 27 | fn supported_ws_chains(self) -> HashMap { 28 | self.supported_ws_chains 29 | } 30 | 31 | fn provider_kind(&self) -> crate::providers::ProviderKind { 32 | crate::providers::ProviderKind::Zora 33 | } 34 | } 35 | 36 | fn default_supported_chains() -> HashMap { 37 | // Keep in-sync with SUPPORTED_CHAINS.md 38 | 39 | HashMap::from([ 40 | // Zora Mainnet 41 | ( 42 | "eip155:7777777".into(), 43 | ( 44 | "https://rpc.zora.energy".into(), 45 | Weight::new(Priority::Normal).unwrap(), 46 | ), 47 | ), 48 | // Zora Sepolia 49 | ( 50 | "eip155:999999999".into(), 51 | ( 52 | "https://sepolia.rpc.zora.energy".into(), 53 | Weight::new(Priority::Normal).unwrap(), 54 | ), 55 | ), 56 | ]) 57 | } 58 | 59 | fn default_ws_supported_chains() -> HashMap { 60 | // Keep in-sync with SUPPORTED_CHAINS.md 61 | 62 | HashMap::from([ 63 | // Zora Mainnet 64 | ( 65 | "eip155:7777777".into(), 66 | ( 67 | "wss://rpc.zora.energy".into(), 68 | Weight::new(Priority::Normal).unwrap(), 69 | ), 70 | ), 71 | ]) 72 | } 73 | -------------------------------------------------------------------------------- /src/handlers/chain_agnostic/lifi.rs: -------------------------------------------------------------------------------- 1 | use crate::error::RpcError; 2 | use yttrium::chain_abstraction::solana; 3 | 4 | pub fn caip2_to_lifi_chain_id(caip2: &str) -> Result<&str, RpcError> { 5 | match caip2 { 6 | solana::SOLANA_MAINNET_CAIP2 => Ok("SOL"), 7 | id if id.starts_with("eip155:") => Ok(id.trim_start_matches("eip155:")), 8 | _ => Err(RpcError::InvalidValue(caip2.to_string())), 9 | } 10 | } 11 | 12 | #[cfg(test)] 13 | mod tests { 14 | use super::*; 15 | 16 | #[test] 17 | fn test_caip2_to_lifi_chain_id() { 18 | assert!(matches!(caip2_to_lifi_chain_id("eip155:1"), Ok("1"))); 19 | assert!(matches!( 20 | caip2_to_lifi_chain_id("solana:5eykt4UsFv8P8NJdTREpY1vzqKqZKvdp"), 21 | Ok("SOL") 22 | )); 23 | } 24 | } 25 | -------------------------------------------------------------------------------- 
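// A possible extra test for caip2_to_lifi_chain_id above (hypothetical sketch; it only
// assumes that unknown namespaces surface as RpcError::InvalidValue, which is what the
// function's fallback arm returns):
//
//     #[test]
//     fn test_caip2_to_lifi_chain_id_rejects_unknown_namespace() {
//         assert!(matches!(
//             caip2_to_lifi_chain_id("cosmos:cosmoshub-4"),
//             Err(RpcError::InvalidValue(_))
//         ));
//     }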
/src/handlers/convert/allowance.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::super::HANDLER_TASK_METRICS, 3 | crate::{error::RpcError, state::AppState}, 4 | axum::{ 5 | extract::{Query, State}, 6 | response::{IntoResponse, Response}, 7 | Json, 8 | }, 9 | serde::{Deserialize, Serialize}, 10 | std::sync::Arc, 11 | tap::TapFallible, 12 | tracing::log::error, 13 | wc::future::FutureExt, 14 | }; 15 | 16 | #[derive(Debug, Deserialize, Clone)] 17 | #[serde(rename_all = "camelCase")] 18 | pub struct AllowanceQueryParams { 19 | pub project_id: String, 20 | pub token_address: String, 21 | pub user_address: String, 22 | } 23 | 24 | #[derive(Debug, Deserialize, Serialize, Clone)] 25 | #[serde(rename_all = "camelCase")] 26 | pub struct AllowanceResponseBody { 27 | pub allowance: String, 28 | } 29 | 30 | pub async fn handler( 31 | state: State>, 32 | query: Query, 33 | ) -> Result { 34 | handler_internal(state, query) 35 | .with_metrics(HANDLER_TASK_METRICS.with_name("conversion_allowance")) 36 | .await 37 | } 38 | 39 | #[tracing::instrument(skip_all, level = "debug")] 40 | async fn handler_internal( 41 | state: State>, 42 | query: Query, 43 | ) -> Result { 44 | state 45 | .validate_project_access_and_quota(&query.project_id) 46 | .await?; 47 | 48 | let response = state 49 | .providers 50 | .conversion_provider 51 | .get_allowance(query.0, state.metrics.clone()) 52 | .await 53 | .tap_err(|e| { 54 | error!("Failed to call get allownce with {}", e); 55 | })?; 56 | 57 | Ok(Json(response).into_response()) 58 | } 59 | -------------------------------------------------------------------------------- /src/handlers/convert/approve.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::super::HANDLER_TASK_METRICS, 3 | crate::{error::RpcError, state::AppState}, 4 | axum::{ 5 | extract::{Query, State}, 6 | response::{IntoResponse, Response}, 7 | Json, 8 | }, 9 | serde::{Deserialize, Serialize}, 10 | std::sync::Arc, 11 | tap::TapFallible, 12 | tracing::log::error, 13 | wc::future::FutureExt, 14 | }; 15 | 16 | #[derive(Debug, Deserialize, Clone)] 17 | #[serde(rename_all = "camelCase")] 18 | pub struct ConvertApproveQueryParams { 19 | pub project_id: String, 20 | pub from: String, 21 | pub to: String, 22 | pub amount: Option, 23 | } 24 | 25 | #[derive(Debug, Deserialize, Serialize, Clone)] 26 | pub struct ConvertApproveResponseBody { 27 | pub tx: ConvertApproveTx, 28 | } 29 | 30 | #[derive(Debug, Deserialize, Serialize, Clone)] 31 | pub struct ConvertApproveTx { 32 | pub from: String, 33 | pub to: String, 34 | pub data: String, 35 | pub value: String, 36 | pub eip155: Option, 37 | } 38 | 39 | #[derive(Debug, Deserialize, Serialize, Clone)] 40 | #[serde(rename_all = "camelCase")] 41 | pub struct ConvertApproveTxEip155 { 42 | pub gas_price: String, 43 | } 44 | 45 | pub async fn handler( 46 | state: State>, 47 | query: Query, 48 | ) -> Result { 49 | handler_internal(state, query) 50 | .with_metrics(HANDLER_TASK_METRICS.with_name("convert_approve_tx")) 51 | .await 52 | } 53 | 54 | #[tracing::instrument(skip_all, level = "debug")] 55 | async fn handler_internal( 56 | state: State>, 57 | query: Query, 58 | ) -> Result { 59 | state 60 | .validate_project_access_and_quota(&query.project_id) 61 | .await?; 62 | 63 | let response = state 64 | .providers 65 | .conversion_provider 66 | .build_approve_tx(query.0, state.metrics.clone()) 67 | .await 68 | .tap_err(|e| { 69 | error!("Failed to call build approve tx for 
conversion with {}", e); 70 | })?; 71 | 72 | Ok(Json(response).into_response()) 73 | } 74 | -------------------------------------------------------------------------------- /src/handlers/convert/gas_price.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::super::HANDLER_TASK_METRICS, 3 | crate::{error::RpcError, state::AppState}, 4 | axum::{ 5 | extract::{Query, State}, 6 | response::{IntoResponse, Response}, 7 | Json, 8 | }, 9 | serde::{Deserialize, Serialize}, 10 | std::sync::Arc, 11 | tap::TapFallible, 12 | tracing::log::error, 13 | wc::future::FutureExt, 14 | }; 15 | 16 | #[derive(Debug, Deserialize, Clone)] 17 | #[serde(rename_all = "camelCase")] 18 | pub struct GasPriceQueryParams { 19 | pub project_id: String, 20 | pub chain_id: String, 21 | } 22 | 23 | #[derive(Debug, Deserialize, Serialize, Clone)] 24 | #[serde(rename_all = "camelCase")] 25 | pub struct GasPriceQueryResponseBody { 26 | pub standard: String, 27 | pub fast: String, 28 | pub instant: String, 29 | } 30 | 31 | pub async fn handler( 32 | state: State>, 33 | query: Query, 34 | ) -> Result { 35 | handler_internal(state, query) 36 | .with_metrics(HANDLER_TASK_METRICS.with_name("gas_price")) 37 | .await 38 | } 39 | 40 | #[tracing::instrument(skip_all, level = "debug")] 41 | async fn handler_internal( 42 | state: State>, 43 | query: Query, 44 | ) -> Result { 45 | state 46 | .validate_project_access_and_quota(&query.project_id) 47 | .await?; 48 | 49 | let response = state 50 | .providers 51 | .conversion_provider 52 | .get_gas_price(query.0, state.metrics.clone()) 53 | .await 54 | .tap_err(|e| { 55 | error!("Failed to call get gas price with {}", e); 56 | })?; 57 | 58 | Ok(Json(response).into_response()) 59 | } 60 | -------------------------------------------------------------------------------- /src/handlers/convert/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod allowance; 2 | pub mod approve; 3 | pub mod gas_price; 4 | pub mod quotes; 5 | pub mod tokens; 6 | pub mod transaction; 7 | -------------------------------------------------------------------------------- /src/handlers/convert/quotes.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::super::HANDLER_TASK_METRICS, 3 | crate::{error::RpcError, state::AppState}, 4 | axum::{ 5 | extract::{Query, State}, 6 | response::{IntoResponse, Response}, 7 | Json, 8 | }, 9 | serde::{Deserialize, Serialize}, 10 | std::sync::Arc, 11 | tap::TapFallible, 12 | tracing::log::error, 13 | wc::future::FutureExt, 14 | }; 15 | 16 | #[derive(Debug, Deserialize, Clone)] 17 | #[serde(rename_all = "camelCase")] 18 | pub struct ConvertQuoteQueryParams { 19 | pub project_id: String, 20 | pub amount: String, 21 | pub from: String, 22 | pub to: String, 23 | pub gas_price: Option, 24 | } 25 | 26 | #[derive(Debug, Deserialize, Serialize, Clone)] 27 | #[serde(rename_all = "camelCase")] 28 | pub struct ConvertQuoteResponseBody { 29 | pub quotes: Vec, 30 | } 31 | 32 | #[derive(Debug, Deserialize, Serialize, Clone)] 33 | #[serde(rename_all = "camelCase")] 34 | pub struct QuoteItem { 35 | pub id: Option, 36 | pub from_amount: String, 37 | pub from_account: String, 38 | pub to_amount: String, 39 | pub to_account: String, 40 | } 41 | 42 | pub async fn handler( 43 | state: State>, 44 | query: Query, 45 | ) -> Result { 46 | handler_internal(state, query) 47 | .with_metrics(HANDLER_TASK_METRICS.with_name("convert_quote")) 48 | .await 49 | } 50 | 51 | 
#[tracing::instrument(skip_all, level = "debug")] 52 | async fn handler_internal( 53 | state: State>, 54 | query: Query, 55 | ) -> Result { 56 | state 57 | .validate_project_access_and_quota(&query.project_id) 58 | .await?; 59 | 60 | let response = state 61 | .providers 62 | .conversion_provider 63 | .get_convert_quote(query.0, state.metrics.clone()) 64 | .await 65 | .tap_err(|e| { 66 | error!("Failed to call get conversion quotes with {}", e); 67 | })?; 68 | 69 | Ok(Json(response).into_response()) 70 | } 71 | -------------------------------------------------------------------------------- /src/handlers/convert/tokens.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::super::HANDLER_TASK_METRICS, 3 | crate::{error::RpcError, state::AppState}, 4 | axum::{ 5 | extract::{Query, State}, 6 | response::{IntoResponse, Response}, 7 | Json, 8 | }, 9 | serde::{Deserialize, Serialize}, 10 | std::sync::Arc, 11 | tap::TapFallible, 12 | tracing::log::error, 13 | wc::future::FutureExt, 14 | }; 15 | 16 | #[derive(Debug, Deserialize, Clone)] 17 | #[serde(rename_all = "camelCase")] 18 | pub struct TokensListQueryParams { 19 | pub project_id: String, 20 | pub chain_id: String, 21 | /// Filter tokens by the implementation address 22 | pub address: Option, 23 | } 24 | 25 | #[derive(Debug, Deserialize, Serialize, Clone)] 26 | #[serde(rename_all = "camelCase")] 27 | pub struct TokensListResponseBody { 28 | pub tokens: Vec, 29 | } 30 | 31 | #[derive(Debug, Deserialize, Serialize, Clone)] 32 | #[serde(rename_all = "camelCase")] 33 | pub struct TokenItem { 34 | pub name: String, 35 | pub symbol: String, 36 | pub address: String, 37 | pub decimals: u8, 38 | pub logo_uri: Option, 39 | pub eip2612: Option, 40 | } 41 | 42 | pub async fn handler( 43 | state: State>, 44 | query: Query, 45 | ) -> Result { 46 | handler_internal(state, query) 47 | .with_metrics(HANDLER_TASK_METRICS.with_name("tokens_list")) 48 | .await 49 | } 50 | 51 | #[tracing::instrument(skip_all, level = "debug")] 52 | async fn handler_internal( 53 | state: State>, 54 | query: Query, 55 | ) -> Result { 56 | state 57 | .validate_project_access_and_quota(&query.project_id) 58 | .await?; 59 | 60 | let response = state 61 | .providers 62 | .conversion_provider 63 | .get_tokens_list(query.0, state.metrics.clone()) 64 | .await 65 | .tap_err(|e| { 66 | error!("Failed to call get tokens list for conversion with {}", e); 67 | })?; 68 | 69 | Ok(Json(response).into_response()) 70 | } 71 | -------------------------------------------------------------------------------- /src/handlers/generators/mod.rs: -------------------------------------------------------------------------------- 1 | use serde::Deserialize; 2 | 3 | pub mod onrampurl; 4 | 5 | #[derive(Debug, Deserialize, Clone)] 6 | #[serde(rename_all = "camelCase")] 7 | pub struct GeneratorQueryParams { 8 | pub project_id: String, 9 | } 10 | -------------------------------------------------------------------------------- /src/handlers/health.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::state::AppState, 3 | axum::{extract::State, response::IntoResponse}, 4 | hyper::StatusCode, 5 | std::sync::Arc, 6 | }; 7 | 8 | pub async fn handler(State(state): State>) -> impl IntoResponse { 9 | ( 10 | StatusCode::OK, 11 | format!( 12 | "OK v{}, commit hash: {}, features: {}, uptime: {:?} seconds", 13 | state.compile_info.build().version(), 14 | state.compile_info.git().short_hash(), 15 | 
state.compile_info.build().features(), 16 | state.uptime.elapsed().as_secs() 17 | ), 18 | ) 19 | } 20 | -------------------------------------------------------------------------------- /src/handlers/metrics.rs: -------------------------------------------------------------------------------- 1 | use { 2 | axum::response::IntoResponse, hyper::StatusCode, tracing::error, wc::metrics::ServiceMetrics, 3 | }; 4 | 5 | pub async fn handler() -> impl IntoResponse { 6 | let result = ServiceMetrics::export(); 7 | 8 | match result { 9 | Ok(content) => (StatusCode::OK, content), 10 | Err(e) => { 11 | error!(?e, "Failed to parse metrics"); 12 | 13 | ( 14 | StatusCode::INTERNAL_SERVER_ERROR, 15 | "Failed to get metrics".to_string(), 16 | ) 17 | } 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /src/handlers/onramp/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod multi_quotes; 2 | pub mod options; 3 | pub mod properties; 4 | pub mod providers; 5 | pub mod quotes; 6 | pub mod widget; 7 | -------------------------------------------------------------------------------- /src/handlers/onramp/properties.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::{error::RpcError, handlers::HANDLER_TASK_METRICS, state::AppState}, 3 | axum::{ 4 | extract::{Query, State}, 5 | response::{IntoResponse, Response}, 6 | Json, 7 | }, 8 | serde::{Deserialize, Serialize}, 9 | std::sync::Arc, 10 | tap::TapFallible, 11 | tracing::log::error, 12 | wc::future::FutureExt, 13 | }; 14 | 15 | #[derive(Debug, Serialize, Deserialize, Clone)] 16 | #[serde(rename_all = "camelCase")] 17 | pub struct QueryParams { 18 | pub r#type: PropertyType, 19 | pub project_id: String, 20 | pub countries: Option, 21 | } 22 | 23 | #[derive(Debug, Serialize, Deserialize, Clone)] 24 | #[serde(rename_all = "kebab-case")] 25 | pub enum PropertyType { 26 | Countries, 27 | CryptoCurrencies, 28 | FiatCurrencies, 29 | PaymentMethods, 30 | FiatPurchasesLimits, 31 | } 32 | 33 | pub async fn handler( 34 | state: State>, 35 | query: Query, 36 | ) -> Result { 37 | handler_internal(state, query) 38 | .with_metrics(HANDLER_TASK_METRICS.with_name("onramp_providers_properties")) 39 | .await 40 | } 41 | 42 | #[tracing::instrument(skip_all, level = "debug")] 43 | async fn handler_internal( 44 | state: State>, 45 | query: Query, 46 | ) -> Result { 47 | state 48 | .validate_project_access_and_quota(&query.project_id) 49 | .await?; 50 | 51 | let providers_properties = state 52 | .providers 53 | .onramp_multi_provider 54 | .get_providers_properties(query.0, state.metrics.clone()) 55 | .await 56 | .tap_err(|e| { 57 | error!("Failed to call onramp providers properties with {}", e); 58 | })?; 59 | 60 | Ok(Json(providers_properties).into_response()) 61 | } 62 | -------------------------------------------------------------------------------- /src/handlers/onramp/providers.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::{error::RpcError, handlers::HANDLER_TASK_METRICS, state::AppState}, 3 | axum::{ 4 | extract::{Query, State}, 5 | response::{IntoResponse, Response}, 6 | Json, 7 | }, 8 | serde::{Deserialize, Serialize}, 9 | std::sync::Arc, 10 | tap::TapFallible, 11 | tracing::log::error, 12 | wc::future::FutureExt, 13 | }; 14 | 15 | #[derive(Debug, Serialize, Deserialize, Clone)] 16 | #[serde(rename_all = "camelCase")] 17 | pub struct QueryParams { 18 | pub countries: 
Option, 19 | pub project_id: String, 20 | } 21 | 22 | #[derive(Debug, Serialize, Deserialize, Clone)] 23 | #[serde(rename_all = "camelCase")] 24 | pub struct ProvidersResponse { 25 | pub categories: Vec, 26 | pub logos: Logos, 27 | pub name: String, 28 | pub service_provider: String, 29 | pub status: String, 30 | pub website_url: String, 31 | } 32 | 33 | #[derive(Debug, Serialize, Deserialize, Clone)] 34 | #[serde(rename_all = "camelCase")] 35 | pub struct Logos { 36 | pub dark: String, 37 | pub dark_short: String, 38 | pub light: String, 39 | pub light_short: String, 40 | } 41 | 42 | pub async fn handler( 43 | state: State>, 44 | query: Query, 45 | ) -> Result { 46 | handler_internal(state, query) 47 | .with_metrics(HANDLER_TASK_METRICS.with_name("onramp_providers")) 48 | .await 49 | } 50 | 51 | #[tracing::instrument(skip_all, level = "debug")] 52 | async fn handler_internal( 53 | state: State>, 54 | query: Query, 55 | ) -> Result { 56 | state 57 | .validate_project_access_and_quota(&query.project_id) 58 | .await?; 59 | 60 | let providers_response = state 61 | .providers 62 | .onramp_multi_provider 63 | .get_providers(query.0, state.metrics.clone()) 64 | .await 65 | .tap_err(|e| { 66 | error!("Failed to call onramp providers with {}", e); 67 | })?; 68 | 69 | Ok(Json(providers_response).into_response()) 70 | } 71 | -------------------------------------------------------------------------------- /src/handlers/supported_chains.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::HANDLER_TASK_METRICS, 3 | crate::{error::RpcError, state::AppState}, 4 | axum::{ 5 | extract::State, 6 | response::{IntoResponse, Response}, 7 | Json, 8 | }, 9 | hyper::header::CACHE_CONTROL, 10 | std::sync::Arc, 11 | wc::future::FutureExt, 12 | }; 13 | 14 | pub async fn handler(state: State>) -> Result { 15 | handler_internal(state) 16 | .with_metrics(HANDLER_TASK_METRICS.with_name("supported_chains")) 17 | .await 18 | } 19 | 20 | #[tracing::instrument(skip_all, level = "debug")] 21 | async fn handler_internal(State(state): State>) -> Result { 22 | // Set cache control headers to 24 hours 23 | let ttl_secs = 24 * 60 * 60; 24 | 25 | Ok(( 26 | [( 27 | CACHE_CONTROL, 28 | format!("public, max-age={ttl_secs}, s-maxage={ttl_secs}"), 29 | )], 30 | Json(state.providers.rpc_supported_chains.clone()), 31 | ) 32 | .into_response()) 33 | } 34 | -------------------------------------------------------------------------------- /src/handlers/wallet/call_id.rs: -------------------------------------------------------------------------------- 1 | use alloy::primitives::{Bytes, U64}; 2 | use serde::{Deserialize, Serialize}; 3 | 4 | #[derive(Debug, Clone, PartialEq, Eq)] 5 | pub struct CallId(pub CallIdInner); 6 | 7 | #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] 8 | pub struct CallIdInner { 9 | pub chain_id: U64, 10 | pub user_op_hash: Bytes, 11 | } 12 | 13 | impl Serialize for CallId { 14 | fn serialize(&self, serializer: S) -> Result { 15 | Bytes::from(serde_json::to_vec(&self.0).map_err(serde::ser::Error::custom)?) 
16 | .serialize(serializer) 17 | } 18 | } 19 | 20 | impl<'de> Deserialize<'de> for CallId { 21 | fn deserialize>(deserializer: D) -> Result { 22 | let bytes = Bytes::deserialize(deserializer)?; 23 | let inner = serde_json::from_slice(&bytes).map_err(serde::de::Error::custom)?; 24 | Ok(Self(inner)) 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /src/handlers/wallet/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod call_id; 2 | pub mod exchanges; 3 | pub mod get_assets; 4 | pub mod get_calls_status; 5 | pub mod get_exchange_buy_status; 6 | pub mod get_exchange_url; 7 | pub mod get_exchanges; 8 | pub mod handler; 9 | pub mod prepare_calls; 10 | pub mod send_prepared_calls; 11 | mod types; 12 | -------------------------------------------------------------------------------- /src/handlers/wallet/types.rs: -------------------------------------------------------------------------------- 1 | use alloy::primitives::U64; 2 | use serde::{Deserialize, Serialize}; 3 | 4 | #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] 5 | pub enum SignatureRequestType { 6 | #[serde(rename = "user-operation-v07")] 7 | UserOpV7, 8 | } 9 | 10 | #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] 11 | #[serde(rename_all = "camelCase")] 12 | pub struct PreparedCalls { 13 | pub r#type: SignatureRequestType, 14 | pub data: yttrium::user_operation::UserOperationV07, 15 | pub chain_id: U64, 16 | } 17 | -------------------------------------------------------------------------------- /src/handlers/ws_proxy.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::{RpcQueryParams, HANDLER_TASK_METRICS}, 3 | crate::{error::RpcError, state::AppState}, 4 | axum::{ 5 | extract::{Query, State}, 6 | response::Response, 7 | }, 8 | axum_tungstenite::WebSocketUpgrade, 9 | std::sync::Arc, 10 | wc::future::FutureExt, 11 | }; 12 | 13 | pub async fn handler( 14 | state: State>, 15 | query_params: Query, 16 | ws: WebSocketUpgrade, 17 | ) -> Result { 18 | handler_internal(state, query_params, ws) 19 | .with_metrics(HANDLER_TASK_METRICS.with_name("ws_proxy")) 20 | .await 21 | } 22 | 23 | #[tracing::instrument(skip_all, level = "debug")] 24 | async fn handler_internal( 25 | State(state): State>, 26 | Query(query_params): Query, 27 | ws: WebSocketUpgrade, 28 | ) -> Result { 29 | state 30 | .validate_project_access_and_quota(&query_params.project_id) 31 | .await?; 32 | 33 | let chain_id = query_params.chain_id.clone(); 34 | let provider = state 35 | .providers 36 | .get_ws_provider_for_chain_id(&chain_id) 37 | .ok_or(RpcError::UnsupportedChain(chain_id.clone()))?; 38 | 39 | state.metrics.add_websocket_connection(chain_id); 40 | 41 | provider.proxy(ws, query_params).await 42 | } 43 | -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | use { 2 | dotenv::dotenv, 3 | rpc_proxy::{env::Config, error}, 4 | tracing::level_filters::LevelFilter, 5 | tracing_subscriber::{fmt::format::FmtSpan, EnvFilter}, 6 | }; 7 | 8 | #[global_allocator] 9 | static ALLOC: wc::alloc::Jemalloc = wc::alloc::Jemalloc; 10 | 11 | #[tokio::main] 12 | async fn main() -> error::RpcResult<()> { 13 | dotenv().ok(); 14 | 15 | let config = Config::from_env() 16 | .map_err(|e| dbg!(e)) 17 | .expect("Failed to load config, please ensure all env variables are defined."); 18 | 19 | 
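// Logging is initialized from the loaded config: `config.server.log_level` drives the
// EnvFilter (with ERROR as the default directive), span close events are emitted for
// latency visibility, and ANSI colors are disabled.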
tracing_subscriber::fmt() 20 | .with_env_filter( 21 | EnvFilter::builder() 22 | .with_default_directive(LevelFilter::ERROR.into()) 23 | .parse(&config.server.log_level) 24 | .expect("Invalid log level"), 25 | ) 26 | .with_span_events(FmtSpan::CLOSE) 27 | .with_ansi(false) 28 | .init(); 29 | 30 | rpc_proxy::bootstrap(config).await 31 | } 32 | -------------------------------------------------------------------------------- /src/names/mod.rs: -------------------------------------------------------------------------------- 1 | use {once_cell::sync::Lazy, regex::Regex, serde::Deserialize, std::collections::HashMap}; 2 | 3 | pub mod suggestions; 4 | pub mod utils; 5 | 6 | /// Attributes value max length 7 | pub const ATTRIBUTES_VALUE_MAX_LENGTH: usize = 255; 8 | 9 | /// List of supported attributes with the regex check pattern 10 | pub static SUPPORTED_ATTRIBUTES: Lazy> = Lazy::new(|| { 11 | let mut map: HashMap = HashMap::new(); 12 | map.insert( 13 | "bio".into(), 14 | Regex::new(r"^[a-zA-Z0-9@:/._\-?&=+ ]+$").expect("Invalid regex for bio"), 15 | ); 16 | map 17 | }); 18 | 19 | #[derive(Debug, Clone, Deserialize, Eq, PartialEq)] 20 | pub struct Config { 21 | pub allowed_zones: Option>, 22 | } 23 | -------------------------------------------------------------------------------- /src/names/suggestions.rs: -------------------------------------------------------------------------------- 1 | /// Returns suggested words from the dictionary that start with the given 2 | /// prefix. 3 | pub fn dictionary_suggestions(start_with: &str) -> Vec<&str> { 4 | // The dictionary is a list of words separated by newlines 5 | let dictionary_contents = include_str!("../../assets/names_dictionary.txt"); 6 | let candidates: Vec<&str> = dictionary_contents 7 | .lines() 8 | .filter(|&suggested_name| { 9 | suggested_name.starts_with(start_with) && suggested_name != start_with 10 | }) 11 | .collect(); 12 | candidates 13 | } 14 | -------------------------------------------------------------------------------- /src/profiler/mod.rs: -------------------------------------------------------------------------------- 1 | #[derive(Debug, Clone, PartialEq, Eq, serde::Deserialize)] 2 | pub struct ProfilerConfig {} 3 | 4 | pub async fn run() { 5 | loop { 6 | if let Err(err) = wc::alloc::stats::update_jemalloc_metrics() { 7 | tracing::warn!(?err, "failed to collect jemalloc stats"); 8 | } 9 | 10 | tokio::time::sleep(tokio::time::Duration::from_secs(30)).await; 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /src/project/config.rs: -------------------------------------------------------------------------------- 1 | use { 2 | serde::Deserialize, serde_piecewise_default::DeserializePiecewiseDefault, std::time::Duration, 3 | }; 4 | 5 | #[derive(DeserializePiecewiseDefault, Debug, Clone, PartialEq, Eq)] 6 | pub struct Config { 7 | pub api_url: Option, 8 | pub api_auth_token: Option, 9 | pub project_data_cache_ttl: u64, 10 | } 11 | 12 | impl Default for Config { 13 | fn default() -> Self { 14 | Self { 15 | api_url: None, 16 | api_auth_token: None, 17 | project_data_cache_ttl: 60 * 5, 18 | } 19 | } 20 | } 21 | 22 | impl Config { 23 | pub fn project_data_cache_ttl(&self) -> Duration { 24 | Duration::from_secs(self.project_data_cache_ttl) 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /src/project/error.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::storage::error::StorageError, 3 
| cerberus::registry::RegistryError, 4 | serde::{Deserialize, Serialize}, 5 | thiserror::Error as ThisError, 6 | }; 7 | 8 | #[derive(Debug, ThisError)] 9 | pub enum ProjectStorageError { 10 | #[error("registry error: {0}")] 11 | Registry(#[from] RegistryError), 12 | 13 | #[error("cache error: {0}")] 14 | Cache(#[from] StorageError), 15 | } 16 | 17 | #[derive(Debug, Clone, Serialize, Deserialize, ThisError)] 18 | pub enum ProjectDataError { 19 | #[error("Project not found in registry")] 20 | NotFound, 21 | 22 | #[error("Registry configuration error")] 23 | RegistryConfigError, 24 | } 25 | -------------------------------------------------------------------------------- /src/storage/error.rs: -------------------------------------------------------------------------------- 1 | //! Error typedefs used by this crate 2 | 3 | use thiserror::Error as ThisError; 4 | 5 | /// The error produced from most Storage functions 6 | #[derive(Debug, ThisError)] 7 | pub enum StorageError { 8 | /// Couldn't set the expiration for the given key 9 | #[error("couldn't set the expiry to the key")] 10 | SetExpiry, 11 | /// Unable to serialize data to store 12 | #[error("error on serialize data")] 13 | Serialize, 14 | /// Unable to deserialize data from store 15 | #[error("error on deserialize data")] 16 | Deserialize, 17 | /// Error on establishing a connection with the storage 18 | #[error("error on open connection")] 19 | Connection(String), 20 | /// Wrong node address 21 | #[error("wrong node address format: {0}")] 22 | WrongNodeAddress(String), 23 | /// Wrong key provided 24 | #[error("wrong key format: {0}")] 25 | WrongKey(String), 26 | /// Wrong namespace provided 27 | #[error("wrong namespace: {0}")] 28 | WrongNamespace(String), 29 | /// Wrong UTF8 encoding 30 | #[error("wrong UTF8 encoding")] 31 | Utf8Error(#[from] std::string::FromUtf8Error), 32 | /// WCN replication client error 33 | #[error("WCN client error: {0}")] 34 | WcnClientError(#[from] wcn_replication::Error), 35 | #[error("WCN auth error: {0}")] 36 | WcnAuthError(#[from] wcn_replication::auth::Error), 37 | #[error("WCN driver creation error: {0}")] 38 | WcnDriverCreationError(#[from] wcn_replication::CreationError), 39 | /// An unexpected error occurred 40 | #[error("{0:?}")] 41 | Other(String), 42 | } 43 | -------------------------------------------------------------------------------- /src/storage/mod.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::storage::error::StorageError, 3 | async_trait::async_trait, 4 | serde::{de::DeserializeOwned, Serialize}, 5 | std::{fmt::Debug, time::Duration}, 6 | }; 7 | 8 | pub mod error; 9 | pub mod irn; 10 | pub mod redis; 11 | 12 | /// The Result type returned by Storage functions 13 | pub type StorageResult = Result; 14 | 15 | #[async_trait] 16 | pub trait KeyValueStorage: 'static + Send + Sync + Debug 17 | where 18 | T: Serialize + DeserializeOwned + Send + Sync, 19 | { 20 | /// Retrieve the data associated with the given key. 21 | async fn get(&self, key: &str) -> StorageResult>; 22 | 23 | /// Set the value for the given key. 24 | async fn set(&self, key: &str, value: &T, ttl: Option) -> StorageResult<()>; 25 | 26 | /// Set the value for the given key. Assumes the data is already serialized. 27 | async fn set_serialized( 28 | &self, 29 | key: &str, 30 | value: &[u8], 31 | ttl: Option, 32 | ) -> StorageResult<()>; 33 | 34 | /// Delete the value associated with the given key. 
35 | async fn del(&self, key: &str) -> StorageResult<()>; 36 | } 37 | 38 | /// Holder the type of data will be serialized to be stored. 39 | pub type Data = Vec; 40 | 41 | pub fn serialize(data: &T) -> StorageResult 42 | where 43 | T: Serialize, 44 | { 45 | rmp_serde::to_vec(data).map_err(|_| StorageError::Serialize) 46 | } 47 | 48 | pub fn deserialize(data: &[u8]) -> StorageResult 49 | where 50 | T: DeserializeOwned, 51 | { 52 | rmp_serde::from_slice(data).map_err(|_| StorageError::Deserialize) 53 | } 54 | -------------------------------------------------------------------------------- /src/utils/build.rs: -------------------------------------------------------------------------------- 1 | #![allow(dead_code)] 2 | 3 | const BUILD_VERSION: &str = env!("VERGEN_BUILD_SEMVER"); 4 | const BUILD_PROFILE: &str = env!("VERGEN_CARGO_PROFILE"); 5 | const BUILD_FEATURES: &str = env!("VERGEN_CARGO_FEATURES"); 6 | 7 | const GIT_SHA: &str = env!("VERGEN_GIT_SHA"); 8 | const GIT_TAG: &str = env!("VERGEN_GIT_SEMVER"); 9 | 10 | #[derive(Debug, Clone, Copy, Default)] 11 | pub struct CompileInfo; 12 | 13 | impl CompileInfo { 14 | pub const fn build(&self) -> BuildInfo { 15 | BuildInfo 16 | } 17 | 18 | pub const fn git(&self) -> GitInfo { 19 | GitInfo 20 | } 21 | } 22 | 23 | #[derive(Debug, Clone, Copy, Default)] 24 | pub struct BuildInfo; 25 | 26 | impl BuildInfo { 27 | pub const fn version(&self) -> &'static str { 28 | BUILD_VERSION 29 | } 30 | 31 | pub const fn profile(&self) -> &'static str { 32 | BUILD_PROFILE 33 | } 34 | 35 | pub const fn features(&self) -> &'static str { 36 | BUILD_FEATURES 37 | } 38 | } 39 | 40 | #[derive(Debug, Clone, Copy, Default)] 41 | pub struct GitInfo; 42 | 43 | impl GitInfo { 44 | pub const fn hash(&self) -> &'static str { 45 | GIT_SHA 46 | } 47 | 48 | pub fn short_hash(&self) -> &'static str { 49 | let hash = self.hash(); 50 | 51 | let end = hash 52 | .char_indices() 53 | .nth(7) 54 | .map(|(n, _)| n) 55 | .unwrap_or_else(|| hash.len()); 56 | 57 | &hash[..end] 58 | } 59 | 60 | pub const fn tag(&self) -> &'static str { 61 | GIT_TAG 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /src/utils/erc4337.rs: -------------------------------------------------------------------------------- 1 | use { 2 | alloy::{ 3 | primitives::{Address, U256}, 4 | rpc::client::{ClientBuilder, RpcClient}, 5 | transports::TransportResult, 6 | }, 7 | serde::{Deserialize, Serialize}, 8 | url::Url, 9 | yttrium::user_operation::UserOperationV07, 10 | }; 11 | 12 | pub struct BundlerRpcClient { 13 | pub client: RpcClient, 14 | } 15 | 16 | impl BundlerRpcClient { 17 | pub fn new(url: Url) -> Self { 18 | let client = ClientBuilder::default().http(url); 19 | Self { client } 20 | } 21 | 22 | pub async fn eth_estimate_user_operation_gas_v07( 23 | &self, 24 | user_op: &UserOperationV07, 25 | entrypoint: Address, 26 | ) -> TransportResult { 27 | self.client 28 | .request("eth_estimateUserOperationGas", (user_op, entrypoint)) 29 | .await 30 | } 31 | } 32 | 33 | #[derive(Debug, Serialize, Deserialize)] 34 | #[serde(rename_all = "camelCase")] 35 | pub struct EthEstimateUserOperationGasV07Response { 36 | pub pre_verification_gas: U256, 37 | pub verification_gas_limit: U256, 38 | pub call_gas_limit: U256, 39 | } 40 | -------------------------------------------------------------------------------- /src/utils/mod.rs: -------------------------------------------------------------------------------- 1 | use rand::{distributions::Alphanumeric, Rng}; 2 | 3 | pub mod 
batch_json_rpc_request; 4 | pub mod build; 5 | pub mod crypto; 6 | pub mod erc4337; 7 | pub mod erc7677; 8 | pub mod network; 9 | pub mod permissions; 10 | pub mod rate_limit; 11 | pub mod sessions; 12 | pub mod simple_request_json; 13 | pub mod token_amount; 14 | 15 | pub fn generate_random_string(len: usize) -> String { 16 | let rng = rand::thread_rng(); 17 | rng.sample_iter(&Alphanumeric) 18 | .filter_map(|b| { 19 | let c = b as char; 20 | if c.is_ascii_alphanumeric() || c.is_ascii_digit() { 21 | Some(c) 22 | } else { 23 | None 24 | } 25 | }) 26 | .take(len) 27 | .collect() 28 | } 29 | 30 | pub fn capitalize_first_letter(s: &str) -> String { 31 | let mut c = s.chars(); 32 | match c.next() { 33 | None => String::new(), 34 | Some(first) => { 35 | // to_uppercase() returns an iterator because some characters can map to multiple chars 36 | first.to_uppercase().collect::() + c.as_str() 37 | } 38 | } 39 | } 40 | 41 | #[cfg(test)] 42 | mod tests { 43 | use super::*; 44 | 45 | #[test] 46 | fn test_capitalize_first_letter() { 47 | let input = ""; 48 | let expected = ""; 49 | assert_eq!(capitalize_first_letter(input), expected); 50 | 51 | let input = "rust"; 52 | let expected = "Rust"; 53 | assert_eq!(capitalize_first_letter(input), expected); 54 | 55 | let input = "rust world"; 56 | let expected = "Rust world"; 57 | assert_eq!(capitalize_first_letter(input), expected); 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /src/ws.rs: -------------------------------------------------------------------------------- 1 | use { 2 | async_tungstenite::{tokio::ConnectStream, WebSocketStream}, 3 | futures_util::{select, StreamExt}, 4 | tracing::log::debug, 5 | }; 6 | 7 | #[tracing::instrument(skip(client_ws, provider_ws), level = "debug")] 8 | pub async fn proxy( 9 | project_id: String, 10 | client_ws: axum_tungstenite::WebSocket, 11 | provider_ws: WebSocketStream, 12 | ) { 13 | let (client_ws_sender, client_ws_receiver) = client_ws.split(); 14 | let (provider_ws_sender, provider_ws_receiver) = provider_ws.split(); 15 | 16 | let mut write = client_ws_receiver.forward(provider_ws_sender); 17 | let mut read = provider_ws_receiver.forward(client_ws_sender); 18 | select! { 19 | _ = read => debug!("WebSocket relaying messages to the provider for client {project_id} died.") , 20 | _ = write => debug!("WebSocket relaying messages from the provider to the client {project_id} died.") , 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /terraform/.terraform-docs.yml: -------------------------------------------------------------------------------- 1 | formatter: 'markdown table' 2 | 3 | recursive: 4 | enabled: true 5 | path: . 6 | 7 | output: 8 | file: README.md 9 | mode: inject 10 | template: |- 11 | 12 | {{ .Content }} 13 | 14 | 15 | content: | 16 | {{ .Header }} 17 | {{ .Requirements }} 18 | {{ .Providers }} 19 | {{ .Modules }} 20 | 21 | ## Inputs 22 | {{- $hideInputs := list "namespace" "region" "stage" "name" "delimiter" "attributes" "tags" "regex_replace_chars" "id_length_limit" "label_key_case" "label_value_case" "label_order" }} 23 | {{- $filteredInputs := list -}} 24 | {{- range .Module.Inputs -}} 25 | {{- if not (has .Name $hideInputs) -}} 26 | {{- $filteredInputs = append $filteredInputs . -}} 27 | {{- end -}} 28 | {{- end -}} 29 | {{ if not $filteredInputs }} 30 | 31 | No inputs. 
32 | {{ else }} 33 | | Name | Description | Type | Default | Required | 34 | |------|-------------|------|---------|:--------:| 35 | {{- range $filteredInputs }} 36 | | {{ anchorNameMarkdown "input" .Name }} | {{ tostring .Description | sanitizeMarkdownTbl }} | {{ printf " " }}
<pre>{{ tostring .Type | sanitizeMarkdownTbl }}</pre> | {{ printf " " }}<pre>{{ .GetValue | sanitizeMarkdownTbl }}</pre>
| {{ printf " " }}{{ ternary .Required "yes" "no" }} | 37 | {{- end }} 38 | {{- end }} 39 | {{ .Outputs }} 40 | {{/** End of file fixer */}} 41 | -------------------------------------------------------------------------------- /terraform/.tflint.hcl: -------------------------------------------------------------------------------- 1 | config { 2 | format = "default" 3 | module = true 4 | } 5 | 6 | plugin "terraform" { 7 | enabled = true 8 | preset = "all" 9 | } 10 | 11 | plugin "aws" { 12 | enabled = true 13 | version = "0.18.0" 14 | source = "github.com/terraform-linters/tflint-ruleset-aws" 15 | } 16 | 17 | rule "terraform_workspace_remote" { 18 | enabled = false 19 | } 20 | -------------------------------------------------------------------------------- /terraform/alerting/alarms_ecs.tf: -------------------------------------------------------------------------------- 1 | resource "aws_cloudwatch_metric_alarm" "ecs_cpu_utilization" { 2 | alarm_name = "${local.alarm_prefix} - ECS CPU Utilization" 3 | alarm_description = "${local.alarm_prefix} - ECS CPU utilization is high (over ${var.ecs_cpu_threshold}%)" 4 | 5 | namespace = module.cloudwatch.namespaces.ECS 6 | dimensions = { 7 | ClusterName = var.ecs_cluster_name 8 | ServiceName = var.ecs_service_name 9 | } 10 | metric_name = module.cloudwatch.metrics.ECS.CPUUtilization 11 | 12 | evaluation_periods = local.evaluation_periods 13 | period = local.period 14 | 15 | statistic = module.cloudwatch.statistics.Average 16 | comparison_operator = module.cloudwatch.operators.GreaterThanOrEqualToThreshold 17 | threshold = var.ecs_cpu_threshold 18 | treat_missing_data = "breaching" 19 | 20 | alarm_actions = [aws_sns_topic.cloudwatch_webhook.arn] 21 | insufficient_data_actions = [aws_sns_topic.cloudwatch_webhook.arn] 22 | } 23 | 24 | resource "aws_cloudwatch_metric_alarm" "ecs_mem_utilization" { 25 | alarm_name = "${local.alarm_prefix} - ECS Memory Utilization" 26 | alarm_description = "${local.alarm_prefix} - ECS Memory utilization is high (over ${var.ecs_memory_threshold}%)" 27 | 28 | namespace = module.cloudwatch.namespaces.ECS 29 | dimensions = { 30 | ClusterName = var.ecs_cluster_name 31 | ServiceName = var.ecs_service_name 32 | } 33 | metric_name = module.cloudwatch.metrics.ECS.MemoryUtilization 34 | 35 | evaluation_periods = local.evaluation_periods 36 | period = local.period 37 | 38 | statistic = module.cloudwatch.statistics.Average 39 | comparison_operator = module.cloudwatch.operators.GreaterThanOrEqualToThreshold 40 | threshold = var.ecs_memory_threshold 41 | treat_missing_data = "breaching" 42 | 43 | alarm_actions = [aws_sns_topic.cloudwatch_webhook.arn] 44 | insufficient_data_actions = [aws_sns_topic.cloudwatch_webhook.arn] 45 | } 46 | -------------------------------------------------------------------------------- /terraform/alerting/alarms_redis.tf: -------------------------------------------------------------------------------- 1 | resource "aws_cloudwatch_metric_alarm" "redis_cpu_utilization" { 2 | alarm_name = "${local.alarm_prefix} - Redis CPU Utilization" 3 | alarm_description = "${local.alarm_prefix} - Redis CPU utilization is high (over ${var.redis_cpu_threshold}%)" 4 | 5 | namespace = module.cloudwatch.namespaces.ElastiCache 6 | dimensions = { 7 | CacheClusterId = var.redis_cluster_id 8 | } 9 | metric_name = module.cloudwatch.metrics.ElastiCache.CPUUtilization 10 | 11 | evaluation_periods = local.evaluation_periods 12 | period = local.period 13 | 14 | statistic = module.cloudwatch.statistics.Average 15 | comparison_operator = 
module.cloudwatch.operators.GreaterThanOrEqualToThreshold 16 | threshold = var.redis_cpu_threshold 17 | treat_missing_data = "breaching" 18 | 19 | alarm_actions = [aws_sns_topic.cloudwatch_webhook.arn] 20 | insufficient_data_actions = [aws_sns_topic.cloudwatch_webhook.arn] 21 | } 22 | 23 | resource "aws_cloudwatch_metric_alarm" "redis_available_memory" { 24 | alarm_name = "${local.alarm_prefix} - Redis Available Memory" 25 | alarm_description = "${local.alarm_prefix} - Redis available memory is low (less than ${var.redis_memory_threshold}GiB)" 26 | 27 | namespace = module.cloudwatch.namespaces.ElastiCache 28 | dimensions = { 29 | CacheClusterId = var.redis_cluster_id 30 | } 31 | metric_name = module.cloudwatch.metrics.ElastiCache.FreeableMemory 32 | 33 | evaluation_periods = local.evaluation_periods 34 | period = local.period 35 | 36 | statistic = module.cloudwatch.statistics.Average 37 | comparison_operator = module.cloudwatch.operators.LessThanOrEqualToThreshold 38 | threshold = var.redis_memory_threshold * pow(1000, 3) 39 | treat_missing_data = "breaching" 40 | 41 | alarm_actions = [aws_sns_topic.cloudwatch_webhook.arn] 42 | insufficient_data_actions = [aws_sns_topic.cloudwatch_webhook.arn] 43 | } 44 | -------------------------------------------------------------------------------- /terraform/alerting/main.tf: -------------------------------------------------------------------------------- 1 | module "cloudwatch" { 2 | source = "app.terraform.io/wallet-connect/cloudwatch-constants/aws" 3 | version = "1.0.0" 4 | } 5 | 6 | locals { 7 | alarm_prefix = "${title(module.this.name)} - ${title(module.this.stage)}" 8 | evaluation_periods = 1 9 | period = 60 * 5 10 | } 11 | 12 | 13 | #tfsec:ignore:aws-sns-enable-topic-encryption 14 | resource "aws_sns_topic" "cloudwatch_webhook" { 15 | name = "cloudwatch-webhook" 16 | display_name = "CloudWatch Webhook forwarding to BetterUptime" 17 | } 18 | 19 | resource "aws_sns_topic_subscription" "cloudwatch_webhook" { 20 | count = var.webhook_cloudwatch_p2 != "" ? 1 : 0 21 | 22 | endpoint = var.webhook_cloudwatch_p2 23 | protocol = "https" 24 | topic_arn = aws_sns_topic.cloudwatch_webhook.arn 25 | } 26 | 27 | 28 | #tfsec:ignore:aws-sns-enable-topic-encryption 29 | resource "aws_sns_topic" "prometheus_webhook" { 30 | name = "prometheus-webhook" 31 | display_name = "Prometheus Webhook forwarding to BetterUptime" 32 | } 33 | 34 | resource "aws_sns_topic_subscription" "prometheus_webhook" { 35 | count = var.webhook_prometheus_p2 != "" ? 
1 : 0 36 | endpoint = var.webhook_prometheus_p2 37 | protocol = "https" 38 | topic_arn = aws_sns_topic.prometheus_webhook.arn 39 | } 40 | -------------------------------------------------------------------------------- /terraform/alerting/terraform.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "~> 1.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = "~> 5.7" 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /terraform/alerting/variables.tf: -------------------------------------------------------------------------------- 1 | variable "webhook_cloudwatch_p2" { 2 | description = "The URL of the webhook to be called on CloudWatch P2 alarms" 3 | type = string 4 | } 5 | 6 | variable "webhook_prometheus_p2" { 7 | description = "The URL of the webhook to be called on Prometheus P2 alarms" 8 | type = string 9 | } 10 | 11 | #------------------------------------------------------------------------------- 12 | # ECS 13 | 14 | variable "ecs_cluster_name" { 15 | description = "The name of the ECS cluster running the application" 16 | type = string 17 | } 18 | 19 | variable "ecs_service_name" { 20 | description = "The name of the ECS service running the application" 21 | type = string 22 | } 23 | 24 | variable "ecs_cpu_threshold" { 25 | description = "The ECS CPU utilization alarm threshold in percents" 26 | type = number 27 | default = 80 28 | } 29 | 30 | variable "ecs_memory_threshold" { 31 | description = "The ECS memory utilization alarm threshold in percents" 32 | type = number 33 | default = 80 34 | } 35 | 36 | #------------------------------------------------------------------------------- 37 | # Redis 38 | 39 | variable "redis_cluster_id" { 40 | description = "The Redis cluster ID" 41 | type = string 42 | } 43 | 44 | variable "redis_cpu_threshold" { 45 | description = "The Redis CPU utilization alarm threshold in percents" 46 | type = number 47 | default = 80 48 | } 49 | 50 | variable "redis_memory_threshold" { 51 | description = "The Redis available memory alarm threshold in GiB" 52 | type = number 53 | default = 3 54 | } 55 | -------------------------------------------------------------------------------- /terraform/context.tf: -------------------------------------------------------------------------------- 1 | module "stage" { 2 | source = "app.terraform.io/wallet-connect/stage/null" 3 | version = "0.1.0" 4 | project = "blockchain" 5 | } 6 | 7 | locals { 8 | stage = module.stage.stage 9 | } 10 | 11 | module "this" { 12 | source = "app.terraform.io/wallet-connect/label/null" 13 | version = "0.3.2" 14 | 15 | namespace = "wc" 16 | region = var.region 17 | stage = local.stage 18 | name = var.name 19 | 20 | tags = { 21 | Application = var.name 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /terraform/ecs/cluster_logs.tf: -------------------------------------------------------------------------------- 1 | resource "aws_cloudwatch_log_group" "cluster" { 2 | name = "${module.this.id}-app-logs" 3 | kms_key_id = var.cloudwatch_logs_key_arn 4 | retention_in_days = var.cloudwatch_retention_in_days 5 | } 6 | 7 | resource "aws_cloudwatch_log_group" "otel" { 8 | name = "${module.this.id}-aws-otel-sidecar-collector" 9 | kms_key_id = var.cloudwatch_logs_key_arn 10 | retention_in_days = var.cloudwatch_retention_in_days 11 | } 12 | 13 | resource "aws_cloudwatch_log_group" "prometheus_proxy" { 
14 | name = "${module.this.id}-sigv4-prometheus-proxy" 15 | kms_key_id = var.cloudwatch_logs_key_arn 16 | retention_in_days = var.cloudwatch_retention_in_days 17 | } 18 | -------------------------------------------------------------------------------- /terraform/ecs/dns.tf: -------------------------------------------------------------------------------- 1 | # DNS Records 2 | resource "aws_route53_record" "dns_load_balancer" { 3 | for_each = var.route53_zones 4 | 5 | zone_id = each.key 6 | name = each.value 7 | type = "A" 8 | 9 | alias { 10 | name = aws_lb.load_balancer.dns_name 11 | zone_id = aws_lb.load_balancer.zone_id 12 | evaluate_target_health = true 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /terraform/ecs/main.tf: -------------------------------------------------------------------------------- 1 | resource "random_pet" "this" { 2 | length = 2 3 | } 4 | -------------------------------------------------------------------------------- /terraform/ecs/outputs.tf: -------------------------------------------------------------------------------- 1 | output "ecs_cluster_name" { 2 | description = "The name of the ECS cluster" 3 | value = aws_ecs_cluster.app_cluster.name 4 | } 5 | 6 | output "ecs_service_name" { 7 | description = "The name of the ECS service" 8 | value = aws_ecs_service.app_service.name 9 | } 10 | 11 | output "ecs_task_family" { 12 | description = "The family of the task definition" 13 | value = aws_ecs_task_definition.app_task.family 14 | } 15 | 16 | output "service_security_group_id" { 17 | description = "The ID of the security group for the service" 18 | value = aws_security_group.app_ingress.id 19 | } 20 | 21 | output "target_group_arn" { 22 | description = "The ARN of the target group" 23 | value = aws_lb_target_group.target_group.arn 24 | } 25 | 26 | output "load_balancer_arn" { 27 | description = "The ARN of the load balancer" 28 | value = aws_lb.load_balancer.arn 29 | } 30 | 31 | output "load_balancer_arn_suffix" { 32 | description = "The ARN suffix of the load balancer" 33 | value = aws_lb.load_balancer.arn_suffix 34 | } 35 | 36 | output "log_group_app_name" { 37 | description = "The name of the log group for the app" 38 | value = aws_cloudwatch_log_group.cluster.name 39 | } 40 | 41 | output "log_group_app_arn" { 42 | description = "The ARN of the log group for the app" 43 | value = aws_cloudwatch_log_group.cluster.arn 44 | } 45 | -------------------------------------------------------------------------------- /terraform/ecs/terraform.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "~> 1.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = "~> 5.7" 8 | } 9 | random = { 10 | source = "hashicorp/random" 11 | version = "3.5.1" 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /terraform/inputs.tf: -------------------------------------------------------------------------------- 1 | data "terraform_remote_state" "org" { 2 | backend = "remote" 3 | config = { 4 | organization = "wallet-connect" 5 | workspaces = { 6 | name = "aws-org" 7 | } 8 | } 9 | } 10 | 11 | data "terraform_remote_state" "datalake" { 12 | backend = "remote" 13 | config = { 14 | organization = "wallet-connect" 15 | workspaces = { 16 | name = "datalake-${module.stage.dev ? 
"staging" : local.stage}" 17 | } 18 | } 19 | } 20 | 21 | data "terraform_remote_state" "infra_aws" { 22 | backend = "remote" 23 | config = { 24 | organization = "wallet-connect" 25 | workspaces = { 26 | name = "infra-aws" 27 | } 28 | } 29 | } 30 | 31 | data "terraform_remote_state" "monitoring" { 32 | backend = "remote" 33 | config = { 34 | organization = "wallet-connect" 35 | workspaces = { 36 | name = "monitoring" 37 | } 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /terraform/main.tf: -------------------------------------------------------------------------------- 1 | data "aws_caller_identity" "this" {} 2 | 3 | resource "random_pet" "this" { 4 | length = 2 5 | } 6 | 7 | locals { 8 | ecr_repository_url = module.stage.dev ? data.terraform_remote_state.org.outputs.accounts.sdlc.dev.ecr-urls.blockchain : data.terraform_remote_state.org.outputs.accounts.wl.blockchain[local.stage].ecr-url 9 | } 10 | 11 | resource "aws_kms_key" "cloudwatch_logs" { 12 | description = "KMS key for encrypting CloudWatch Logs" 13 | enable_key_rotation = true 14 | policy = jsonencode({ 15 | Version = "2012-10-17" 16 | Statement = [ 17 | { 18 | Sid = "Enable IAM User Permissions" 19 | Effect = "Allow" 20 | Principal = { 21 | AWS = data.aws_caller_identity.this.account_id 22 | } 23 | Action = "kms:*" 24 | Resource = "*" 25 | }, 26 | { 27 | Sid = "AllowCloudWatchLogs" 28 | Effect = "Allow" 29 | Principal = { 30 | Service = "logs.${module.this.region}.amazonaws.com" 31 | } 32 | Action = [ 33 | "kms:Encrypt*", 34 | "kms:Decrypt*", 35 | "kms:ReEncrypt*", 36 | "kms:GenerateDataKey*", 37 | "kms:Describe*" 38 | ] 39 | Resource = "*" 40 | }, 41 | ] 42 | }) 43 | } 44 | 45 | resource "aws_kms_alias" "cloudwatch_logs" { 46 | name = "alias/${module.this.id}-cloudwatch-logs" 47 | target_key_id = aws_kms_key.cloudwatch_logs.key_id 48 | } 49 | -------------------------------------------------------------------------------- /terraform/monitoring/data_sources.tf: -------------------------------------------------------------------------------- 1 | module "monitoring-role" { 2 | source = "app.terraform.io/wallet-connect/monitoring-role/aws" 3 | version = "1.1.0" 4 | context = module.this 5 | remote_role_arn = var.monitoring_role_arn 6 | } 7 | 8 | resource "grafana_data_source" "prometheus" { 9 | type = "prometheus" 10 | name = "${module.this.stage}-${module.this.name}-amp" 11 | url = var.prometheus_endpoint 12 | 13 | json_data_encoded = jsonencode({ 14 | httpMethod = "GET" 15 | sigV4Auth = true 16 | sigV4AuthType = "ec2_iam_role" 17 | sigV4Region = module.this.region 18 | sigV4AssumeRoleArn = module.monitoring-role.iam_role_arn 19 | }) 20 | 21 | depends_on = [module.monitoring-role] 22 | } 23 | 24 | resource "grafana_data_source" "cloudwatch" { 25 | type = "cloudwatch" 26 | name = "${module.this.stage}-${module.this.name}-cloudwatch" 27 | 28 | json_data_encoded = jsonencode({ 29 | defaultRegion = module.this.region 30 | assumeRoleArn = module.monitoring-role.iam_role_arn 31 | }) 32 | 33 | depends_on = [module.monitoring-role] 34 | } 35 | -------------------------------------------------------------------------------- /terraform/monitoring/main.tf: -------------------------------------------------------------------------------- 1 | data "jsonnet_file" "dashboard" { 2 | source = "${path.module}/dashboard.jsonnet" 3 | 4 | ext_str = { 5 | dashboard_title = "BlockchainAPI - ${title(module.this.stage)}" 6 | dashboard_uid = "blockchainapi-${module.this.stage}" 7 | 8 | prometheus_uid = 
grafana_data_source.prometheus.uid 9 | cloudwatch_uid = grafana_data_source.cloudwatch.uid 10 | 11 | environment = module.this.stage 12 | notifications = jsonencode(var.notification_channels) 13 | 14 | ecs_service_name = var.ecs_service_name 15 | ecs_task_family = var.ecs_task_family 16 | load_balancer = var.load_balancer_arn 17 | target_group = var.ecs_target_group_arn 18 | redis_cluster_id = var.redis_cluster_id 19 | log_group_app_name = var.log_group_app_name 20 | log_group_app_arn = var.log_group_app_arn 21 | aws_account_id = var.aws_account_id 22 | } 23 | } 24 | 25 | resource "grafana_dashboard" "main" { 26 | overwrite = true 27 | message = "Updated by Terraform" 28 | config_json = data.jsonnet_file.dashboard.rendered 29 | } 30 | -------------------------------------------------------------------------------- /terraform/monitoring/outputs.tf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reown-com/blockchain-api/5866492c9d236a7f9fd66a6f08ff2c2d4136c899/terraform/monitoring/outputs.tf -------------------------------------------------------------------------------- /terraform/monitoring/panels/app/handlers_rate.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'Handlers rate', 11 | datasource = ds.prometheus, 12 | ) 13 | .configure(defaults.configuration.timeseries.withUnit('reqps')) 14 | 15 | .addTarget(targets.prometheus( 16 | datasource = ds.prometheus, 17 | expr = 'sum by(task_name) (rate(handler_task_duration_count[$__rate_interval]))', 18 | legendFormat = "{{task_name}}" 19 | )) 20 | } 21 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/balance/provider_retries.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | local _configuration = defaults.configuration.timeseries 8 | .withUnit('cpm'); 9 | 10 | { 11 | new(ds, vars):: 12 | panels.timeseries( 13 | title = 'Balance provider call retries', 14 | datasource = ds.prometheus, 15 | ) 16 | .configure(_configuration) 17 | 18 | .addTarget(targets.prometheus( 19 | datasource = ds.prometheus, 20 | expr = 'sum by (namespace)(rate(balance_lookup_retries_sum{}[$__rate_interval]))', 21 | exemplar = false, 22 | legendFormat = '__auto', 23 | )) 24 | } 25 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/balance/requests_distribution_evm.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = "Requests distribution by provider (EVM)", 11 | datasource = ds.prometheus, 12 | ) 13 | .configure(defaults.configuration.timeseries) 14 | .addTarget(targets.prometheus( 15 | datasource 
= ds.prometheus, 16 | expr = 'sum (increase(provider_status_code_counter_total{provider="Dune", endpoint="evm_balances"}[$__rate_interval]))', 17 | legendFormat = 'Dune', 18 | )) 19 | .addTarget(targets.prometheus( 20 | datasource = ds.prometheus, 21 | expr = 'sum (increase(provider_status_code_counter_total{provider="Zerion", endpoint="positions"}[$__rate_interval]))', 22 | legendFormat = 'Zerion', 23 | )) 24 | } 25 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/balance/requests_distribution_solana.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = "Requests distribution by provider (Solana)", 11 | datasource = ds.prometheus, 12 | ) 13 | .configure(defaults.configuration.timeseries) 14 | .addTarget(targets.prometheus( 15 | datasource = ds.prometheus, 16 | expr = 'sum (increase(provider_status_code_counter_total{provider="Dune", endpoint="solana_balances"}[$__rate_interval]))', 17 | legendFormat = 'Dune', 18 | )) 19 | .addTarget(targets.prometheus( 20 | datasource = ds.prometheus, 21 | expr = 'sum (increase(provider_status_code_counter_total{provider="SolScan", endpoint="https://pro-api.solscan.io/v2.0/account/detail"}[$__rate_interval]))', 22 | legendFormat = 'SolScan', 23 | )) 24 | } 25 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/chain_abstraction/gas_estimation.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'Gas estimations', 11 | datasource = ds.prometheus, 12 | ) 13 | .configure( 14 | defaults.configuration.timeseries 15 | .withSpanNulls(true) 16 | ) 17 | .addTarget(targets.prometheus( 18 | datasource = ds.prometheus, 19 | expr = 'sum by(chain_id) (rate(gas_estimation_sum[$__rate_interval])) / sum by(chain_id) (rate(gas_estimation_count[$__rate_interval]))', 20 | exemplar = false, 21 | legendFormat = '__auto', 22 | )) 23 | } 24 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/chain_abstraction/insufficient_funds.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'Insufficient funds responses', 11 | datasource = ds.prometheus, 12 | ) 13 | .configure(defaults.configuration.timeseries) 14 | .addTarget(targets.prometheus( 15 | datasource = ds.prometheus, 16 | expr = 'sum(increase(ca_insufficient_funds_total{}[$__rate_interval]))', 17 | exemplar = false, 18 | legendFormat = 'Insufficient funds responses counter', 19 | )) 20 | } 21 | -------------------------------------------------------------------------------- 
/terraform/monitoring/panels/chain_abstraction/no_bridging.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'No bridging needed responses', 11 | datasource = ds.prometheus, 12 | ) 13 | .configure(defaults.configuration.timeseries) 14 | .addTarget(targets.prometheus( 15 | datasource = ds.prometheus, 16 | expr = 'sum by(type) (increase(ca_no_bridging_needed_total{}[$__rate_interval]))', 17 | exemplar = false, 18 | legendFormat = '__auto', 19 | )) 20 | } 21 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/chain_abstraction/no_routes.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'No bridging routes found', 11 | datasource = ds.prometheus, 12 | ) 13 | .configure(defaults.configuration.timeseries) 14 | .addTarget(targets.prometheus( 15 | datasource = ds.prometheus, 16 | expr = 'sum by(route) (increase(ca_no_routes_found_total{}[$__rate_interval]))', 17 | exemplar = false, 18 | legendFormat = '__auto', 19 | )) 20 | } 21 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/chain_abstraction/response_types_rate.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'CA response types rate', 11 | datasource = ds.prometheus, 12 | ) 13 | .configure(defaults.configuration.timeseries) 14 | .addTarget(targets.prometheus( 15 | datasource = ds.prometheus, 16 | expr = 'sum(increase(ca_routes_found_total{}[$__rate_interval]))', 17 | exemplar = false, 18 | legendFormat = 'Routes found (success)', 19 | )) 20 | .addTarget(targets.prometheus( 21 | datasource = ds.prometheus, 22 | expr = 'sum(increase(ca_insufficient_funds_total{}[$__rate_interval]))', 23 | exemplar = false, 24 | legendFormat = 'Insufficient funds', 25 | )) 26 | .addTarget(targets.prometheus( 27 | datasource = ds.prometheus, 28 | expr = 'sum(increase(ca_no_bridging_needed_total{}[$__rate_interval]))', 29 | exemplar = false, 30 | legendFormat = 'No bridging needed', 31 | )) 32 | .addTarget(targets.prometheus( 33 | datasource = ds.prometheus, 34 | expr = 'sum(increase(ca_no_routes_found_total{}[$__rate_interval]))', 35 | exemplar = false, 36 | legendFormat = 'No routes found', 37 | )) 38 | } 39 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/ecs/availability.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = 
grafana.panels; 5 | local targets = grafana.targets; 6 | local alert = grafana.alert; 7 | local alertCondition = grafana.alertCondition; 8 | 9 | local error_alert(vars) = alert.new( 10 | namespace = 'Blockchain API', 11 | name = "%s - Availability" % vars.environment, 12 | message = "%s - Availability" % vars.environment, 13 | period = '5m', 14 | frequency = '1m', 15 | noDataState = 'alerting', 16 | notifications = vars.notifications, 17 | alertRuleTags = { 18 | 'og_priority': 'P3', 19 | }, 20 | 21 | conditions = [ 22 | alertCondition.new( 23 | evaluatorParams = [ 95 ], 24 | evaluatorType = 'lt', 25 | operatorType = 'or', 26 | queryRefId = 'availability', 27 | queryTimeStart = '5m', 28 | reducerType = 'avg', 29 | ), 30 | ] 31 | ); 32 | 33 | { 34 | new(ds, vars):: 35 | panels.timeseries( 36 | title = 'Availability', 37 | datasource = ds.prometheus, 38 | ) 39 | .configure( 40 | defaults.configuration.timeseries 41 | .withUnit('percent') 42 | .withSoftLimit( 43 | axisSoftMin = 98, 44 | axisSoftMax = 100, 45 | ) 46 | ) 47 | .setAlert(vars.environment, error_alert(vars)) 48 | 49 | .addTarget(targets.prometheus( 50 | datasource = ds.prometheus, 51 | expr = '(1-(sum(rate(http_call_counter_total{code=~"5[0-9][0-24-9]"}[5m])) or vector(0))/(sum(rate(http_call_counter_total{}[5m]))))*100', 52 | refId = "availability", 53 | exemplar = false, 54 | )) 55 | } 56 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/ecs/cpu.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | local alert = grafana.alert; 7 | local alertCondition = grafana.alertCondition; 8 | local overrides = defaults.overrides; 9 | 10 | { 11 | new(ds, vars):: 12 | panels.timeseries( 13 | title = 'CPU Utilization', 14 | datasource = ds.prometheus, 15 | ) 16 | .configure(overrides.cpu(defaults.configuration.timeseries_resource)) 17 | .setAlert(vars.environment, alert.new( 18 | namespace = 'Blockchain API', 19 | name = "%s - High CPU usage" % vars.environment, 20 | message = "%s - High CPU usage" % vars.environment, 21 | period = '20m', 22 | frequency = '5m', 23 | noDataState = 'alerting', 24 | notifications = vars.notifications, 25 | alertRuleTags = { 26 | 'og_priority': 'P3', 27 | }, 28 | conditions = [ 29 | alertCondition.new( 30 | evaluatorParams = [ 70 ], 31 | evaluatorType = 'gt', 32 | operatorType = 'or', 33 | queryRefId = 'CPU_Avg', 34 | queryTimeStart = '5m', 35 | reducerType = 'max', 36 | ), 37 | ] 38 | )) 39 | .addTarget(targets.prometheus( 40 | datasource = ds.prometheus, 41 | expr = 'sum(rate(cpu_usage_sum[$__rate_interval])) / sum(rate(cpu_usage_count[$__rate_interval]))', 42 | interval = '5m', 43 | legendFormat = 'CPU Utilization 5m avg.', 44 | refId = 'CPU_Avg', 45 | )) 46 | } 47 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/ecs/memory.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | local alert = grafana.alert; 7 | local alertCondition = grafana.alertCondition; 8 | local overrides = 
defaults.overrides; 9 | 10 | { 11 | new(ds, vars):: 12 | panels.timeseries( 13 | title = 'Memory Utilization', 14 | datasource = ds.prometheus, 15 | ) 16 | .configure(defaults.overrides.memory(defaults.configuration.timeseries_resource)) 17 | .setAlert(vars.environment, alert.new( 18 | namespace = 'Blockchain APi', 19 | name = "%s - High Memory (RAM) usage" % vars.environment, 20 | message = "%s - High Memory (RAM) usage" % vars.environment, 21 | period = '5m', 22 | frequency = '1m', 23 | noDataState = 'alerting', 24 | notifications = vars.notifications, 25 | alertRuleTags = { 26 | 'og_priority': 'P3', 27 | }, 28 | conditions = [ 29 | alertCondition.new( 30 | evaluatorParams = [ 70 ], 31 | evaluatorType = 'gt', 32 | operatorType = 'or', 33 | queryRefId = 'RAM_Avg', 34 | queryTimeStart = '5m', 35 | reducerType = 'avg', 36 | ), 37 | ] 38 | )) 39 | .addTarget(targets.prometheus( 40 | datasource = ds.prometheus, 41 | expr = '(sum(rate(memory_used_sum[$__rate_interval])) / sum(rate(memory_total_sum[$__rate_interval]))) * 100', 42 | refId = 'RAM_Avg', 43 | )) 44 | } 45 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/history/availability.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'Availability', 11 | datasource = ds.prometheus, 12 | ) 13 | .configure( 14 | defaults.configuration.timeseries 15 | .withUnit('percent') 16 | .withSoftLimit( 17 | axisSoftMin = 98, 18 | axisSoftMax = 100, 19 | ) 20 | ) 21 | .addTarget(targets.prometheus( 22 | datasource = ds.prometheus, 23 | expr = '(sum by(provider) (rate(history_lookup_success_counter_total{}[$__rate_interval])) / sum by(provider) (rate(history_lookup_counter_total{}[$__rate_interval]))) * 100', 24 | exemplar = false, 25 | legendFormat = '__auto', 26 | )) 27 | } 28 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/history/latency.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | local _configuration = defaults.configuration.timeseries 8 | .withUnit('s') 9 | .withSoftLimit( 10 | axisSoftMin = 0.4, 11 | axisSoftMax = 1.1, 12 | ); 13 | 14 | { 15 | new(ds, vars):: 16 | panels.timeseries( 17 | title = 'Latency', 18 | datasource = ds.prometheus, 19 | ) 20 | .configure(_configuration) 21 | 22 | .addTarget(targets.prometheus( 23 | datasource = ds.prometheus, 24 | expr = 'sum by(provider) (rate(history_lookup_latency_tracker_sum[$__rate_interval])) / sum by(provider) (rate(history_lookup_latency_tracker_count[$__rate_interval]))', 25 | exemplar = false, 26 | legendFormat = '__auto', 27 | )) 28 | } 29 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/history/requests.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 
| 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'Requests', 11 | datasource = ds.prometheus, 12 | ) 13 | .configure(defaults.configuration.timeseries.withUnit('reqps')) 14 | 15 | .addTarget(targets.prometheus( 16 | datasource = ds.prometheus, 17 | expr = 'sum by(provider) (rate(history_lookup_counter_total{}[$__rate_interval]))', 18 | exemplar = false, 19 | legendFormat = '__auto', 20 | )) 21 | } 22 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/identity/cache.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'Cache-hit ratio', 11 | datasource = ds.prometheus, 12 | ) 13 | .configure( 14 | defaults.configuration.timeseries 15 | .withUnit('percent') 16 | .withSoftLimit( 17 | axisSoftMin = 0, 18 | axisSoftMax = 100, 19 | ) 20 | ) 21 | 22 | .addTarget(targets.prometheus( 23 | datasource = ds.prometheus, 24 | expr = 'sum(rate(identity_lookup_success_counter_total{}[$__rate_interval]))', 25 | refId = "lookups", 26 | exemplar = false, 27 | hide = true, 28 | )) 29 | 30 | .addTarget(targets.prometheus( 31 | datasource = ds.prometheus, 32 | expr = 'sum(rate(identity_lookup_success_counter_total{source="cache"}[$__rate_interval]))', 33 | refId = "cache_hits", 34 | exemplar = false, 35 | hide = true, 36 | )) 37 | .addTarget(targets.math( 38 | expr = '($cache_hits / $lookups) * 100', 39 | refId = "Cache-hits", 40 | )) 41 | } 42 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/identity/requests.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'Requests', 11 | datasource = ds.prometheus, 12 | ) 13 | .configure(defaults.configuration.timeseries.withUnit('reqps')) 14 | 15 | .addTarget(targets.prometheus( 16 | datasource = ds.prometheus, 17 | expr = 'sum(rate(identity_lookup_counter_total{}[$__rate_interval]))', 18 | refId = "Requests", 19 | )) 20 | } 21 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/identity/usage.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'Usage', 11 | datasource = ds.prometheus, 12 | ) 13 | .configure(defaults.configuration.timeseries.withUnit('percent')) 14 | 15 | .addTarget(targets.prometheus( 16 | datasource = ds.prometheus, 17 | expr = 'sum(rate(identity_lookup_success_counter_total{}[$__rate_interval]))', 18 | refId = "lookups", 19 | exemplar = false, 20 | hide = true, 21 | )) 22 | 23 | .addTarget(targets.prometheus( 24 | datasource = 
ds.prometheus, 25 | expr = 'sum(rate(identity_lookup_name_present_counter_total{}[$__rate_interval]))', 26 | refId = "name_present", 27 | exemplar = false, 28 | hide = true, 29 | )) 30 | 31 | .addTarget(targets.prometheus( 32 | datasource = ds.prometheus, 33 | expr = 'sum(rate(identity_lookup_avatar_present_counter_total{}[$__rate_interval]))', 34 | refId = "avatar_present", 35 | exemplar = false, 36 | hide = true, 37 | )) 38 | 39 | .addTarget(targets.math( 40 | expr = '($name_present / $lookups) * 100', 41 | refId = "% of lookups with name", 42 | )) 43 | .addTarget(targets.math( 44 | expr = '($avatar_present / $lookups) * 100', 45 | refId = "% of lookups with avatar", 46 | )) 47 | } 48 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/irn/latency.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | local alert = grafana.alert; 7 | local alertCondition = grafana.alertCondition; 8 | 9 | local _configuration = defaults.configuration.timeseries 10 | .withUnit('s') 11 | .withSoftLimit( 12 | axisSoftMin = 0.4, 13 | axisSoftMax = 1.5, 14 | ); 15 | 16 | local error_alert(vars) = alert.new( 17 | namespace = 'Blockchain API', 18 | name = "%s - IRN Client latency" % vars.environment, 19 | message = "%s - IRN Client latency" % vars.environment, 20 | period = '5m', 21 | frequency = '1m', 22 | noDataState = 'no_data', 23 | notifications = vars.notifications, 24 | alertRuleTags = { 25 | 'og_priority': 'P3', 26 | }, 27 | 28 | conditions = [ 29 | alertCondition.new( 30 | evaluatorParams = [ 1.5 ], 31 | evaluatorType = 'gt', 32 | operatorType = 'or', 33 | queryRefId = 'IrnResponseLatency', 34 | queryTimeStart = '5m', 35 | reducerType = 'avg', 36 | ), 37 | ] 38 | ); 39 | 40 | { 41 | new(ds, vars):: 42 | panels.timeseries( 43 | title = 'Latency', 44 | datasource = ds.prometheus, 45 | ) 46 | .configure(_configuration) 47 | 48 | .setAlert(vars.environment, error_alert(vars)) 49 | 50 | .addTarget(targets.prometheus( 51 | datasource = ds.prometheus, 52 | expr = 'sum(rate(irn_latency_tracker_sum[$__rate_interval])) / sum(rate(irn_latency_tracker_count[$__rate_interval]))', 53 | refId = 'IrnResponseLatency', 54 | legendFormat = 'IRN response latency', 55 | )) 56 | } 57 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/lb/active_connections.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'Active Connections', 11 | datasource = ds.cloudwatch, 12 | ) 13 | .configure(defaults.configuration.timeseries) 14 | 15 | .addTarget(targets.cloudwatch( 16 | datasource = ds.cloudwatch, 17 | namespace = 'AWS/ApplicationELB', 18 | metricName = 'ActiveConnectionCount', 19 | dimensions = { 20 | LoadBalancer: vars.load_balancer 21 | }, 22 | statistic = 'Average', 23 | )) 24 | } 25 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/lb/healthy_hosts.libsonnet: 
-------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | local _configuration = defaults.configuration.timeseries 8 | .withSoftLimit( 9 | axisSoftMin = 0, 10 | axisSoftMax = 5, 11 | ); 12 | 13 | { 14 | new(ds, vars):: 15 | panels.timeseries( 16 | title = 'Healthy Hosts', 17 | datasource = ds.cloudwatch, 18 | ) 19 | .configure(_configuration) 20 | 21 | .addTarget(targets.cloudwatch( 22 | datasource = ds.cloudwatch, 23 | metricQueryType = grafana.target.cloudwatch.queryTypes.Query, 24 | 25 | dimensions = { 26 | TargetGroup: vars.target_group 27 | }, 28 | metricName = 'HealthyHostCount', 29 | namespace = 'AWS/ApplicationELB', 30 | sql = { 31 | from: { 32 | property: { 33 | name: "AWS/ApplicationELB", 34 | type: "string" 35 | }, 36 | type: "property" 37 | }, 38 | select: { 39 | name: "MAX", 40 | parameters: [ 41 | { 42 | name: "HealthyHostCount", 43 | type: "functionParameter" 44 | } 45 | ], 46 | type: "function" 47 | }, 48 | where: { 49 | expressions: [ 50 | { 51 | operator: { 52 | name: "=", 53 | value: vars.load_balancer 54 | }, 55 | property: { 56 | name: "LoadBalancer", 57 | type: "string" 58 | }, 59 | type: "operator" 60 | } 61 | ], 62 | type: "and" 63 | } 64 | }, 65 | sqlExpression = "SELECT MAX(HealthyHostCount) FROM \"AWS/ApplicationELB\" WHERE LoadBalancer = '%s'" % [vars.load_balancer], 66 | statistic = 'Maximum', 67 | )) 68 | } 69 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/lb/latency.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | local alert = grafana.alert; 7 | local alertCondition = grafana.alertCondition; 8 | 9 | { 10 | new(ds, vars):: 11 | panels.timeseries( 12 | title = 'Target response time', 13 | datasource = ds.cloudwatch, 14 | ) 15 | .configure(defaults.configuration.timeseries.withUnit('s')) 16 | 17 | .setAlert(vars.environment, alert.new( 18 | namespace = 'Blockchain API', 19 | name = "%s - ELB High target response time" % vars.environment, 20 | message = "%s - ELB High target response time" % vars.environment, 21 | period = '5m', 22 | frequency = '1m', 23 | noDataState = 'no_data', 24 | notifications = vars.notifications, 25 | alertRuleTags = { 26 | 'og_priority': 'P3', 27 | }, 28 | conditions = [ 29 | alertCondition.new( 30 | evaluatorParams = [ 3 ], 31 | evaluatorType = 'gt', 32 | operatorType = 'or', 33 | queryRefId = 'ELBTargetLatency', 34 | queryTimeStart = '5m', 35 | reducerType = 'avg', 36 | ), 37 | ] 38 | )) 39 | 40 | .addTarget(targets.cloudwatch( 41 | datasource = ds.cloudwatch, 42 | namespace = 'AWS/ApplicationELB', 43 | metricName = 'TargetResponseTime', 44 | dimensions = { 45 | LoadBalancer: vars.load_balancer 46 | }, 47 | statistic = 'Average', 48 | refId = 'ELBTargetLatency', 49 | )) 50 | } 51 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/lb/requests.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local 
defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'Requests', 11 | datasource = ds.cloudwatch, 12 | ) 13 | .configure(defaults.configuration.timeseries) 14 | 15 | .addTarget(targets.cloudwatch( 16 | alias = 'Requests', 17 | datasource = ds.cloudwatch, 18 | namespace = 'AWS/ApplicationELB', 19 | metricName = 'RequestCount', 20 | dimensions = { 21 | LoadBalancer: vars.load_balancer 22 | }, 23 | matchExact = true, 24 | statistic = 'Sum', 25 | refId = 'Requests', 26 | )) 27 | } 28 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/names/registered.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'Names registered', 11 | datasource = ds.prometheus, 12 | ) 13 | .configure(defaults.configuration.timeseries) 14 | 15 | .addTarget(targets.prometheus( 16 | datasource = ds.prometheus, 17 | expr = 'max(account_names_count)', 18 | refId = "Names count", 19 | )) 20 | } 21 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/non_rpc/cache_latency.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'Cache latency', 11 | datasource = ds.prometheus, 12 | ) 13 | .configure(defaults.configuration.timeseries) 14 | 15 | .addTarget(targets.prometheus( 16 | datasource = ds.prometheus, 17 | expr = 'sum(rate(non_rpc_providers_cache_latency_tracker_sum[$__rate_interval])) / sum(rate(non_rpc_providers_cache_latency_tracker_count[$__rate_interval]))', 18 | legendFormat = '__auto', 19 | )) 20 | } 21 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/non_rpc/endpoints_latency.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars, provider):: 9 | panels.timeseries( 10 | title = provider, 11 | datasource = ds.prometheus, 12 | ) 13 | .configure(defaults.configuration.timeseries) 14 | 15 | .addTarget(targets.prometheus( 16 | datasource = ds.prometheus, 17 | expr = 'sum by(endpoint) (rate(http_external_latency_tracker_sum{provider="%s"}[$__rate_interval])) / sum by(endpoint) (rate(http_external_latency_tracker_count{provider="%s"}[$__rate_interval]))' % [provider, provider], 18 | legendFormat = '__auto', 19 | )) 20 | } 21 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/projects/cache_latency.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import 
'../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | local alert = grafana.alert; 7 | local alertCondition = grafana.alertCondition; 8 | 9 | local _configuration = defaults.configuration.timeseries 10 | .withUnit('ms'); 11 | 12 | { 13 | new(ds, vars):: 14 | panels.timeseries( 15 | title = 'Cache latency', 16 | datasource = ds.prometheus, 17 | ) 18 | .configure(_configuration) 19 | 20 | .setAlert(vars.environment, alert.new( 21 | namespace = 'Blockchain API', 22 | name = "%s - ELB High projects registry cache latency" % vars.environment, 23 | message = "%s - ELB High projects registry cache latency" % vars.environment, 24 | period = '5m', 25 | frequency = '1m', 26 | noDataState = 'no_data', 27 | notifications = vars.notifications, 28 | alertRuleTags = { 29 | 'og_priority': 'P3', 30 | }, 31 | conditions = [ 32 | alertCondition.new( 33 | evaluatorParams = [ 1000 ], 34 | evaluatorType = 'gt', 35 | operatorType = 'or', 36 | queryRefId = 'ProjectsRegistryCacheLatency', 37 | queryTimeStart = '5m', 38 | reducerType = 'avg', 39 | ), 40 | ] 41 | )) 42 | 43 | .addTarget(targets.prometheus( 44 | datasource = ds.prometheus, 45 | expr = 'sum(rate(project_data_local_cache_time_sum[$__rate_interval])) / sum(rate(project_data_local_cache_time_count[$__rate_interval]))', 46 | refId = 'ProjectsRegistryCacheLatency', 47 | legendFormat = 'Cache', 48 | )) 49 | } 50 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/projects/fetch_latency.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | local _configuration = defaults.configuration.timeseries 8 | .withUnit('ms'); 9 | 10 | { 11 | new(ds, vars):: 12 | panels.timeseries( 13 | title = 'Fetch latency', 14 | datasource = ds.prometheus, 15 | ) 16 | .configure(_configuration) 17 | 18 | .addTarget(targets.prometheus( 19 | datasource = ds.prometheus, 20 | expr = 'sum(rate(project_data_registry_api_time_sum[$__rate_interval])) / sum(rate(project_data_registry_api_time_count[$__rate_interval]))', 21 | refId = 'ProjectsRegistryFetchLatency', 22 | legendFormat = 'Fetch', 23 | )) 24 | } 25 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/projects/quota_limited_projects.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'Quota limited Project IDs', 11 | datasource = ds.prometheus, 12 | ) 13 | .configure(defaults.configuration.timeseries) 14 | 15 | .addTarget(targets.prometheus( 16 | datasource = ds.prometheus, 17 | expr = 'sum (increase(quota_limited_project_counter_total[5m]))', 18 | legendFormat = '__auto', 19 | )) 20 | } 21 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/projects/rejected_projects.libsonnet: 
-------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'Rejected Project IDs', 11 | datasource = ds.prometheus, 12 | ) 13 | .configure(defaults.configuration.timeseries) 14 | 15 | .addTarget(targets.prometheus( 16 | datasource = ds.prometheus, 17 | expr = 'sum(rate(rejected_project_counter_total[$__rate_interval]))', 18 | legendFormat = '__auto', 19 | )) 20 | } 21 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/proxy/calls.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'Calls by Chain ID', 11 | datasource = ds.prometheus, 12 | ) 13 | .configure(defaults.configuration.timeseries) 14 | 15 | .addTarget(targets.prometheus( 16 | datasource = ds.prometheus, 17 | expr = 'sum by(chain_id) (increase(rpc_call_counter_total{}[$__rate_interval]))', 18 | exemplar = false, 19 | legendFormat = '__auto', 20 | )) 21 | } 22 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/proxy/chains_unavailability.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | local alert = grafana.alert; 7 | local alertCondition = grafana.alertCondition; 8 | 9 | { 10 | new(ds, vars):: 11 | panels.timeseries( 12 | title = 'ChainID Unavailability', 13 | datasource = ds.prometheus, 14 | ) 15 | .configure(defaults.configuration.timeseries) 16 | .setAlert( 17 | vars.environment, 18 | grafana.alert.new( 19 | namespace = vars.namespace, 20 | name = "%(env)s - RPC chain unavailability alert" % { env: grafana.utils.strings.capitalize(vars.environment) }, 21 | message = '%(env)s - RPC chain unavailability alert' % { env: grafana.utils.strings.capitalize(vars.environment) }, 22 | notifications = vars.notifications, 23 | noDataState = 'no_data', 24 | period = '5m', 25 | conditions = [ 26 | grafana.alertCondition.new( 27 | evaluatorParams = [ 10 ], 28 | evaluatorType = 'gt', 29 | operatorType = 'or', 30 | queryRefId = 'ChainsUnavailability', 31 | queryTimeStart = '15m', 32 | queryTimeEnd = 'now', 33 | reducerType = grafana.alert_reducers.Avg 34 | ), 35 | ], 36 | ), 37 | ) 38 | 39 | .addTarget(targets.prometheus( 40 | datasource = ds.prometheus, 41 | expr = 'sum by(chain_id) (increase(no_providers_for_chain_counter_total{}[$__rate_interval]))', 42 | exemplar = false, 43 | legendFormat = '__auto', 44 | refId = "ChainsUnavailability", 45 | )) 46 | } 47 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/proxy/errors_non_provider.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | 
local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | local alert = grafana.alert; 7 | local alertCondition = grafana.alertCondition; 8 | 9 | { 10 | new(ds, vars):: 11 | panels.timeseries( 12 | title = 'Non-Provider Errors', 13 | datasource = ds.prometheus, 14 | ) 15 | .configure(defaults.configuration.timeseries) 16 | .setAlert( 17 | vars.environment, 18 | grafana.alert.new( 19 | namespace = vars.namespace, 20 | name = "%(env)s - Non-Provider Errors alert" % { env: grafana.utils.strings.capitalize(vars.environment) }, 21 | message = '%(env)s - Non-Provider Errors alert' % { env: grafana.utils.strings.capitalize(vars.environment) }, 22 | notifications = vars.notifications, 23 | noDataState = 'no_data', 24 | period = '0m', 25 | conditions = [ 26 | grafana.alertCondition.new( 27 | evaluatorParams = [ 0 ], 28 | evaluatorType = 'gt', 29 | operatorType = 'or', 30 | queryRefId = 'NonProviderErrors', 31 | queryTimeStart = '15m', 32 | queryTimeEnd = 'now', 33 | reducerType = grafana.alert_reducers.Avg 34 | ), 35 | ], 36 | ), 37 | ) 38 | 39 | .addTarget(targets.prometheus( 40 | datasource = ds.prometheus, 41 | expr = 'sum by(code) (rate(http_call_counter_total{code=~"5[0-9][0-24-9]"}[$__rate_interval]))', 42 | refId = "NonProviderErrors", 43 | exemplar = true, 44 | legendFormat = '__auto', 45 | )) 46 | } 47 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/proxy/errors_provider.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | local alert = grafana.alert; 7 | local alertCondition = grafana.alertCondition; 8 | 9 | { 10 | new(ds, vars):: 11 | panels.timeseries( 12 | title = 'Provider Errors', 13 | datasource = ds.prometheus, 14 | ) 15 | .configure(defaults.configuration.timeseries) 16 | 17 | .addTarget(targets.prometheus( 18 | datasource = ds.prometheus, 19 | expr = 'sum(rate(http_call_counter_total{code=\"503\"}[$__rate_interval]))', 20 | refId = "service_unavailable", 21 | )) 22 | } 23 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/proxy/http_codes.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | local _configuration = defaults.configuration.timeseries 8 | .withUnit('cpm') 9 | .withSoftLimit( 10 | axisSoftMin = 0.4, 11 | axisSoftMax = 1.1, 12 | ); 13 | 14 | { 15 | new(ds, vars):: 16 | panels.timeseries( 17 | title = 'HTTP Response Codes', 18 | datasource = ds.prometheus, 19 | ) 20 | .configure(_configuration) 21 | 22 | .addTarget(targets.prometheus( 23 | datasource = ds.prometheus, 24 | expr = 'sum by (code)(rate(http_call_counter_total{}[5m]))', 25 | exemplar = false, 26 | legendFormat = '__auto', 27 | )) 28 | } 29 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/proxy/latency.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import 
'../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | local _configuration = defaults.configuration.timeseries 8 | .withUnit('s') 9 | .withSoftLimit( 10 | axisSoftMin = 0.4, 11 | axisSoftMax = 1.1, 12 | ); 13 | 14 | { 15 | new(ds, vars):: 16 | panels.timeseries( 17 | title = 'Latency', 18 | datasource = ds.prometheus, 19 | ) 20 | .configure(_configuration) 21 | 22 | .addTarget(targets.prometheus( 23 | datasource = ds.prometheus, 24 | expr = 'sum by(provider) (rate(http_external_latency_tracker_sum[$__rate_interval])) / sum by(provider) (rate(http_external_latency_tracker_count[$__rate_interval]))', 25 | exemplar = false, 26 | legendFormat = '__auto', 27 | )) 28 | } 29 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/proxy/rpc_retries.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | local _configuration = defaults.configuration.timeseries 8 | .withUnit('cpm'); 9 | 10 | { 11 | new(ds, vars):: 12 | panels.timeseries( 13 | title = 'Provider call retries', 14 | datasource = ds.prometheus, 15 | ) 16 | .configure(_configuration) 17 | 18 | .addTarget(targets.prometheus( 19 | datasource = ds.prometheus, 20 | expr = 'sum by (chain_id)(rate(rpc_call_retries_sum{}[$__rate_interval]))', 21 | exemplar = false, 22 | legendFormat = '__auto', 23 | )) 24 | } 25 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/proxy/websocket_connections.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'WebSocket connections by chain', 11 | datasource = ds.prometheus, 12 | ) 13 | .configure(defaults.configuration.timeseries) 14 | 15 | .addTarget(targets.prometheus( 16 | datasource = ds.prometheus, 17 | expr = 'sum by(chain_id) (increase(websocket_connection_counter_total{}[$__rate_interval]))', 18 | exemplar = false, 19 | legendFormat = '__auto', 20 | )) 21 | } 22 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/rate_limiting/counter.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | local alert = grafana.alert; 7 | local alertCondition = grafana.alertCondition; 8 | 9 | { 10 | new(ds, vars):: 11 | panels.timeseries( 12 | title = 'Rate limited entries count', 13 | datasource = ds.prometheus, 14 | ) 15 | .configure(defaults.configuration.timeseries) 16 | 17 | .setAlert(vars.environment, alert.new( 18 | namespace = 'Blockchain API', 19 | name = "%s - High rate-limiting entries count" % vars.environment, 20 | message = "%s - High rate-limiting entries count" % 
vars.environment, 21 | period = '15m', 22 | frequency = '5m', 23 | noDataState = 'alerting', 24 | notifications = vars.notifications, 25 | alertRuleTags = { 26 | 'og_priority': 'P3', 27 | }, 28 | conditions = [ 29 | alertCondition.new( 30 | evaluatorParams = [ 250 ], 31 | evaluatorType = 'gt', 32 | operatorType = 'or', 33 | queryRefId = 'Rate_limited_count', 34 | queryTimeStart = '5m', 35 | reducerType = 'avg', 36 | ), 37 | ] 38 | )) 39 | 40 | .addTarget(targets.prometheus( 41 | datasource = ds.prometheus, 42 | expr = 'max(rate(rate_limited_entries_sum{}[$__rate_interval]) / rate(rate_limited_entries_count{}[$__rate_interval]))', 43 | legendFormat = 'app in-memory entries', 44 | refId = 'Rate_limited_count', 45 | )) 46 | } 47 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/rate_limiting/latency.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | local alert = grafana.alert; 7 | local alertCondition = grafana.alertCondition; 8 | 9 | local _configuration = defaults.configuration.timeseries 10 | .withUnit('ms'); 11 | 12 | { 13 | new(ds, vars):: 14 | panels.timeseries( 15 | title = 'Rate limiter latency', 16 | datasource = ds.prometheus, 17 | ) 18 | .configure(_configuration) 19 | 20 | .setAlert(vars.environment, alert.new( 21 | namespace = 'Blockchain API', 22 | name = "%s - Rate limiter high latency" % vars.environment, 23 | message = "%s - Rate limiter high latency" % vars.environment, 24 | period = '5m', 25 | frequency = '1m', 26 | noDataState = 'no_data', 27 | notifications = vars.notifications, 28 | alertRuleTags = { 29 | 'og_priority': 'P3', 30 | }, 31 | conditions = [ 32 | alertCondition.new( 33 | evaluatorParams = [ 100 ], 34 | evaluatorType = 'gt', 35 | operatorType = 'or', 36 | queryRefId = 'RateLimiterLatency', 37 | queryTimeStart = '5m', 38 | reducerType = 'avg', 39 | ), 40 | ] 41 | )) 42 | 43 | .addTarget(targets.prometheus( 44 | datasource = ds.prometheus, 45 | expr = 'sum(rate(rate_limiting_latency_tracker_sum[$__rate_interval])) / sum(rate(rate_limiting_latency_tracker_count[$__rate_interval]))', 46 | refId = 'RateLimiterLatency', 47 | legendFormat = 'Latency', 48 | )) 49 | } 50 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/rate_limiting/rate_limited.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'Rate limited responses', 11 | datasource = ds.prometheus, 12 | ) 13 | .configure(defaults.configuration.timeseries) 14 | 15 | .addTarget(targets.prometheus( 16 | datasource = ds.prometheus, 17 | expr = 'sum(rate(rate_limited_responses_counter_total{}[$__rate_interval]))', 18 | exemplar = false, 19 | legendFormat = '__auto', 20 | )) 21 | } 22 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/status/provider.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import 
'../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars, provider):: 9 | panels.timeseries( 10 | title = provider, 11 | datasource = ds.prometheus, 12 | ) 13 | .configure(defaults.configuration.timeseries) 14 | .addTarget(targets.prometheus( 15 | datasource = ds.prometheus, 16 | expr = 'sum by(status_code) (increase(provider_status_code_counter_total{provider="%s"}[$__rate_interval]))' % provider, 17 | legendFormat = '__auto', 18 | )) 19 | } 20 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/weights/provider.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars, provider):: 9 | panels.timeseries( 10 | title = provider, 11 | datasource = ds.prometheus, 12 | ) 13 | .configure(defaults.configuration.timeseries) 14 | 15 | .addTarget(targets.prometheus( 16 | datasource = ds.prometheus, 17 | expr = 'sum by (chain_id) (increase(provider_weights_sum{provider="%s"}[5m])) / sum by (chain_id) (increase(provider_weights_count{provider="%s"}[5m]))' % [provider, provider], 18 | legendFormat = '__auto', 19 | )) 20 | } 21 | -------------------------------------------------------------------------------- /terraform/monitoring/terraform.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0" 3 | 4 | required_providers { 5 | grafana = { 6 | source = "grafana/grafana" 7 | version = ">= 2.1" 8 | } 9 | jsonnet = { 10 | source = "alxrem/jsonnet" 11 | version = "~> 2.2.0" 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /terraform/monitoring/variables.tf: -------------------------------------------------------------------------------- 1 | variable "monitoring_role_arn" { 2 | description = "The ARN of the monitoring role." 3 | type = string 4 | } 5 | 6 | variable "notification_channels" { 7 | description = "The notification channels to send alerts to" 8 | type = list(any) 9 | } 10 | 11 | variable "prometheus_endpoint" { 12 | description = "The endpoint for the Prometheus server." 13 | type = string 14 | } 15 | 16 | variable "ecs_service_name" { 17 | description = "The name of the ECS service." 18 | type = string 19 | } 20 | 21 | variable "ecs_task_family" { 22 | description = "The name of the ECS task family." 23 | type = string 24 | } 25 | 26 | variable "ecs_target_group_arn" { 27 | description = "The ARN of the ECS LB target group." 28 | type = string 29 | } 30 | 31 | variable "load_balancer_arn" { 32 | description = "The ARN of the load balancer." 33 | type = string 34 | } 35 | 36 | variable "redis_cluster_id" { 37 | description = "The ID of the Redis ElastiCache cluster." 38 | type = string 39 | } 40 | 41 | variable "log_group_app_name" { 42 | description = "The name of the log group for the app" 43 | type = string 44 | } 45 | 46 | variable "log_group_app_arn" { 47 | description = "The ARN of the log group for the app" 48 | type = string 49 | } 50 | 51 | variable "aws_account_id" { 52 | description = "The AWS account ID."
53 | type = string 54 | } 55 | 56 | -------------------------------------------------------------------------------- /terraform/outputs.tf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reown-com/blockchain-api/5866492c9d236a7f9fd66a6f08ff2c2d4136c899/terraform/outputs.tf -------------------------------------------------------------------------------- /terraform/postgres/main.tf: -------------------------------------------------------------------------------- 1 | data "aws_caller_identity" "this" {} 2 | 3 | resource "aws_db_subnet_group" "db_subnets" { 4 | name = module.this.id 5 | description = "Subnet group for the ${module.this.id} RDS cluster" 6 | subnet_ids = var.subnet_ids 7 | } 8 | 9 | module "db_cluster" { 10 | source = "terraform-aws-modules/rds-aurora/aws" 11 | version = "8.5.0" 12 | 13 | name = module.this.id 14 | database_name = var.db_name 15 | engine = "aurora-postgresql" 16 | engine_version = "15.10" 17 | engine_mode = "provisioned" 18 | ca_cert_identifier = "rds-ca-ecc384-g1" 19 | instance_class = "db.serverless" 20 | instances = { for i in range(1, var.instances + 1) : i => {} } 21 | 22 | master_username = var.db_master_username 23 | manage_master_user_password = false 24 | master_password = local.db_master_password 25 | 26 | vpc_id = var.vpc_id 27 | db_subnet_group_name = aws_db_subnet_group.db_subnets.name 28 | security_group_rules = { 29 | vpc_ingress = { 30 | cidr_blocks = var.ingress_cidr_blocks 31 | } 32 | } 33 | 34 | performance_insights_enabled = true 35 | storage_encrypted = true 36 | allow_major_version_upgrade = true 37 | apply_immediately = true 38 | skip_final_snapshot = true 39 | deletion_protection = true 40 | 41 | monitoring_interval = 30 42 | enabled_cloudwatch_logs_exports = ["postgresql"] 43 | cloudwatch_log_group_kms_key_id = var.cloudwatch_logs_key_arn 44 | cloudwatch_log_group_retention_in_days = var.cloudwatch_retention_in_days 45 | 46 | serverlessv2_scaling_configuration = { 47 | min_capacity = module.this.stage == "prod" ? 
var.min_capacity : 0.5 48 | max_capacity = var.max_capacity 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /terraform/postgres/outputs.tf: -------------------------------------------------------------------------------- 1 | output "database_name" { 2 | description = "The name of the default database in the cluster" 3 | value = var.db_name 4 | } 5 | 6 | output "master_username" { 7 | description = "The username for the master DB user" 8 | value = var.db_master_username 9 | } 10 | 11 | output "master_password_id" { 12 | description = "The ID of the database master password in Secrets Manager" 13 | value = aws_secretsmanager_secret.db_master_password.id 14 | } 15 | 16 | output "rds_cluster_arn" { 17 | description = "The ARN of the cluster" 18 | value = module.db_cluster.cluster_arn 19 | } 20 | 21 | output "rds_cluster_id" { 22 | description = "The ID of the cluster" 23 | value = module.db_cluster.cluster_id 24 | } 25 | 26 | output "rds_cluster_endpoint" { 27 | description = "The cluster endpoint" 28 | value = module.db_cluster.cluster_endpoint 29 | } 30 | 31 | output "database_url" { 32 | description = "The URL used to connect to the cluster" 33 | value = "postgres://${module.db_cluster.cluster_master_username}:${module.db_cluster.cluster_master_password}@${module.db_cluster.cluster_endpoint}:${module.db_cluster.cluster_port}/${var.db_name}" 34 | } 35 | -------------------------------------------------------------------------------- /terraform/postgres/password.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | db_master_password = var.db_master_password == "" ? random_password.db_master_password[0].result : var.db_master_password 3 | } 4 | 5 | resource "random_password" "db_master_password" { 6 | count = var.db_master_password == "" ? 
1 : 0 7 | length = 16 8 | special = false 9 | } 10 | 11 | resource "aws_kms_key" "db_master_password" { 12 | description = "KMS key for the ${module.this.id} RDS cluster master password" 13 | enable_key_rotation = true 14 | 15 | policy = jsonencode({ 16 | Version = "2012-10-17" 17 | Statement = [ 18 | { 19 | Sid = "Enable IAM User Permissions" 20 | Effect = "Allow" 21 | Principal = { 22 | AWS = data.aws_caller_identity.this.account_id 23 | } 24 | Action = "kms:*" 25 | Resource = "*" 26 | }, 27 | ] 28 | }) 29 | } 30 | 31 | resource "aws_kms_alias" "db_master_password" { 32 | name = "alias/${module.this.id}-master-password" 33 | target_key_id = aws_kms_key.db_master_password.id 34 | } 35 | 36 | resource "aws_secretsmanager_secret" "db_master_password" { 37 | name = "${module.this.id}-master-password" 38 | kms_key_id = aws_kms_key.db_master_password.arn 39 | } 40 | 41 | resource "aws_secretsmanager_secret_version" "db_master_password" { 42 | secret_id = aws_secretsmanager_secret.db_master_password.id 43 | secret_string = local.db_master_password 44 | } 45 | -------------------------------------------------------------------------------- /terraform/postgres/terraform.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "~> 1.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = "~> 5.7" 8 | } 9 | random = { 10 | source = "hashicorp/random" 11 | version = "~> 3.5" 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /terraform/providers.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | region = var.region 3 | 4 | default_tags { 5 | tags = module.this.tags 6 | } 7 | } 8 | 9 | provider "grafana" { 10 | url = "https://${data.terraform_remote_state.monitoring.outputs.grafana_workspaces.central.grafana_endpoint}" 11 | auth = var.grafana_auth 12 | } 13 | -------------------------------------------------------------------------------- /terraform/redis/main.tf: -------------------------------------------------------------------------------- 1 | data "aws_vpc" "vpc" { 2 | id = var.vpc_id 3 | } 4 | 5 | resource "aws_elasticache_cluster" "cache" { 6 | cluster_id = module.this.id 7 | engine = "redis" 8 | node_type = var.node_type 9 | num_cache_nodes = var.num_cache_nodes 10 | parameter_group_name = "default.redis6.x" 11 | engine_version = var.node_engine_version 12 | port = 6379 13 | subnet_group_name = aws_elasticache_subnet_group.private_subnets.name 14 | security_group_ids = [ 15 | aws_security_group.service_security_group.id 16 | ] 17 | snapshot_retention_limit = 2 18 | } 19 | 20 | resource "aws_elasticache_subnet_group" "private_subnets" { 21 | name = "${module.this.id}-private-subnet-group" 22 | subnet_ids = var.subnets_ids 23 | } 24 | 25 | # Allow only the app to access Redis 26 | resource "aws_security_group" "service_security_group" { 27 | name = "${module.this.id}-redis-service-ingress" 28 | description = "Allow ingress from the application" 29 | vpc_id = var.vpc_id 30 | ingress { 31 | description = "${module.this.id} - ingress from application" 32 | from_port = 6379 33 | to_port = 6379 34 | protocol = "TCP" 35 | cidr_blocks = var.ingress_cidr_blocks == null ? 
[data.aws_vpc.vpc.cidr_block] : var.ingress_cidr_blocks 36 | } 37 | 38 | egress { 39 | description = "${module.this.id} - egress to application" 40 | from_port = 0 # Allowing any incoming port 41 | to_port = 0 # Allowing any outgoing port 42 | protocol = "-1" # Allowing any outgoing protocol 43 | cidr_blocks = var.egress_cidr_blocks == null ? [data.aws_vpc.vpc.cidr_block] : var.egress_cidr_blocks 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /terraform/redis/outputs.tf: -------------------------------------------------------------------------------- 1 | output "cluster_id" { 2 | description = "The ID of the cluster" 3 | value = aws_elasticache_cluster.cache.id 4 | } 5 | 6 | output "endpoint" { 7 | description = "The endpoint of the Redis cluster" 8 | value = "${aws_elasticache_cluster.cache.cache_nodes[0].address}:${aws_elasticache_cluster.cache.cache_nodes[0].port}" 9 | } 10 | -------------------------------------------------------------------------------- /terraform/redis/terraform.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "~> 1.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = "~> 5.7" 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /terraform/redis/variables.tf: -------------------------------------------------------------------------------- 1 | #------------------------------------------------------------------------------- 2 | # Nodes Configuration 3 | 4 | variable "node_type" { 5 | description = "The instance type to use for the database nodes" 6 | type = string 7 | default = "cache.t4g.small" # https://aws.amazon.com/elasticache/pricing/?nc=sn&loc=5#On-demand_nodes 8 | } 9 | 10 | variable "num_cache_nodes" { 11 | description = "The number of nodes to create in the cluster" 12 | type = number 13 | default = 1 14 | } 15 | 16 | variable "node_engine_version" { 17 | description = "The version of Redis to use" 18 | type = string 19 | default = "6.x" 20 | } 21 | 22 | #------------------------------------------------------------------------------- 23 | # Networking 24 | 25 | variable "vpc_id" { 26 | description = "The VPC ID to create the security group in" 27 | type = string 28 | } 29 | 30 | variable "subnets_ids" { 31 | description = "The list of subnet IDs to create the cluster in" 32 | type = set(string) 33 | } 34 | 35 | variable "ingress_cidr_blocks" { 36 | description = "The CIDR blocks to allow ingress from, default to VPC only." 37 | type = set(string) 38 | default = null 39 | } 40 | 41 | variable "egress_cidr_blocks" { 42 | description = "The CIDR blocks to allow egress to, default to VPC only." 
43 | type = set(string) 44 | default = null 45 | } 46 | -------------------------------------------------------------------------------- /terraform/res_alerting.tf: -------------------------------------------------------------------------------- 1 | module "alerting" { 2 | source = "./alerting" 3 | context = module.this 4 | 5 | webhook_cloudwatch_p2 = var.webhook_cloudwatch_p2 6 | webhook_prometheus_p2 = var.webhook_prometheus_p2 7 | 8 | ecs_cluster_name = module.ecs.ecs_cluster_name 9 | ecs_service_name = module.ecs.ecs_service_name 10 | 11 | redis_cluster_id = module.redis.cluster_id 12 | } 13 | -------------------------------------------------------------------------------- /terraform/res_db.tf: -------------------------------------------------------------------------------- 1 | module "db_context" { 2 | source = "app.terraform.io/wallet-connect/label/null" 3 | version = "0.3.2" 4 | context = module.this 5 | 6 | attributes = [ 7 | "db" 8 | ] 9 | } 10 | 11 | module "postgres" { 12 | source = "./postgres" 13 | context = module.db_context 14 | attributes = ["postgres"] 15 | 16 | vpc_id = module.vpc.vpc_id 17 | subnet_ids = module.vpc.intra_subnets 18 | ingress_cidr_blocks = module.vpc.private_subnets_cidr_blocks 19 | 20 | cloudwatch_logs_key_arn = aws_kms_key.cloudwatch_logs.arn 21 | 22 | depends_on = [aws_iam_role.application_role] 23 | } 24 | -------------------------------------------------------------------------------- /terraform/res_dns.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | zones = { for k, v in tomap(data.terraform_remote_state.infra_aws.outputs.zones.blockchain[local.stage]) : v.id => v.name } 3 | zones_certificates = { for k, v in module.dns_certificate : v.zone_id => v.certificate_arn } 4 | } 5 | 6 | module "dns_certificate" { 7 | for_each = local.zones 8 | source = "app.terraform.io/wallet-connect/dns/aws" 9 | version = "0.1.3" 10 | context = module.this 11 | hosted_zone_name = each.value 12 | fqdn = each.value 13 | } 14 | -------------------------------------------------------------------------------- /terraform/res_monitoring.tf: -------------------------------------------------------------------------------- 1 | module "monitoring" { 2 | source = "./monitoring" 3 | context = module.this 4 | 5 | monitoring_role_arn = data.terraform_remote_state.monitoring.outputs.grafana_workspaces.central.iam_role_arn 6 | 7 | notification_channels = var.notification_channels 8 | prometheus_endpoint = aws_prometheus_workspace.prometheus.prometheus_endpoint 9 | ecs_service_name = module.ecs.ecs_service_name 10 | ecs_task_family = module.ecs.ecs_task_family 11 | ecs_target_group_arn = module.ecs.target_group_arn 12 | load_balancer_arn = module.ecs.load_balancer_arn_suffix 13 | redis_cluster_id = module.redis.cluster_id 14 | log_group_app_name = module.ecs.log_group_app_name 15 | log_group_app_arn = module.ecs.log_group_app_arn 16 | aws_account_id = data.aws_caller_identity.this.account_id 17 | } 18 | -------------------------------------------------------------------------------- /terraform/res_redis.tf: -------------------------------------------------------------------------------- 1 | module "redis" { 2 | source = "./redis" 3 | context = module.this 4 | 5 | vpc_id = module.vpc.vpc_id 6 | subnets_ids = module.vpc.intra_subnets 7 | } 8 | -------------------------------------------------------------------------------- /terraform/terraform.tf: -------------------------------------------------------------------------------- 1 | # 
Terraform Configuration 2 | terraform { 3 | required_version = ">= 1.0" 4 | 5 | backend "remote" { 6 | hostname = "app.terraform.io" 7 | organization = "wallet-connect" 8 | workspaces { 9 | prefix = "blockchain-" 10 | } 11 | } 12 | 13 | required_providers { 14 | aws = { 15 | source = "hashicorp/aws" 16 | version = ">= 5.7" 17 | } 18 | grafana = { 19 | source = "grafana/grafana" 20 | version = ">= 2.1" 21 | } 22 | random = { 23 | source = "hashicorp/random" 24 | version = "3.5.1" 25 | } 26 | } 27 | } 28 |
-------------------------------------------------------------------------------- /tests/context/mod.rs: -------------------------------------------------------------------------------- 1 | #[cfg(not(feature = "test-localhost"))] 2 | use std::env; 3 | 4 | use {self::server::RpcProxy, async_trait::async_trait, test_context::AsyncTestContext}; 5 | 6 | mod server; 7 | 8 | pub struct ServerContext { 9 | pub server: RpcProxy, 10 | } 11 | 12 | #[async_trait] 13 | impl AsyncTestContext for ServerContext { 14 | async fn setup() -> Self { 15 | #[cfg(feature = "test-localhost")] 16 | let server = RpcProxy::start().await; 17 | 18 | #[cfg(not(feature = "test-localhost"))] 19 | let server = { 20 | let public_addr = env::var("RPC_URL") 21 | .unwrap_or("https://staging.rpc.walletconnect.org".to_owned()) 22 | .parse() 23 | .unwrap(); 24 | 25 | { 26 | let project_id = env::var("PROJECT_ID").expect("PROJECT_ID must be set"); 27 | RpcProxy { 28 | public_addr, 29 | project_id, 30 | } 31 | } 32 | }; 33 | 34 | Self { server } 35 | } 36 | } 37 |
-------------------------------------------------------------------------------- /tests/context/server.rs: -------------------------------------------------------------------------------- 1 | use url::Url; 2 | 3 | #[cfg(feature = "test-localhost")] 4 | use {rpc_proxy::test_helpers::spawn_blockchain_api, std::env}; 5 | 6 | pub struct RpcProxy { 7 | pub public_addr: Url, 8 | pub project_id: String, 9 | } 10 | 11 | #[derive(Debug, thiserror::Error)] 12 | pub enum Error {} 13 | 14 | #[cfg(feature = "test-localhost")] 15 | impl RpcProxy { 16 | pub async fn start() -> Self { 17 | let public_addr = spawn_blockchain_api().await; 18 | 19 | let project_id = 20 | env::var("TEST_RPC_PROXY_PROJECT_ID").expect("TEST_RPC_PROXY_PROJECT_ID must be set"); 21 | 22 | Self { 23 | public_addr, 24 | project_id, 25 | } 26 | } 27 | } 28 |
-------------------------------------------------------------------------------- /tests/functional/README.md: -------------------------------------------------------------------------------- 1 | # Functional integration tests 2 | 3 | The following functional integration tests are included: 4 | 5 | * Database tests 6 | * Providers tests 7 | * Providers functional tests should be `#[ignore]` by default, because they will be run by 8 | the CI workflow only when the provider code is changed in the `src/providers` 9 | directory. 10 | * Providers test names should be in the format `{provider_name}_provider` and 11 | `{provider_name}_provider_*`, matching the provider's file name in the 12 | `src/providers` directory. 13 | * Example for the `coinbase` provider: 14 | * Implementation source file: `src/providers/coinbase.rs` 15 | Tests for the `coinbase` provider will run only if this file is changed. 16 | * Test implementations for the `coinbase` provider can live in any file, but they should be 17 | `#[ignore]` by default and the test names must start with 18 | `coinbase_provider`. 19 |
-------------------------------------------------------------------------------- /tests/functional/http/allnodes.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::check_if_rpc_is_responding_correctly_for_supported_chain, crate::context::ServerContext, 3 | rpc_proxy::providers::ProviderKind, test_context::test_context, 4 | }; 5 | 6 | #[test_context(ServerContext)] 7 | #[tokio::test] 8 | #[ignore] 9 | async fn allnodes_provider(ctx: &mut ServerContext) { 10 | let provider_kind = ProviderKind::Allnodes; 11 | 12 | // Ethereum Mainnet 13 | check_if_rpc_is_responding_correctly_for_supported_chain( 14 | ctx, 15 | &provider_kind, 16 | "eip155:1", 17 | "0x1", 18 | ) 19 | .await; 20 | } 21 |
-------------------------------------------------------------------------------- /tests/functional/http/arbitrum.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::check_if_rpc_is_responding_correctly_for_supported_chain, crate::context::ServerContext, 3 | rpc_proxy::providers::ProviderKind, test_context::test_context, 4 | }; 5 | 6 | #[test_context(ServerContext)] 7 | #[tokio::test] 8 | #[ignore] 9 | async fn arbitrum_provider(ctx: &mut ServerContext) { 10 | let provider_kind = ProviderKind::Arbitrum; 11 | 12 | // Arbitrum One 13 | check_if_rpc_is_responding_correctly_for_supported_chain( 14 | ctx, 15 | &provider_kind, 16 | "eip155:42161", 17 | "0xa4b1", 18 | ) 19 | .await; 20 | 21 | // Arbitrum Sepolia 22 | check_if_rpc_is_responding_correctly_for_supported_chain( 23 | ctx, 24 | &provider_kind, 25 | "eip155:421614", 26 | "0x66eee", 27 | ) 28 | .await 29 | } 30 |
-------------------------------------------------------------------------------- /tests/functional/http/aurora.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::check_if_rpc_is_responding_correctly_for_supported_chain, crate::context::ServerContext, 3 | rpc_proxy::providers::ProviderKind, test_context::test_context, 4 | }; 5 | 6 | #[test_context(ServerContext)] 7 | #[tokio::test] 8 | #[ignore] 9 | async fn aurora_provider(ctx: &mut ServerContext) { 10 | // Aurora Mainnet 11 | check_if_rpc_is_responding_correctly_for_supported_chain( 12 | ctx, 13 | &ProviderKind::Aurora, 14 | "eip155:1313161554", 15 | "0x4e454152", 16 | ) 17 | .await; 18 | 19 | // Aurora Testnet 20 | check_if_rpc_is_responding_correctly_for_supported_chain( 21 | ctx, 22 | &ProviderKind::Aurora, 23 | "eip155:1313161555", 24 | "0x4e454153", 25 | ) 26 | .await 27 | } 28 |
-------------------------------------------------------------------------------- /tests/functional/http/base.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::check_if_rpc_is_responding_correctly_for_supported_chain, crate::context::ServerContext, 3 | rpc_proxy::providers::ProviderKind, test_context::test_context, 4 | }; 5 | 6 | #[test_context(ServerContext)] 7 | #[tokio::test] 8 | #[ignore] 9 | async fn base_provider_eip155_8453_and_84531(ctx: &mut ServerContext) { 10 | // Base mainnet 11 | check_if_rpc_is_responding_correctly_for_supported_chain( 12 | ctx, 13 | &ProviderKind::Base, 14 | "eip155:8453", 15 | "0x2105", 16 | ) 17 | .await; 18 | 19 | // Base Sepolia 20 | check_if_rpc_is_responding_correctly_for_supported_chain( 21 | ctx, 22 | &ProviderKind::Base, 23 | "eip155:84532", 24 | "0x14a34", 25 | ) 26 | .await 27 | } 28 |
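The provider tests in this directory all follow the naming convention documented in tests/functional/README.md above. As a hedged illustration of that convention, here is a minimal sketch of what a new provider test might look like; it mirrors the existing HTTP provider tests, and the `Examplechain` ProviderKind variant, the chain id, and the expected hex chain id are placeholders rather than values from the repository:

use {
    super::check_if_rpc_is_responding_correctly_for_supported_chain, crate::context::ServerContext,
    rpc_proxy::providers::ProviderKind, test_context::test_context,
};

// Hypothetical sketch: `ProviderKind::Examplechain` does not exist in the repository,
// and the chain id / expected hex id below are placeholders. The test is `#[ignore]`d
// and its name starts with `examplechain_provider`, per the README convention.
#[test_context(ServerContext)]
#[tokio::test]
#[ignore]
async fn examplechain_provider(ctx: &mut ServerContext) {
    check_if_rpc_is_responding_correctly_for_supported_chain(
        ctx,
        &ProviderKind::Examplechain,
        "eip155:1",
        "0x1",
    )
    .await;
}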
-------------------------------------------------------------------------------- /tests/functional/http/binance.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::check_if_rpc_is_responding_correctly_for_supported_chain, crate::context::ServerContext, 3 | rpc_proxy::providers::ProviderKind, test_context::test_context, 4 | }; 5 | 6 | #[test_context(ServerContext)] 7 | #[tokio::test] 8 | #[ignore] 9 | async fn binance_provider_eip155_56_and_97(ctx: &mut ServerContext) { 10 | // Binance mainnet 11 | check_if_rpc_is_responding_correctly_for_supported_chain( 12 | ctx, 13 | &ProviderKind::Binance, 14 | "eip155:56", 15 | "0x38", 16 | ) 17 | .await; 18 | 19 | // Binance testnet 20 | check_if_rpc_is_responding_correctly_for_supported_chain( 21 | ctx, 22 | &ProviderKind::Binance, 23 | "eip155:97", 24 | "0x61", 25 | ) 26 | .await 27 | } 28 | -------------------------------------------------------------------------------- /tests/functional/http/mantle.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::check_if_rpc_is_responding_correctly_for_supported_chain, crate::context::ServerContext, 3 | rpc_proxy::providers::ProviderKind, test_context::test_context, 4 | }; 5 | 6 | #[test_context(ServerContext)] 7 | #[tokio::test] 8 | #[ignore] 9 | async fn mantle_provider(ctx: &mut ServerContext) { 10 | // Mantle mainnet 11 | check_if_rpc_is_responding_correctly_for_supported_chain( 12 | ctx, 13 | &ProviderKind::Mantle, 14 | "eip155:5000", 15 | "0x1388", 16 | ) 17 | .await; 18 | // Mantle testnet 19 | check_if_rpc_is_responding_correctly_for_supported_chain( 20 | ctx, 21 | &ProviderKind::Mantle, 22 | "eip155:5003", 23 | "0x138b", 24 | ) 25 | .await; 26 | } 27 | -------------------------------------------------------------------------------- /tests/functional/http/monad.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::check_if_rpc_is_responding_correctly_for_supported_chain, crate::context::ServerContext, 3 | rpc_proxy::providers::ProviderKind, test_context::test_context, 4 | }; 5 | 6 | #[test_context(ServerContext)] 7 | #[tokio::test] 8 | #[ignore] 9 | async fn monad_provider(ctx: &mut ServerContext) { 10 | // Monad testnet 11 | check_if_rpc_is_responding_correctly_for_supported_chain( 12 | ctx, 13 | &ProviderKind::Monad, 14 | "eip155:10143", 15 | "0x279f", 16 | ) 17 | .await; 18 | } 19 | -------------------------------------------------------------------------------- /tests/functional/http/moonbeam.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::check_if_rpc_is_responding_correctly_for_supported_chain, crate::context::ServerContext, 3 | rpc_proxy::providers::ProviderKind, test_context::test_context, 4 | }; 5 | 6 | #[test_context(ServerContext)] 7 | #[tokio::test] 8 | #[ignore] 9 | async fn moonbeam_provider_eip155_1284(ctx: &mut ServerContext) { 10 | // Moonbeam GLMR 11 | check_if_rpc_is_responding_correctly_for_supported_chain( 12 | ctx, 13 | &ProviderKind::Moonbeam, 14 | "eip155:1284", 15 | "0x504", 16 | ) 17 | .await; 18 | } 19 | -------------------------------------------------------------------------------- /tests/functional/http/morph.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::check_if_rpc_is_responding_correctly_for_supported_chain, crate::context::ServerContext, 3 | rpc_proxy::providers::ProviderKind, 
test_context::test_context, 4 | }; 5 | 6 | #[test_context(ServerContext)] 7 | #[tokio::test] 8 | #[ignore] 9 | async fn morph_provider(ctx: &mut ServerContext) { 10 | let provider = ProviderKind::Morph; 11 | // Morph Mainnet 12 | check_if_rpc_is_responding_correctly_for_supported_chain( 13 | ctx, 14 | &provider, 15 | "eip155:2818", 16 | "0xb02", 17 | ) 18 | .await; 19 | 20 | // Morph Holesky 21 | check_if_rpc_is_responding_correctly_for_supported_chain( 22 | ctx, 23 | &provider, 24 | "eip155:2810", 25 | "0xafa", 26 | ) 27 | .await; 28 | } 29 | -------------------------------------------------------------------------------- /tests/functional/http/near.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::check_if_rpc_is_responding_correctly_for_near_protocol, crate::context::ServerContext, 3 | rpc_proxy::providers::ProviderKind, test_context::test_context, 4 | }; 5 | 6 | #[test_context(ServerContext)] 7 | #[tokio::test] 8 | #[ignore] 9 | async fn near_provider(ctx: &mut ServerContext) { 10 | check_if_rpc_is_responding_correctly_for_near_protocol(ctx, &ProviderKind::Near).await; 11 | } 12 | -------------------------------------------------------------------------------- /tests/functional/http/odyssey.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::check_if_rpc_is_responding_correctly_for_supported_chain, crate::context::ServerContext, 3 | rpc_proxy::providers::ProviderKind, test_context::test_context, 4 | }; 5 | 6 | #[test_context(ServerContext)] 7 | #[tokio::test] 8 | #[ignore] 9 | async fn odyssey_provider(ctx: &mut ServerContext) { 10 | // Odyssey 11 | check_if_rpc_is_responding_correctly_for_supported_chain( 12 | ctx, 13 | &ProviderKind::Odyssey, 14 | "eip155:911867", 15 | "0xde9fb", 16 | ) 17 | .await; 18 | } 19 | -------------------------------------------------------------------------------- /tests/functional/http/sui.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::check_if_rpc_is_responding_correctly_for_sui, crate::context::ServerContext, 3 | rpc_proxy::providers::ProviderKind, test_context::test_context, 4 | }; 5 | 6 | #[test_context(ServerContext)] 7 | #[tokio::test] 8 | #[ignore] 9 | async fn quicknode_provider_sui(ctx: &mut ServerContext) { 10 | let provider = ProviderKind::Publicnode; 11 | // Sui mainnet 12 | check_if_rpc_is_responding_correctly_for_sui(ctx, &provider, "mainnet", "35834a8a").await; 13 | // Sui testnet 14 | check_if_rpc_is_responding_correctly_for_sui(ctx, &provider, "testnet", "4c78adac").await; 15 | // Sui devnet 16 | check_if_rpc_is_responding_correctly_for_sui(ctx, &provider, "devnet", "6ee96fc3").await; 17 | } 18 | -------------------------------------------------------------------------------- /tests/functional/http/syndica.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::check_if_rpc_is_responding_correctly_for_solana, crate::context::ServerContext, 3 | rpc_proxy::providers::ProviderKind, test_context::test_context, 4 | }; 5 | 6 | #[test_context(ServerContext)] 7 | #[tokio::test] 8 | #[ignore] 9 | async fn syndica_provider_solana(ctx: &mut ServerContext) { 10 | let provider = ProviderKind::Syndica; 11 | // Solana mainnet 12 | check_if_rpc_is_responding_correctly_for_solana( 13 | ctx, 14 | "5eykt4UsFv8P8NJdTREpY1vzqKqZKvdp", 15 | &provider, 16 | ) 17 | .await; 18 | 19 | // Solana devnet 20 | 
check_if_rpc_is_responding_correctly_for_solana( 21 | ctx, 22 | "EtWTRABZaYq6iMfeYKouRu166VU2xqa1", 23 | &provider, 24 | ) 25 | .await; 26 | } 27 | -------------------------------------------------------------------------------- /tests/functional/http/unichain.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::check_if_rpc_is_responding_correctly_for_supported_chain, crate::context::ServerContext, 3 | rpc_proxy::providers::ProviderKind, test_context::test_context, 4 | }; 5 | 6 | #[test_context(ServerContext)] 7 | #[tokio::test] 8 | #[ignore] 9 | async fn unichain_provider_eip155_1301(ctx: &mut ServerContext) { 10 | // Unichain Sepolia 11 | check_if_rpc_is_responding_correctly_for_supported_chain( 12 | ctx, 13 | &ProviderKind::Unichain, 14 | "eip155:1301", 15 | "0x515", 16 | ) 17 | .await; 18 | } 19 | -------------------------------------------------------------------------------- /tests/functional/http/wemix.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::check_if_rpc_is_responding_correctly_for_supported_chain, crate::context::ServerContext, 3 | rpc_proxy::providers::ProviderKind, test_context::test_context, 4 | }; 5 | 6 | #[test_context(ServerContext)] 7 | #[tokio::test] 8 | #[ignore] 9 | async fn wemix_provider(ctx: &mut ServerContext) { 10 | // Wemix Mainnet 11 | check_if_rpc_is_responding_correctly_for_supported_chain( 12 | ctx, 13 | &ProviderKind::Wemix, 14 | "eip155:1111", 15 | "0x457", 16 | ) 17 | .await; 18 | 19 | // Wemix Testnet 20 | check_if_rpc_is_responding_correctly_for_supported_chain( 21 | ctx, 22 | &ProviderKind::Wemix, 23 | "eip155:1112", 24 | "0x458", 25 | ) 26 | .await; 27 | } 28 | -------------------------------------------------------------------------------- /tests/functional/http/zksync.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::check_if_rpc_is_responding_correctly_for_supported_chain, crate::context::ServerContext, 3 | rpc_proxy::providers::ProviderKind, test_context::test_context, 4 | }; 5 | 6 | #[test_context(ServerContext)] 7 | #[tokio::test] 8 | #[ignore] 9 | async fn zksync_provider_eip155_324_and_280(ctx: &mut ServerContext) { 10 | // ZkSync mainnet 11 | check_if_rpc_is_responding_correctly_for_supported_chain( 12 | ctx, 13 | &ProviderKind::ZKSync, 14 | "eip155:324", 15 | "0x144", 16 | ) 17 | .await; 18 | 19 | // ZkSync Sepolia testnet 20 | check_if_rpc_is_responding_correctly_for_supported_chain( 21 | ctx, 22 | &ProviderKind::ZKSync, 23 | "eip155:300", 24 | "0x12c", 25 | ) 26 | .await 27 | } 28 | -------------------------------------------------------------------------------- /tests/functional/http/zora.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::check_if_rpc_is_responding_correctly_for_supported_chain, crate::context::ServerContext, 3 | rpc_proxy::providers::ProviderKind, test_context::test_context, 4 | }; 5 | 6 | #[test_context(ServerContext)] 7 | #[tokio::test] 8 | #[ignore] 9 | async fn zora_provider_eip155_7777777_and_999(ctx: &mut ServerContext) { 10 | // Zora mainnet 11 | check_if_rpc_is_responding_correctly_for_supported_chain( 12 | ctx, 13 | &ProviderKind::Zora, 14 | "eip155:7777777", 15 | "0x76adf1", 16 | ) 17 | .await; 18 | 19 | // Zora Sepolia 20 | check_if_rpc_is_responding_correctly_for_supported_chain( 21 | ctx, 22 | &ProviderKind::Zora, 23 | "eip155:999999999", 24 | "0x3b9ac9ff", 25 | ) 26 | .await 27 | } 28 
| -------------------------------------------------------------------------------- /tests/functional/metrics.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "test-localhost")] 2 | #[test_context(ServerContext)] 3 | #[tokio::test] 4 | async fn metrics_check(ctx: &mut ServerContext) { 5 | let addr = format!("https://{}/metrics", ctx.server.private_addr.unwrap()); 6 | 7 | let client = Client::builder().build::<_, hyper::Body>(HttpsConnector::new()); 8 | 9 | let request = Request::builder() 10 | .method(Method::GET) 11 | .uri(addr) 12 | .body(Body::default()) 13 | .unwrap(); 14 | 15 | let response = client.request(request).await.unwrap(); 16 | 17 | assert_eq!(response.status(), http::StatusCode::OK) 18 | } 19 | -------------------------------------------------------------------------------- /tests/functional/mod.rs: -------------------------------------------------------------------------------- 1 | mod bundler; 2 | mod database; 3 | mod http; 4 | mod websocket; 5 | -------------------------------------------------------------------------------- /tests/functional/websocket/mod.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::{context::ServerContext, JSONRPC_VERSION}, 3 | futures_util::{SinkExt, StreamExt}, 4 | }; 5 | 6 | pub(crate) mod zora; 7 | 8 | async fn check_if_rpc_is_responding_correctly_for_supported_chain( 9 | ctx: &ServerContext, 10 | chain_id: &str, 11 | expected_id: &str, 12 | ) { 13 | let addr = format!( 14 | "{}ws?projectId={}&chainId={}", 15 | ctx.server.public_addr, ctx.server.project_id, chain_id 16 | ) 17 | .replace("http", "ws"); 18 | 19 | let (client, _) = async_tungstenite::tokio::connect_async(addr).await.unwrap(); 20 | let request = jsonrpc::Request { 21 | method: "eth_chainId", 22 | params: None, 23 | id: serde_json::Value::Number(1.into()), 24 | jsonrpc: JSONRPC_VERSION, 25 | }; 26 | 27 | let (mut tx, mut rx) = client.split(); 28 | 29 | tx.send(axum_tungstenite::Message::Text( 30 | serde_json::to_string(&request).unwrap(), 31 | )) 32 | .await 33 | .unwrap(); 34 | 35 | let response = rx.next().await.unwrap().unwrap(); 36 | let response: jsonrpc::Response = serde_json::from_str(&response.to_string()).unwrap(); 37 | 38 | assert!(response.error.is_none()); 39 | assert_eq!( 40 | response.result.unwrap().to_string(), 41 | format!("\"{expected_id}\"") 42 | ); 43 | } 44 | -------------------------------------------------------------------------------- /tests/functional/websocket/zora.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::check_if_rpc_is_responding_correctly_for_supported_chain, crate::context::ServerContext, 3 | test_context::test_context, 4 | }; 5 | 6 | #[test_context(ServerContext)] 7 | #[tokio::test] 8 | #[ignore] 9 | async fn zora_provider_websocket(ctx: &mut ServerContext) { 10 | // Zora mainnet 11 | check_if_rpc_is_responding_correctly_for_supported_chain(ctx, "eip155:7777777", "0x76adf1") 12 | .await; 13 | } 14 | -------------------------------------------------------------------------------- /tests/integration.rs: -------------------------------------------------------------------------------- 1 | pub const JSONRPC_VERSION: Option<&'static str> = Some("2.0"); 2 | 3 | mod context; 4 | mod functional; 5 | mod utils; 6 | -------------------------------------------------------------------------------- /tests/utils/mod.rs: 
-------------------------------------------------------------------------------- 1 | use { 2 | axum::http::HeaderValue, 3 | hyper::{body, client::HttpConnector, Body, Client, Method, Request, StatusCode}, 4 | hyper_tls::HttpsConnector, 5 | sqlx::{postgres::PgPoolOptions, PgPool}, 6 | std::env, 7 | }; 8 | 9 | pub async fn send_jsonrpc_request( 10 | client: Client<HttpsConnector<HttpConnector>>, 11 | base_addr: String, 12 | chain: &str, 13 | rpc_request: jsonrpc::Request<'static>, 14 | ) -> (StatusCode, jsonrpc::Response) { 15 | let addr = base_addr + chain; 16 | 17 | let json = serde_json::to_string(&rpc_request).unwrap(); 18 | let req_body = Body::from(json.clone()); 19 | 20 | let request = Request::builder() 21 | .method(Method::POST) 22 | .uri(addr.clone()) 23 | .header("Content-Type", "application/json") 24 | .body(req_body) 25 | .unwrap(); 26 | 27 | let response = client.request(request).await.unwrap(); 28 | assert_eq!( 29 | response.headers().get("Content-Type"), 30 | Some(&HeaderValue::from_static("application/json")) 31 | ); 32 | 33 | let (parts, body) = response.into_parts(); 34 | let body = body::to_bytes(body).await.unwrap(); 35 | ( 36 | parts.status, 37 | serde_json::from_slice(&body).unwrap_or_else(|_| { 38 | panic!( 39 | "Failed to parse response '{:?}' ({} / {:?})", 40 | &body, &addr, &json 41 | ) 42 | }), 43 | ) 44 | } 45 | 46 | pub async fn get_postgres_pool() -> PgPool { 47 | let postgres = PgPoolOptions::new() 48 | .connect(&env::var("RPC_PROXY_POSTGRES_URI").unwrap()) 49 | .await 50 | .unwrap(); 51 | sqlx::migrate!("./migrations").run(&postgres).await.unwrap(); 52 | postgres 53 | } 54 | --------------------------------------------------------------------------------
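For orientation, here is a hypothetical sketch of how a functional test could drive the `send_jsonrpc_request` helper above. The client construction and the `jsonrpc::Request` shape mirror tests/functional/metrics.rs and tests/functional/websocket/mod.rs, but the query path ("v1?projectId=...&chainId=") and the asserted values are assumptions, not taken from the repository:

use {
    hyper::{Client, StatusCode},
    hyper_tls::HttpsConnector,
};

// Hypothetical usage of send_jsonrpc_request; the base URL layout is an assumption.
async fn example_eth_chain_id_check(ctx: &crate::context::ServerContext) {
    // Same HTTPS client construction as in tests/functional/metrics.rs.
    let client = Client::builder().build::<_, hyper::Body>(HttpsConnector::new());
    // Same request shape as in tests/functional/websocket/mod.rs.
    let request = jsonrpc::Request {
        method: "eth_chainId",
        params: None,
        id: serde_json::Value::Number(1.into()),
        jsonrpc: crate::JSONRPC_VERSION,
    };
    let base_addr = format!(
        "{}v1?projectId={}&chainId=",
        ctx.server.public_addr, ctx.server.project_id
    );
    let (status, response) =
        crate::utils::send_jsonrpc_request(client, base_addr, "eip155:1", request).await;
    assert_eq!(status, StatusCode::OK);
    assert!(response.error.is_none());
}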