├── .env.example ├── .github ├── ISSUE_TEMPLATE │ ├── bug.yml │ └── feature_request.yml ├── codeowners ├── pull_request_template.md └── workflows │ ├── dispatch_deploy.yaml │ ├── dispatch_publish.yaml │ ├── dispatch_validate.yaml │ ├── event_intake.yml │ ├── event_pr.yaml │ ├── event_release.yaml │ ├── sub-app-check.yml │ ├── sub-app-deploy.yml │ ├── sub-cd.yml │ ├── sub-ci.yml │ ├── sub-infra-apply.yml │ ├── sub-infra-check.yml │ ├── sub-infra-plan.yml │ ├── sub-publish-image.yml │ └── sub-validate.yml ├── .gitignore ├── .gitmodules ├── .pre-commit-config.yaml ├── .terraformignore ├── CHANGELOG.md ├── Cargo.lock ├── Cargo.toml ├── Dockerfile ├── LICENSE ├── README.md ├── build.rs ├── justfile ├── ops ├── docker-compose.keyserver.yml └── docker-compose.storage.yml ├── rustfmt.toml ├── src ├── auth │ ├── did.rs │ ├── jwt │ │ ├── mod.rs │ │ └── tests.rs │ ├── mod.rs │ └── public_key.rs ├── config.rs ├── error.rs ├── handlers │ ├── health.rs │ ├── identity │ │ ├── mod.rs │ │ ├── register.rs │ │ ├── resolve.rs │ │ └── unregister.rs │ ├── invite │ │ ├── mod.rs │ │ ├── register.rs │ │ ├── resolve.rs │ │ └── unregister.rs │ ├── metrics.rs │ └── mod.rs ├── lib.rs ├── log │ └── mod.rs ├── macros.rs ├── main.rs ├── metrics │ └── mod.rs ├── state.rs └── stores │ ├── keys.rs │ └── mod.rs └── terraform ├── .terraform-docs.yml ├── .terraform.lock.hcl ├── .tflint.hcl ├── README.md ├── cloudwatch ├── README.md ├── alarms_docdb.tf ├── alarms_ecs.tf ├── context.tf ├── main.tf ├── terraform.tf └── variables.tf ├── context.tf ├── docdb ├── README.md ├── autoscaling.tf ├── context.tf ├── kms.tf ├── main.tf ├── network.tf ├── outputs.tf ├── terraform.tf └── variables.tf ├── ecs ├── README.md ├── cluster.tf ├── cluster_autoscaling.tf ├── cluster_iam.tf ├── context.tf ├── dns.tf ├── main.tf ├── network.tf ├── outputs.tf ├── terraform.tf └── variables.tf ├── inputs.tf ├── main.tf ├── monitoring ├── README.md ├── context.tf ├── dashboard.jsonnet ├── dashboard.tf ├── data_sources.tf ├── 
outputs.tf ├── panels │ ├── README.md │ ├── app │ │ ├── app_metric.libsonnet │ │ ├── identity │ │ │ ├── invalid_register_cacao.libsonnet │ │ │ ├── invalid_unregister_jwt.libsonnet │ │ │ ├── register.libsonnet │ │ │ ├── resolved.libsonnet │ │ │ └── unregister.libsonnet │ │ └── invite │ │ │ ├── invalid_register_jwt.libsonnet │ │ │ ├── invalid_unregister_jwt.libsonnet │ │ │ ├── register.libsonnet │ │ │ ├── resolved.libsonnet │ │ │ └── unregister.libsonnet │ ├── defaults.libsonnet │ ├── docdb │ │ ├── available_memory.libsonnet │ │ ├── buffer_cache_hit_ratio.libsonnet │ │ ├── connections.libsonnet │ │ ├── cpu.libsonnet │ │ ├── low_mem_op_throttled.libsonnet │ │ └── volume.libsonnet │ ├── ecs │ │ ├── cpu.libsonnet │ │ └── memory.libsonnet │ ├── lb │ │ ├── active_connections.libsonnet │ │ ├── error_4xx.libsonnet │ │ ├── error_5xx.libsonnet │ │ ├── healthy_hosts.libsonnet │ │ └── requests.libsonnet │ └── panels.libsonnet ├── terraform.tf └── variables.tf ├── outputs.tf ├── providers.tf ├── res_application.tf ├── res_cloudwatch.tf ├── res_dns.tf ├── res_keystore.tf ├── res_monitoring.tf ├── res_network.tf ├── res_prometheus.tf ├── terraform.tf └── variables.tf /.env.example: -------------------------------------------------------------------------------- 1 | PORT=8080 2 | LOG_LEVEL=INFO 3 | DATABASE_URL=mongodb://admin:admin@mongo:27017/keyserver?authSource=admin 4 | PROJECT_ID= 5 | 6 | # Telemetry 7 | TELEMETRY_ENABLED=true 8 | TELEMETRY_GRPC_URL=http://localhost:4317 9 | TELEMETRY_PROMETHEUS_PORT=8081 10 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug.yml: -------------------------------------------------------------------------------- 1 | name: Bug Report 2 | description: File a bug report 3 | title: "bug: " 4 | labels: 5 | - bug 6 | body: 7 | - type: markdown 8 | attributes: 9 | value: | 10 | Thanks for taking the time to fill out this bug report! 
🐛 11 | - type: checkboxes 12 | attributes: 13 | label: Is there an existing issue for this? 14 | description: Please search to see if an issue already exists for the bug you encountered. 15 | options: 16 | - label: I have searched the existing issues 17 | required: true 18 | - type: textarea 19 | attributes: 20 | label: Current Behavior 21 | description: A concise description of what you're experiencing. 22 | validations: 23 | required: true 24 | - type: textarea 25 | attributes: 26 | label: Expected Behavior 27 | description: A concise description of what you expected to happen. 28 | validations: 29 | required: true 30 | - type: textarea 31 | attributes: 32 | label: Steps To Reproduce 33 | description: Steps to reproduce the behavior. 34 | placeholder: | 35 | 1. In this environment... 36 | 2. With this config... 37 | 3. Run '...' 38 | 4. See error... 39 | validations: 40 | required: true 41 | - type: textarea 42 | attributes: 43 | label: Environment 44 | description: | 45 | examples: 46 | - **OS**: MacOS Monterey 12.5 47 | - **rustc**: rustc 1.62.1 (e092d0b6b 2022-07-16) 48 | - **cargo**: cargo 1.62.1 (a748cf5a3 2022-06-08) 49 | 50 | > **Note** 51 | > If using docker image please provide docker version and the image's tag 52 | value: | 53 | - OS: 54 | - rustc: 55 | - cargo: 56 | render: markdown 57 | validations: 58 | required: false 59 | - type: textarea 60 | id: logs 61 | attributes: 62 | label: Relevant log output 63 | description: Please copy and paste any relevant log output. This will be automatically formatted into code, so no need for backticks. 64 | render: shell 65 | validations: 66 | required: false 67 | - type: textarea 68 | attributes: 69 | label: Anything else? 70 | description: | 71 | Links? References? Anything that will give us more context about the issue you are encountering! 72 | 73 | Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in. 
74 | validations: 75 | required: false -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.yml: -------------------------------------------------------------------------------- 1 | name: Feature Request 2 | description: Request a new feature be added 3 | title: "feat: " 4 | labels: 5 | - enhancement 6 | body: 7 | - type: markdown 8 | attributes: 9 | value: | 10 | Thanks for taking the time to suggest a new feature for Bouncer! ✨ 11 | - type: checkboxes 12 | attributes: 13 | label: Is there an existing issue for this? 14 | description: Please search to see if an issue already exists for the feature you would like. 15 | options: 16 | - label: I have searched the existing issues 17 | required: true 18 | - type: textarea 19 | attributes: 20 | label: Current Behavior 21 | description: A concise description of what you're experiencing. 22 | validations: 23 | required: true 24 | - type: textarea 25 | attributes: 26 | label: Requested Behavior 27 | description: A concise description of what you expected to happen. 28 | validations: 29 | required: true 30 | - type: textarea 31 | attributes: 32 | label: Anything else? 33 | description: | 34 | Links? References? Anything that will give us more context about the issue you are encountering! 35 | 36 | Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in. 37 | validations: 38 | required: false 39 | -------------------------------------------------------------------------------- /.github/codeowners: -------------------------------------------------------------------------------- 1 | * @Xav 2 | * @Elyniss 3 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | # Description 2 | 3 | 8 | 9 | Resolves # (issue) 10 | 11 | ## How Has This Been Tested? 
12 | 13 | 18 | 19 | 20 | 21 | ## Due Diligence 22 | 23 | * [ ] Breaking change 24 | * [ ] Requires a documentation update 25 | * [ ] Requires a e2e/integration test update 26 | -------------------------------------------------------------------------------- /.github/workflows/dispatch_deploy.yaml: -------------------------------------------------------------------------------- 1 | name: ⚙️ Deploy 2 | run-name: "Deploy: ${{ github.sha }} ➠ ${{ inputs.version-type }}:${{ inputs.version-tag }}${{ (!inputs.deploy-infra && !inputs.deploy-app) && ' 👀 deploy nothing' || ''}}${{ inputs.deploy-infra && ' ❱❱  infra' || '' }}${{ inputs.deploy-app && ' ❱❱  app' || '' }}" 3 | 4 | on: 5 | workflow_dispatch: 6 | inputs: 7 | deploy-infra: 8 | description: "Deploy Infra" 9 | default: true 10 | required: true 11 | type: boolean 12 | deploy-app: 13 | description: "Deploy App" 14 | default: true 15 | required: true 16 | type: boolean 17 | stage: 18 | description: 'Target Environment' 19 | type: choice 20 | options: 21 | - staging 22 | - prod 23 | default: staging 24 | required: true 25 | version-type: 26 | description: "Release Version" 27 | type: choice 28 | options: 29 | - latest 30 | - manual 31 | default: 'latest' 32 | required: true 33 | version-tag: 34 | description: "Release Version Tag (for manual version)" 35 | type: string 36 | default: '' 37 | 38 | concurrency: deploy 39 | 40 | permissions: 41 | contents: read 42 | id-token: write 43 | packages: write 44 | 45 | jobs: 46 | 47 | select_version: 48 | name: Select Version 49 | if: ${{ always() && !cancelled() && !failure() }} 50 | runs-on: ubuntu-latest 51 | steps: 52 | - name: Checkout repository 53 | uses: actions/checkout@v4 54 | with: 55 | fetch-depth: 15 56 | fetch-tags: true 57 | - name: Select target version 58 | id: select_version 59 | run: | 60 | if [ "${{ inputs.version-type }}" == "latest" ]; then 61 | echo "version=$(git tag | sort --version-sort | tail -n1)" >> "$GITHUB_OUTPUT" 62 | else 63 | echo "version=${{ 
inputs.version-tag }}" >> "$GITHUB_OUTPUT" 64 | fi 65 | outputs: 66 | version: ${{ steps.select_version.outputs.version }} 67 | 68 | cd: 69 | name: CD 70 | needs: [ select_version ] 71 | uses: ./.github/workflows/sub-cd.yml 72 | secrets: inherit 73 | with: 74 | deploy-infra: ${{ inputs.deploy-infra }} 75 | deploy-app: ${{ inputs.deploy-app }} 76 | deploy-prod: ${{ inputs.stage == 'prod' }} 77 | version: ${{ needs.select_version.outputs.version }} 78 | -------------------------------------------------------------------------------- /.github/workflows/dispatch_publish.yaml: -------------------------------------------------------------------------------- 1 | name: ⚙️ Publish 2 | run-name: "Publish: ${{ github.sha }}${{ inputs.deploy-to != 'none' && format(' ❱❱ {0}', inputs.deploy-to) || ''}}" 3 | 4 | on: 5 | workflow_dispatch: 6 | inputs: 7 | deploy-to: 8 | description: "Deploy published image to" 9 | type: choice 10 | options: 11 | - none 12 | - staging 13 | - prod 14 | default: staging 15 | required: true 16 | 17 | concurrency: deploy 18 | 19 | permissions: 20 | contents: write 21 | id-token: write 22 | packages: write 23 | 24 | jobs: 25 | ci: 26 | name: CI 27 | uses: ./.github/workflows/sub-ci.yml 28 | secrets: inherit 29 | with: 30 | check-infra: false 31 | check-app: true 32 | 33 | update_version: 34 | name: Update Version 35 | runs-on: ubuntu-latest 36 | needs: [ci] 37 | steps: 38 | - name: Checkout 39 | uses: actions/checkout@v3 40 | with: 41 | submodules: recursive 42 | token: ${{ secrets.RELEASE_PAT }} 43 | fetch-depth: 0 44 | 45 | - name: Release 46 | id: release 47 | uses: WalletConnect/actions/github/update-rust-version/@2.1.5 48 | with: 49 | token: ${{ secrets.RELEASE_PAT }} 50 | outputs: 51 | version: ${{ steps.release.outputs.version }} 52 | 53 | released_version: 54 | name: Version ➠ ${{ needs.update_version.outputs.version }} 55 | runs-on: ubuntu-latest 56 | needs: [ update_version ] 57 | steps: 58 | - run: echo "Version = ${{ 
needs.update_version.outputs.version }}" 59 | 60 | publish_image-staging: 61 | name: Publish ${{ needs.update_version.outputs.version }} ❱❱ Staging ECR 62 | uses: ./.github/workflows/sub-publish-image.yml 63 | needs: [ update_version ] 64 | with: 65 | version: ${{ needs.update_version.outputs.version }} 66 | aws-role-arn: ${{ vars.AWS_ROLE_STAGING }} 67 | 68 | publish_image-prod: 69 | name: Publish ${{ needs.update_version.outputs.version }} ❱❱ Prod ECR 70 | uses: ./.github/workflows/sub-publish-image.yml 71 | needs: [ update_version ] 72 | with: 73 | version: ${{ needs.update_version.outputs.version }} 74 | aws-role-arn: ${{ vars.AWS_ROLE_PROD }} 75 | 76 | cd: 77 | name: CD 78 | uses: ./.github/workflows/sub-cd.yml 79 | needs: [update_version, publish_image-staging, publish_image-prod] 80 | if: ${{ inputs.deploy-to == 'staging' || inputs.deploy-to == 'prod' }} 81 | secrets: inherit 82 | with: 83 | deploy-infra: false 84 | deploy-app: true 85 | deploy-prod: ${{ inputs.deploy-to == 'prod' }} 86 | version: ${{ needs.update_version.outputs.version }} 87 | -------------------------------------------------------------------------------- /.github/workflows/dispatch_validate.yaml: -------------------------------------------------------------------------------- 1 | name: ⚙️ Validate 2 | run-name: "Validate: ${{ github.sha }}${{ (!inputs.check-infra && !inputs.check-app) && '👀 validate nothing' || ''}}${{ inputs.check-infra && ' ✓  infra' || '' }}${{ inputs.check-app && ' ✓  app' || '' }}" 3 | on: 4 | workflow_dispatch: 5 | inputs: 6 | check-infra: 7 | description: "Validate Infra" 8 | default: true 9 | required: true 10 | type: boolean 11 | check-app: 12 | description: "Validate App" 13 | default: true 14 | required: true 15 | type: boolean 16 | 17 | permissions: 18 | contents: read 19 | id-token: write 20 | 21 | jobs: 22 | ci: 23 | name: CI 24 | uses: ./.github/workflows/sub-ci.yml 25 | secrets: inherit 26 | with: 27 | check-infra: ${{ inputs.check-infra }} 28 | check-app: ${{ inputs.check-app
}} 29 | -------------------------------------------------------------------------------- /.github/workflows/event_intake.yml: -------------------------------------------------------------------------------- 1 | # This workflow moves issues to the Project board when they receive the "accepted" label 2 | # When WalletConnect Org members create issues they are automatically "accepted". 3 | # Otherwise, they need to manually receive that label during intake. 4 | name: ⚡ Intake 5 | 6 | on: 7 | issues: 8 | types: [opened, labeled] 9 | 10 | jobs: 11 | add-to-project: 12 | name: Add issue to board 13 | if: github.event_name == 'issues' && github.event.action == 'labeled' && github.event.label.name == 'accepted' 14 | runs-on: 15 | group: ${{ vars.RUN_GROUP }} 16 | steps: 17 | - uses: actions/add-to-project@v0.1.0 18 | with: 19 | project-url: https://github.com/orgs/WalletConnect/projects/20 20 | github-token: ${{ secrets.ASSIGN_TO_PROJECT_GITHUB_TOKEN }} 21 | labeled: accepted 22 | label-operator: OR 23 | 24 | auto-promote: 25 | name: auto-promote 26 | if: github.event.action == 'opened' 27 | runs-on: ubuntu-latest 28 | steps: 29 | - name: Check Core Team membership 30 | uses: tspascoal/get-user-teams-membership@v1 31 | id: is-core-team 32 | with: 33 | username: ${{ github.event_name != 'pull_request' && github.event.issue.user.login || github.event.sender.login }} 34 | team: "Core Team" 35 | GITHUB_TOKEN: ${{ secrets.ASSIGN_TO_PROJECT_GITHUB_TOKEN }} 36 | - name: Print result 37 | env: 38 | CREATOR: ${{ github.event_name != 'pull_request' && github.event.issue.user.login || github.event.sender.login }} 39 | IS_TEAM_MEMBER: ${{ steps.is-core-team.outputs.isTeamMember }} 40 | run: echo "$CREATOR (Core Team Member $IS_TEAM_MEMBER) created this issue/PR" 41 | - name: Label issues 42 | if: ${{ steps.is-core-team.outputs.isTeamMember == 'true' }} 43 | uses: andymckay/labeler@e6c4322d0397f3240f0e7e30a33b5c5df2d39e90 44 | with: 45 | add-labels: "accepted" 46 | repo-token: ${{ 
secrets.ASSIGN_TO_PROJECT_GITHUB_TOKEN }} 47 | -------------------------------------------------------------------------------- /.github/workflows/event_pr.yaml: -------------------------------------------------------------------------------- 1 | name: ⚡ Pull-Request 2 | run-name: 'PR / ${{ github.event.pull_request.title }}' 3 | 4 | on: 5 | pull_request: 6 | types: 7 | - opened # A pull request was created. 8 | - reopened # A closed pull request was reopened. 9 | - edited # A pull request's title, body, or labels are edited. 10 | - synchronize # A pull request's branch was synchronized with its base branch. 11 | - unlocked # Conversation on a pull request was unlocked. 12 | paths-ignore: 13 | - 'docs/**' 14 | - 'README.md' 15 | 16 | concurrency: 17 | group: pr-${{ github.event.pull_request.number }} 18 | cancel-in-progress: true 19 | 20 | permissions: 21 | contents: read 22 | id-token: write 23 | issues: read 24 | pull-requests: write 25 | 26 | jobs: 27 | check_pr: 28 | name: Check PR 29 | runs-on: ubuntu-latest 30 | permissions: 31 | statuses: write 32 | steps: 33 | - name: Check PR Title 34 | uses: aslafy-z/conventional-pr-title-action@v3 35 | env: 36 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 37 | 38 | paths_filter: 39 | name: Paths Filter 40 | runs-on: ubuntu-latest 41 | steps: 42 | - uses: actions/checkout@v3 43 | - uses: dorny/paths-filter@v2 44 | id: filter 45 | with: 46 | filters: | 47 | infra: 48 | - '${{ vars.TF_DIRECTORY }}/**' 49 | app: 50 | - 'src/**' 51 | outputs: 52 | infra: ${{ steps.filter.outputs.infra }} 53 | app: ${{ steps.filter.outputs.app }} 54 | 55 | ci: 56 | name: CI 57 | uses: ./.github/workflows/sub-ci.yml 58 | needs: [ paths_filter ] 59 | secrets: inherit 60 | with: 61 | check-infra: ${{ needs.paths_filter.outputs.infra == 'true' }} 62 | check-app: ${{ needs.paths_filter.outputs.app == 'true' }} 63 | 64 | merge_check: 65 | name: Merge Check 66 | runs-on: ubuntu-latest 67 | if: ${{ always() && !cancelled() && !failure() }} 68 | needs: 
[check_pr, ci] 69 | steps: 70 | - run: echo "CI is successful" 71 | -------------------------------------------------------------------------------- /.github/workflows/event_release.yaml: -------------------------------------------------------------------------------- 1 | name: ⚡ Release 2 | run-name: 'Release / ${{ github.event.head_commit.message }}' 3 | 4 | on: 5 | push: 6 | branches: 7 | - master 8 | paths-ignore: 9 | - '.github/**' 10 | - 'docs/**' 11 | - 'Cargo.toml' 12 | - 'Cargo.lock' 13 | - 'README.md' 14 | - 'CHANGELOG.md' 15 | - 'LICENSE' 16 | - 'justfile' 17 | - 'rustfmt.toml' 18 | - '.editorconfig' 19 | - '.pre-commit-config.yaml' 20 | - '.terraformignore' 21 | - '.env.example' 22 | 23 | concurrency: deploy 24 | 25 | permissions: 26 | contents: write 27 | id-token: write 28 | packages: write 29 | 30 | jobs: 31 | 32 | # We skip the CI part here since it must have already run thanks to branch protection. 33 | 34 | paths_filter: 35 | name: Paths Filter 36 | runs-on: ubuntu-latest 37 | steps: 38 | - uses: actions/checkout@v3 39 | - uses: dorny/paths-filter@v2 40 | id: filter 41 | with: 42 | filters: | 43 | infra: 44 | - '${{ vars.TF_DIRECTORY }}/**' 45 | app: 46 | - 'src/**' 47 | outputs: 48 | infra: ${{ steps.filter.outputs.infra }} 49 | app: ${{ steps.filter.outputs.app }} 50 | 51 | update_version: 52 | name: Update Version 53 | runs-on: ubuntu-latest 54 | if: ${{ needs.paths_filter.outputs.app == 'true' }} 55 | needs: [paths_filter] 56 | steps: 57 | - name: Checkout 58 | uses: actions/checkout@v3 59 | with: 60 | submodules: recursive 61 | token: ${{ secrets.RELEASE_PAT }} 62 | fetch-depth: 0 63 | 64 | - name: Release 65 | id: release 66 | uses: WalletConnect/actions/github/update-rust-version/@2.1.5 67 | with: 68 | token: ${{ secrets.RELEASE_PAT }} 69 | outputs: 70 | version: ${{ steps.release.outputs.version }} 71 | 72 | released_version: 73 | name: Release Version ➠ ${{ needs.update_version.outputs.version }} 74 | runs-on: ubuntu-latest 75 | needs: [
update_version ] 76 | steps: 77 | - run: echo "Version = ${{ needs.update_version.outputs.version }}" 78 | 79 | publish_image-staging: 80 | name: Publish ${{ needs.update_version.outputs.version }} ❱❱ Staging ECR 81 | uses: ./.github/workflows/sub-publish-image.yml 82 | needs: [ update_version ] 83 | with: 84 | version: ${{ needs.update_version.outputs.version }} 85 | aws-role-arn: ${{ vars.AWS_ROLE_STAGING }} 86 | 87 | publish_image-prod: 88 | name: Publish ${{ needs.update_version.outputs.version }} ❱❱ Prod ECR 89 | uses: ./.github/workflows/sub-publish-image.yml 90 | needs: [ update_version ] 91 | with: 92 | version: ${{ needs.update_version.outputs.version }} 93 | aws-role-arn: ${{ vars.AWS_ROLE_PROD }} 94 | 95 | get_version: 96 | name: Get Version 97 | runs-on: ubuntu-latest 98 | needs: [ paths_filter, update_version, publish_image-staging, publish_image-prod ] 99 | if: ${{ always() && !cancelled() && !failure() }} 100 | steps: 101 | - name: Get task definition from ECS 102 | id: get_task 103 | if: ${{ needs.paths_filter.outputs.app != 'true' }} 104 | uses: WalletConnect/actions/aws/ecs/get-task-image/@2.1.4 105 | with: 106 | aws-role-arn: ${{ vars.AWS_ROLE_STAGING }} 107 | aws-region: ${{ vars.AWS_REGION }} 108 | task-definition-name: ${{ vars.IMAGE_NAME }} 109 | container-name: ${{ vars.IMAGE_NAME }} 110 | 111 | - name: Get target version 112 | id: get_target_version 113 | run: | 114 | if [ "${{ needs.paths_filter.outputs.app }}" == "true" ]; then 115 | echo "version=${{ needs.update_version.outputs.version }}" >> "$GITHUB_OUTPUT" 116 | else 117 | echo "version=${{ steps.get_task.outputs.tag }}" >> "$GITHUB_OUTPUT" 118 | fi 119 | outputs: 120 | version: ${{ steps.get_target_version.outputs.version }} 121 | 122 | used_version: 123 | name: Version ➠ ${{ needs.get_version.outputs.version }} 124 | if: ${{ always() && !cancelled() && !failure() }} 125 | runs-on: ubuntu-latest 126 | needs: [ get_version ] 127 | steps: 128 | - run: echo "Version = ${{ 
needs.get_version.outputs.version }}" 129 | 130 | cd: 131 | name: CD 132 | uses: ./.github/workflows/sub-cd.yml 133 | if: ${{ always() && !cancelled() && !failure() }} 134 | needs: [paths_filter, get_version, publish_image-staging, publish_image-prod] 135 | secrets: inherit 136 | with: 137 | deploy-infra: ${{ needs.paths_filter.outputs.infra == 'true' }} 138 | deploy-app: ${{ needs.paths_filter.outputs.app == 'true' && needs.paths_filter.outputs.infra != 'true' }} 139 | deploy-prod: true 140 | version: ${{ needs.get_version.outputs.version }} 141 | -------------------------------------------------------------------------------- /.github/workflows/sub-app-check.yml: -------------------------------------------------------------------------------- 1 | name: ❖ App - Check 2 | 3 | on: 4 | workflow_call: 5 | 6 | env: 7 | RUST_BACKTRACE: full 8 | 9 | permissions: 10 | contents: read 11 | 12 | jobs: 13 | clippy: 14 | name: Clippy 15 | runs-on: ubuntu-latest 16 | steps: 17 | - name: Checkout 18 | uses: actions/checkout@v3 19 | 20 | - name: "Install Rust ${{ inputs.version }}" 21 | uses: WalletConnect/actions-rs/toolchain@1.0.0 22 | with: 23 | toolchain: ${{ vars.RUST_VERSION }} 24 | profile: 'minimal' 25 | components: 'cargo,clippy' 26 | override: true 27 | 28 | - name: Install Protoc 29 | uses: arduino/setup-protoc@v2 30 | with: 31 | repo-token: ${{ secrets.GITHUB_TOKEN }} 32 | 33 | - name: Run sccache-cache 34 | uses: mozilla-actions/sccache-action@v0.0.3 35 | 36 | - name: "Clippy" 37 | uses: WalletConnect/actions-rs/cargo@1.0.0 38 | with: 39 | command: clippy 40 | args: -- -D warnings 41 | 42 | formatting: 43 | name: Formatting 44 | runs-on: ubuntu-latest 45 | steps: 46 | - name: Checkout 47 | uses: actions/checkout@v3 48 | 49 | - name: "Install Rust ${{ inputs.version }}" 50 | uses: WalletConnect/actions-rs/toolchain@1.0.0 51 | with: 52 | toolchain: ${{ vars.RUST_VERSION }} 53 | profile: 'default' 54 | override: true 55 | 56 | - name: Run sccache-cache 57 | uses: 
mozilla-actions/sccache-action@v0.0.3 58 | 59 | - name: "Check Formatting" 60 | uses: WalletConnect/actions-rs/cargo@1.0.0 61 | with: 62 | command: fmt 63 | args: -- --check 64 | 65 | tests: 66 | name: Tests 67 | runs-on: ubuntu-latest 68 | steps: 69 | - name: Checkout 70 | uses: actions/checkout@v3 71 | 72 | - name: "Install Rust ${{ inputs.version }}" 73 | uses: WalletConnect/actions-rs/toolchain@1.0.0 74 | with: 75 | toolchain: ${{ vars.RUST_VERSION }} 76 | profile: 'default' 77 | override: true 78 | 79 | - name: Install Protoc 80 | uses: arduino/setup-protoc@v2 81 | with: 82 | repo-token: ${{ secrets.GITHUB_TOKEN }} 83 | 84 | - name: Run sccache-cache 85 | uses: mozilla-actions/sccache-action@v0.0.3 86 | 87 | - name: "Unit Tests" 88 | uses: WalletConnect/actions-rs/cargo@1.0.0 89 | with: 90 | command: test 91 | args: --all-features 92 | -------------------------------------------------------------------------------- /.github/workflows/sub-app-deploy.yml: -------------------------------------------------------------------------------- 1 | name: ❖ App - Deploy 2 | 3 | on: 4 | workflow_call: 5 | inputs: 6 | version: 7 | description: 'the release version' 8 | type: string 9 | required: true 10 | stage: 11 | description: 'the environment to deploy to' 12 | type: string 13 | default: 'staging' 14 | stage-url: 15 | description: 'the URL of the environment' 16 | type: string 17 | default: https://${{ vars.SUBDOMAIN_NAME }}.walletconnect.com/health 18 | aws-role-arn: 19 | description: 'the ARN of the AWS role to assume' 20 | type: string 21 | required: true 22 | 23 | permissions: 24 | contents: read 25 | id-token: write 26 | 27 | jobs: 28 | deploy-app: 29 | name: Deploy App `${{ inputs.stage }}` 30 | runs-on: ubuntu-latest 31 | environment: 32 | name: ${{ inputs.stage }} 33 | url: ${{ inputs.stage-url }} 34 | steps: 35 | - name: Checkout 36 | uses: actions/checkout@v3 37 | 38 | - name: Configure AWS Credentials
39 | uses: aws-actions/configure-aws-credentials@v2 40 | with: 41 | role-to-assume: ${{ inputs.aws-role-arn }} 42 | aws-region: ${{ vars.AWS_REGION }} 43 | 44 | - name: Login to ECR repository 45 | id: login-ecr 46 | uses: aws-actions/amazon-ecr-login@v1 47 | 48 | - name: Deploy image to ECS 49 | id: deploy 50 | uses: WalletConnect/actions/aws/ecs/deploy-image/@2.1.4 51 | with: 52 | aws-role-arn: ${{ inputs.aws-role-arn }} 53 | aws-region: ${{ vars.AWS_REGION }} 54 | cluster-name: walletconnect-${{ vars.AWS_REGION }}-${{ inputs.stage }}-${{ vars.IMAGE_NAME }}_cluster 55 | service-name: ${{ vars.IMAGE_NAME }}-service 56 | task-definition-name: ${{ vars.IMAGE_NAME }} 57 | image-name: ${{ steps.login-ecr.outputs.registry }}/${{ vars.IMAGE_NAME }}:${{ inputs.version }} 58 | -------------------------------------------------------------------------------- /.github/workflows/sub-cd.yml: -------------------------------------------------------------------------------- 1 | name: ❖ CD 2 | 3 | #TODO: If we publish the app then deploy infra, we don't need to run app-deploy 4 | 5 | on: 6 | workflow_call: 7 | inputs: 8 | deploy-infra: 9 | description: "Deploy infrastructure" 10 | type: boolean 11 | default: true 12 | deploy-app: 13 | description: "Deploy app" 14 | type: boolean 15 | default: true 16 | deploy-prod: 17 | description: "Deploy to production" 18 | type: boolean 19 | default: false 20 | version: 21 | description: "The release version" 22 | type: string 23 | required: true 24 | 25 | concurrency: cd 26 | 27 | permissions: 28 | contents: read 29 | id-token: write 30 | 31 | jobs: 32 | deploy-infra-staging: 33 | name: Deploy Infra Staging 34 | uses: ./.github/workflows/sub-infra-apply.yml 35 | if: ${{ inputs.deploy-infra }} 36 | secrets: inherit 37 | with: 38 | version: ${{ inputs.version }} 39 | stage: staging 40 | stage-url: https://staging.${{ vars.SUBDOMAIN_NAME }}.walletconnect.com/health 41 | 42 | deploy-app-staging: 43 | name: Deploy App Staging 44 | uses: 
./.github/workflows/sub-app-deploy.yml 45 | if: ${{ (always() && !failure() && !cancelled()) && inputs.deploy-app }} 46 | needs: [deploy-infra-staging] 47 | with: 48 | version: ${{ inputs.version }} 49 | stage: staging 50 | stage-url: https://staging.${{ vars.SUBDOMAIN_NAME }}.walletconnect.com/health 51 | aws-role-arn: ${{ vars.AWS_ROLE_STAGING }} 52 | 53 | validate-staging: 54 | name: Validate - Staging 55 | if: ${{ always() && !failure() && !cancelled() }} 56 | needs: [deploy-infra-staging, deploy-app-staging] 57 | uses: ./.github/workflows/sub-validate.yml 58 | with: 59 | stage: staging 60 | stage-url: https://staging.${{ vars.SUBDOMAIN_NAME }}.walletconnect.com/health 61 | 62 | deploy-infra-prod: 63 | name: Deploy Infra Prod 64 | uses: ./.github/workflows/sub-infra-apply.yml 65 | if: ${{ always() && !failure() && !cancelled() && inputs.deploy-infra && inputs.deploy-prod }} 66 | needs: [validate-staging] 67 | secrets: inherit 68 | with: 69 | version: ${{ inputs.version }} 70 | stage: prod 71 | stage-url: https://${{ vars.SUBDOMAIN_NAME }}.walletconnect.com/health 72 | 73 | deploy-app-prod: 74 | name: Deploy App Prod 75 | uses: ./.github/workflows/sub-app-deploy.yml 76 | if: ${{ always() && !failure() && !cancelled() && inputs.deploy-app && inputs.deploy-prod }} 77 | needs: [validate-staging, deploy-infra-prod] 78 | with: 79 | version: ${{ inputs.version }} 80 | stage: prod 81 | stage-url: https://${{ vars.SUBDOMAIN_NAME }}.walletconnect.com/health 82 | aws-role-arn: ${{ vars.AWS_ROLE_PROD }} 83 | 84 | 85 | validate-prod: 86 | name: Validate - Prod 87 | if: ${{ always() && !failure() && !cancelled() }} 88 | needs: [deploy-infra-prod, deploy-app-prod] 89 | uses: ./.github/workflows/sub-validate.yml 90 | with: 91 | stage: prod 92 | stage-url: https://${{ vars.SUBDOMAIN_NAME }}.walletconnect.com/health 93 | -------------------------------------------------------------------------------- /.github/workflows/sub-ci.yml: 
-------------------------------------------------------------------------------- 1 | name: ❖ CI 2 | 3 | on: 4 | workflow_call: 5 | inputs: 6 | check-infra: 7 | description: 'Run Infrastructure CI' 8 | type: boolean 9 | default: true 10 | check-app: 11 | description: 'Run App CI' 12 | type: boolean 13 | default: true 14 | 15 | permissions: 16 | contents: read 17 | id-token: write 18 | 19 | jobs: 20 | check-infra: 21 | name: Infra CI 22 | uses: ./.github/workflows/sub-infra-check.yml 23 | if: ${{ inputs.check-infra }} 24 | secrets: inherit 25 | with: 26 | stage: staging 27 | 28 | check-app: 29 | name: App CI 30 | uses: ./.github/workflows/sub-app-check.yml 31 | if: ${{ inputs.check-app }} 32 | secrets: inherit 33 | 34 | plan-staging: 35 | name: Infra Plan Staging 36 | uses: ./.github/workflows/sub-infra-plan.yml 37 | needs: [check-infra] 38 | secrets: inherit 39 | with: 40 | version: 'latest' 41 | stage: staging 42 | stage-url: https://staging.${{ vars.SUBDOMAIN_NAME }}.walletconnect.com/health 43 | 44 | plan-prod: 45 | name: Infra Plan Prod 46 | uses: ./.github/workflows/sub-infra-plan.yml 47 | needs: [check-infra] 48 | secrets: inherit 49 | with: 50 | version: 'latest' 51 | stage: prod 52 | stage-url: https://${{ vars.SUBDOMAIN_NAME }}.walletconnect.com/health 53 | -------------------------------------------------------------------------------- /.github/workflows/sub-infra-apply.yml: -------------------------------------------------------------------------------- 1 | name: ❖ Infra - Apply 2 | 3 | on: 4 | workflow_call: 5 | inputs: 6 | stage: 7 | description: 'the environment to deploy to' 8 | type: string 9 | default: staging 10 | stage-url: 11 | description: 'the URL of the environment' 12 | type: string 13 | default: https://${{ vars.SUBDOMAIN_NAME }}.walletconnect.com/health 14 | version: 15 | description: 'The ECR tag to deploy e.g. 
1.7.0' 16 | type: string 17 | secrets: 18 | TF_API_TOKEN: 19 | required: true 20 | 21 | permissions: 22 | contents: read 23 | id-token: write 24 | 25 | jobs: 26 | apply-infra: 27 | name: Apply Infra `${{ inputs.stage }}` 28 | runs-on: ubuntu-latest 29 | environment: 30 | name: ${{ inputs.stage }} 31 | url: ${{ inputs.stage-url }} 32 | env: 33 | TF_API_TOKEN: ${{ secrets.TF_API_TOKEN }} 34 | TF_WORKSPACE: wl-${{ inputs.stage }} 35 | steps: 36 | - name: Checkout 37 | uses: actions/checkout@v3 38 | with: 39 | submodules: recursive 40 | 41 | - name: Configure AWS Credentials for Monitoring account 42 | uses: aws-actions/configure-aws-credentials@v2 43 | with: 44 | role-to-assume: ${{ vars.AWS_ROLE_MONITORING }} 45 | aws-region: ${{ vars.AWS_REGION }} 46 | 47 | - name: Get Grafana details 48 | id: grafana-get-details 49 | uses: WalletConnect/actions/aws/grafana/get-details-by-name/@2.1.4 50 | with: 51 | workspace-name: ${{ vars.GRAFANA_WORKSPACE_NAME }} 52 | 53 | - name: Get Grafana key 54 | id: grafana-get-key 55 | uses: WalletConnect/actions/aws/grafana/get-key/@2.1.4 56 | with: 57 | key-prefix: ${{ github.event.repository.name }} 58 | workspace-id: ${{ steps.grafana-get-details.outputs.workspace-id }} 59 | 60 | - name: Setup Terraform 61 | uses: hashicorp/setup-terraform@v2 62 | with: 63 | cli_config_credentials_token: ${{ secrets.TF_API_TOKEN }} 64 | 65 | - name: Cache Terraform data 66 | uses: actions/cache@v3 67 | with: 68 | path: ${{ vars.TF_DIRECTORY }}/.terraform 69 | key: terraform-${{ hashFiles('${{ vars.TF_DIRECTORY }}/.terraform.lock.hcl') }} 70 | 71 | - name: Init Terraform 72 | uses: WalletConnect/actions/terraform/init/@2.1.4 73 | with: 74 | environment: ${{ inputs.stage }} 75 | use-tfvars: false 76 | 77 | - name: Configure Terraform Variables 78 | id: configure-tfvars 79 | working-directory: ${{ vars.TF_DIRECTORY }} 80 | run: | 81 | echo 'ofac_blocked_countries="${{ vars.OFAC_BLOCKED_COUNTRIES }}"' >> plan.auto.tfvars 82 | echo 'image_version="${{ 
inputs.version }}"' >> plan.auto.tfvars 83 | echo 'grafana_auth="${{ steps.grafana-get-key.outputs.key }}"' >> plan.auto.tfvars 84 | 85 | - name: Apply on ${{ inputs.stage }} 86 | id: tf-plan-apply 87 | working-directory: ${{ vars.TF_DIRECTORY }} 88 | run: | 89 | terraform apply -auto-approve -no-color 90 | 91 | - name: Delete Grafana key 92 | id: grafana-delete-key 93 | if: ${{ always() }} 94 | uses: WalletConnect/actions/aws/grafana/delete-key/@2.1.4 95 | with: 96 | key-name: ${{ steps.grafana-get-key.outputs.key-name }} 97 | workspace-id: ${{ steps.grafana-get-details.outputs.workspace-id }} 98 | -------------------------------------------------------------------------------- /.github/workflows/sub-infra-check.yml: -------------------------------------------------------------------------------- 1 | name: ❖ Infra - Check 2 | 3 | on: 4 | workflow_call: 5 | inputs: 6 | stage: 7 | description: 'The name of the Terraform workspace to use' 8 | type: string 9 | default: staging 10 | secrets: 11 | TF_API_TOKEN: 12 | required: true 13 | 14 | permissions: 15 | contents: read 16 | 17 | jobs: 18 | check-fmt: 19 | name: Formatting 20 | runs-on: ubuntu-latest 21 | steps: 22 | - name: Checkout 23 | uses: actions/checkout@v3 24 | with: 25 | submodules: recursive 26 | 27 | - name: Setup Terraform 28 | uses: hashicorp/setup-terraform@v2 29 | with: 30 | cli_config_credentials_token: ${{ secrets.TF_API_TOKEN }} 31 | 32 | - name: Check Formatting 33 | uses: WalletConnect/actions/terraform/check-fmt/@2.1.4 34 | with: 35 | terraform-path: ${{ vars.TF_DIRECTORY }} 36 | 37 | validate: 38 | name: Validate 39 | runs-on: ubuntu-latest 40 | steps: 41 | - name: Checkout 42 | uses: actions/checkout@v3 43 | with: 44 | submodules: recursive 45 | 46 | - name: Setup Terraform 47 | uses: hashicorp/setup-terraform@v2 48 | with: 49 | cli_config_credentials_token: ${{ secrets.TF_API_TOKEN }} 50 | 51 | - name: Cache Terraform data 52 | uses: actions/cache@v3 53 | with: 54 | path: ${{ vars.TF_DIRECTORY 
}}/.terraform 55 | key: terraform-${{ hashFiles('${{ vars.TF_DIRECTORY }}/.terraform.lock.hcl') }} 56 | 57 | - name: Init Terraform 58 | uses: WalletConnect/actions/terraform/init/@2.1.4 59 | with: 60 | environment: ${{ inputs.stage }} 61 | use-tfvars: false 62 | 63 | - name: Validate Terraform 64 | id: tf-fmt 65 | uses: WalletConnect/actions/terraform/validate/@2.1.4 66 | with: 67 | terraform-path: ${{ vars.TF_DIRECTORY }} 68 | 69 | tfsec: 70 | name: TFSec 71 | runs-on: ubuntu-latest 72 | steps: 73 | - name: Checkout 74 | uses: actions/checkout@v3 75 | with: 76 | submodules: recursive 77 | 78 | - name: Setup Terraform 79 | uses: hashicorp/setup-terraform@v2 80 | with: 81 | cli_config_credentials_token: ${{ secrets.TF_API_TOKEN }} 82 | 83 | - name: Cache Terraform data 84 | uses: actions/cache@v3 85 | with: 86 | path: ${{ vars.TF_DIRECTORY }}/.terraform 87 | key: terraform-${{ hashFiles('${{ vars.TF_DIRECTORY }}/.terraform.lock.hcl') }} 88 | 89 | - name: Init Terraform 90 | uses: WalletConnect/actions/terraform/init/@2.1.4 91 | with: 92 | environment: ${{ inputs.stage }} 93 | use-tfvars: false 94 | 95 | - uses: aquasecurity/tfsec-action@v1.0.3 96 | with: 97 | working_directory: ${{ vars.TF_DIRECTORY }} 98 | github_token: ${{ secrets.GITHUB_TOKEN }} 99 | 100 | tflint: 101 | name: TFLint 102 | runs-on: ubuntu-latest 103 | steps: 104 | - name: Checkout 105 | uses: actions/checkout@v3 106 | with: 107 | submodules: recursive 108 | 109 | - name: Setup Terraform 110 | uses: hashicorp/setup-terraform@v2 111 | with: 112 | cli_config_credentials_token: ${{ secrets.TF_API_TOKEN }} 113 | 114 | - name: Cache Terraform data 115 | uses: actions/cache@v3 116 | with: 117 | path: ${{ vars.TF_DIRECTORY }}/.terraform 118 | key: terraform-${{ hashFiles('${{ vars.TF_DIRECTORY }}/.terraform.lock.hcl') }} 119 | 120 | - name: Init Terraform 121 | uses: WalletConnect/actions/terraform/init/@2.1.4 122 | with: 123 | environment: ${{ inputs.stage }} 124 | use-tfvars: false 125 | 126 | - name: 
Setup TFLint 127 | uses: terraform-linters/setup-tflint@v2 128 | with: 129 | tflint_version: v0.45.0 130 | 131 | - name: Cache `tflint` plugins 132 | uses: actions/cache@v3 133 | with: 134 | path: ~/.tflint.d/plugins 135 | key: tflint-${{ hashFiles('${{ vars.TF_DIRECTORY }}/.tflint.hcl') }} 136 | 137 | - name: Init TFLint 138 | run: tflint --init 139 | working-directory: ${{ vars.TF_DIRECTORY }} 140 | 141 | - name: Check 142 | run: tflint --recursive --format=compact 143 | working-directory: ${{ vars.TF_DIRECTORY }} 144 | -------------------------------------------------------------------------------- /.github/workflows/sub-infra-plan.yml: -------------------------------------------------------------------------------- 1 | name: ❖ Infra - Plan 2 | 3 | on: 4 | workflow_call: 5 | inputs: 6 | stage: 7 | description: 'the environment to plan against' 8 | required: true 9 | type: string 10 | default: 'staging' 11 | stage-url: 12 | description: 'the URL of the environment' 13 | required: true 14 | type: string 15 | default: https://${{ vars.SUBDOMAIN_NAME }}.walletconnect.com/health 16 | version: 17 | description: 'the image version to use in the ECS task definition' 18 | required: true 19 | type: string 20 | secrets: 21 | TF_API_TOKEN: 22 | required: true 23 | 24 | permissions: 25 | contents: read 26 | id-token: write 27 | 28 | concurrency: ${{ inputs.stage }} 29 | 30 | jobs: 31 | plan: 32 | name: Plan `${{ inputs.stage }}` 33 | runs-on: ubuntu-latest 34 | environment: 35 | name: ${{ inputs.stage }} 36 | url: ${{ inputs.stage-url }} 37 | env: 38 | TF_API_TOKEN: ${{ secrets.TF_API_TOKEN }} 39 | TF_WORKSPACE: wl-${{ inputs.stage }} 40 | steps: 41 | - name: Checkout 42 | uses: actions/checkout@v3 43 | with: 44 | submodules: recursive 45 | 46 | - name: Configure AWS Credentials for Monitoring account 47 | uses: aws-actions/configure-aws-credentials@v2 48 | with: 49 | role-to-assume: ${{ vars.AWS_ROLE_MONITORING }} 50 | aws-region: ${{ vars.AWS_REGION }} 51 | 52 | - name: 
Get Grafana details 53 | id: grafana-get-details 54 | uses: WalletConnect/actions/aws/grafana/get-details-by-name/@2.1.4 55 | with: 56 | workspace-name: ${{ vars.GRAFANA_WORKSPACE_NAME }} 57 | 58 | - name: Get Grafana key 59 | id: grafana-get-key 60 | uses: WalletConnect/actions/aws/grafana/get-key/@2.1.4 61 | with: 62 | key-prefix: ${{ github.event.repository.name }} 63 | workspace-id: ${{ steps.grafana-get-details.outputs.workspace-id }} 64 | 65 | - name: Setup Terraform 66 | uses: hashicorp/setup-terraform@v2 67 | with: 68 | cli_config_credentials_token: ${{ secrets.TF_API_TOKEN }} 69 | 70 | - name: Cache Terraform data 71 | uses: actions/cache@v3 72 | with: 73 | path: ${{ vars.TF_DIRECTORY }}/.terraform 74 | key: terraform-${{ hashFiles('${{ vars.TF_DIRECTORY }}/.terraform.lock.hcl') }} 75 | 76 | - name: Init Terraform 77 | uses: WalletConnect/actions/terraform/init/@2.1.4 78 | with: 79 | environment: ${{ inputs.stage }} 80 | use-tfvars: false 81 | 82 | - name: Configure Terraform Variables 83 | working-directory: ${{ vars.TF_DIRECTORY }} 84 | run: | 85 | echo 'ofac_blocked_countries="${{ vars.OFAC_BLOCKED_COUNTRIES }}"' >> plan.auto.tfvars 86 | echo 'image_version="${{ inputs.version }}"' >> plan.auto.tfvars 87 | echo 'grafana_auth="${{ steps.grafana-get-key.outputs.key }}"' >> plan.auto.tfvars 88 | 89 | - name: Plan ${{ inputs.stage }} 90 | working-directory: ${{ vars.TF_DIRECTORY }} 91 | run: | 92 | terraform plan -no-color 93 | 94 | - name: Delete Grafana key 95 | id: grafana-delete-key 96 | if: ${{ always() }} 97 | uses: WalletConnect/actions/aws/grafana/delete-key/@2.1.4 98 | with: 99 | key-name: ${{ steps.grafana-get-key.outputs.key-name }} 100 | workspace-id: ${{ steps.grafana-get-details.outputs.workspace-id }} 101 | -------------------------------------------------------------------------------- /.github/workflows/sub-publish-image.yml: -------------------------------------------------------------------------------- 1 | name: ❖ Publish Image 2 | 3 | 
on: 4 | workflow_call: 5 | inputs: 6 | version: 7 | description: 'The version to publish' 8 | type: string 9 | required: true 10 | aws-role-arn: 11 | description: 'the ARN of the AWS role to assume' 12 | type: string 13 | default: ${{ vars.AWS_ROLE_ARN }} 14 | 15 | permissions: 16 | contents: read 17 | id-token: write 18 | packages: write 19 | 20 | jobs: 21 | build-container: 22 | name: Build 23 | runs-on: ubuntu-latest 24 | steps: 25 | - name: Checkout 26 | uses: actions/checkout@v3 27 | with: 28 | ref: ${{ inputs.version }} 29 | 30 | - name: Configure AWS Credentials 31 | uses: aws-actions/configure-aws-credentials@v2 32 | with: 33 | role-to-assume: ${{ inputs.aws-role-arn }} 34 | aws-region: ${{ vars.AWS_REGION }} 35 | 36 | - name: Login to Amazon ECR 37 | id: login-ecr 38 | uses: aws-actions/amazon-ecr-login@v1 39 | with: 40 | mask-password: 'true' 41 | 42 | - name: Login to GitHub Container Registry 43 | uses: docker/login-action@v2 44 | with: 45 | registry: ghcr.io 46 | username: ${{ github.actor }} 47 | password: ${{ secrets.GITHUB_TOKEN }} 48 | logout: false 49 | 50 | - name: Docker meta 51 | id: meta 52 | uses: docker/metadata-action@v4 53 | with: 54 | images: | 55 | ghcr.io/${{ github.repository }} 56 | ${{ steps.login-ecr.outputs.registry }}/${{ vars.IMAGE_NAME }} 57 | walletconnect/${{ vars.IMAGE_NAME }},enable=false 58 | flavor: | 59 | latest=auto 60 | tags: | 61 | type=semver,pattern={{version}} 62 | type=semver,pattern={{major}}.{{minor}} 63 | type=raw,value=${{ inputs.version }} 64 | 65 | - name: Setup Docker Buildx 66 | uses: docker/setup-buildx-action@v2 67 | 68 | - name: Build, tag, and push image 69 | uses: docker/build-push-action@v3 70 | with: 71 | context: . 
72 | push: true 73 | tags: ${{ steps.meta.outputs.tags }} 74 | labels: ${{ steps.meta.outputs.labels }} 75 | cache-from: type=gha 76 | cache-to: type=gha,mode=max 77 | -------------------------------------------------------------------------------- /.github/workflows/sub-validate.yml: -------------------------------------------------------------------------------- 1 | name: ❖ Validate 2 | 3 | on: 4 | workflow_call: 5 | inputs: 6 | stage: 7 | description: 'the environment to validate' 8 | required: true 9 | type: string 10 | default: 'staging' 11 | stage-url: 12 | description: 'the URL of the environment' 13 | required: true 14 | type: string 15 | default: https://${{ vars.SUBDOMAIN_NAME }}.walletconnect.com/health 16 | 17 | jobs: 18 | health-check: 19 | name: Health Check - ${{ inputs.stage }} 20 | runs-on: ubuntu-latest 21 | environment: 22 | name: ${{ inputs.stage }} 23 | url: ${{ inputs.stage-url }} 24 | steps: 25 | - name: health-check 26 | run: curl "${{ inputs.stage-url }}" 27 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | #--------------------------------------- 2 | # General 3 | 4 | .DS_Store 5 | .AppleDouble 6 | .LSOverride 7 | [Dd]esktop.ini 8 | 9 | #--------------------------------------- 10 | # Environment 11 | 12 | .direnv 13 | .envrc 14 | .actrc 15 | 16 | #--------------------------------------- 17 | # Editors 18 | 19 | # JetBrains 20 | .idea/ 21 | out/ 22 | .fleet 23 | *.iws 24 | 25 | # VSCode 26 | .vscode/ 27 | .history/ 28 | *.code-workspace 29 | 30 | #--------------------------------------- 31 | # Rust/Cargo 32 | 33 | # Generated by Cargo, will have compiled files and executables 34 | debug/ 35 | target/ 36 | 37 | # Backup files generated by rustfmt 38 | **/*.rs.bk 39 | 40 | # MSVC Windows builds of rustc generate these, which store debugging information 41 | *.pdb 42 | 43 | #--------------------------------------- 
44 | # Terraform 45 | 46 | # Local .terraform directories 47 | **/.terraform/* 48 | 49 | # .tfstate files 50 | *.tfstate 51 | *.tfstate.* 52 | 53 | # Exclude all .tfvars files, which are likely to contain sensitive data, such as 54 | # password, private keys, and other secrets. These should not be part of version 55 | # control as they are data points which are potentially sensitive and subject 56 | # to change depending on the environment. 57 | *.tfvars 58 | *.tfvars.json 59 | 60 | # Ignore override files as they are usually used to override resources locally and so are not checked in 61 | override.tf 62 | override.tf.json 63 | *_override.tf 64 | *_override.tf.json 65 | 66 | # Include override files you do wish to add to version control using negated pattern 67 | # 68 | # !example_override.tf 69 | 70 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan 71 | *tfplan* 72 | 73 | # Ignore CLI configuration files 74 | .terraformrc 75 | terraform.rc 76 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "terraform/monitoring/grafonnet-lib"] 2 | path = terraform/monitoring/grafonnet-lib 3 | url = git@github.com:WalletConnect/grafonnet-lib.git 4 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/antonbabenko/pre-commit-terraform 3 | rev: v1.77.0 4 | hooks: 5 | - id: terraform_fmt 6 | - id: terraform_tflint 7 | # - id: terraform_tfsec 8 | - id: terraform_docs 9 | args: 10 | - --args=--config=./terraform/.terraform-docs.yml 11 | 12 | - repo: https://github.com/pre-commit/pre-commit-hooks 13 | rev: v4.4.0 14 | hooks: 15 | - id: check-merge-conflict 16 | - id: check-yaml 17 | - id: end-of-file-fixer 18 | - id: 
trailing-whitespace 19 | - id: detect-aws-credentials 20 | - id: detect-private-key 21 | - id: forbid-new-submodules 22 | - id: no-commit-to-branch 23 | - id: mixed-line-ending 24 | -------------------------------------------------------------------------------- /.terraformignore: -------------------------------------------------------------------------------- 1 | #--------------------------------------- 2 | # General 3 | .DS_Store 4 | .AppleDouble 5 | .LSOverride 6 | [Dd]esktop.ini 7 | .gitignore 8 | .gitmodules 9 | .pre-commit-config.yaml 10 | CHANGELOG.md 11 | LICENSE 12 | README.md 13 | .github/ 14 | ops/ 15 | Dockerfile 16 | justfile 17 | crash.log 18 | 19 | #--------------------------------------- 20 | # Rust/Cargo 21 | 22 | # Generated by Cargo, will have compiled files and executables 23 | src/ 24 | debug/ 25 | target/ 26 | build.rs 27 | cargo.lock 28 | cargo.toml 29 | rustfmt.toml 30 | 31 | # Backup files generated by rustfmt 32 | **/*.rs.bk 33 | 34 | # MSVC Windows builds of rustc generate these, which store debugging information 35 | *.pdb 36 | 37 | #--------------------------------------- 38 | # Environment 39 | .env.example 40 | .direnv 41 | .envrc 42 | 43 | #--------------------------------------- 44 | # JetBrains 45 | .idea/ 46 | out/ 47 | .fleet 48 | *.iws 49 | 50 | #--------------------------------------- 51 | # VSCode 52 | .vscode/ 53 | .history/ 54 | *.code-workspace 55 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "keyserver" 3 | version = "1.14.6" 4 | edition = "2021" 5 | authors = [ 6 | "Derek Rein ", 7 | "Szymon Rząd ", 8 | "Xavier Basty-Kjellberg ", 9 | "Maciej Rak ", 10 | ] 11 | build = "build.rs" 12 | 13 | [dependencies] 14 | wc = { git = "https://github.com/WalletConnect/utils-rs.git", tag = "v0.11.1", features = [ 15 | "geoip", 16 | "geoblock", 17 | ] } 18 | relay_rpc = { git = 
"https://github.com/WalletConnect/WalletConnectRust.git", tag = "v0.30.0", features = [ 19 | "cacao", 20 | ] } 21 | blockchain_api = { git = "https://github.com/WalletConnect/WalletConnectRust.git", tag = "v0.30.0" } 22 | 23 | aws-config = "1.1.9" 24 | aws-sdk-s3 = "1.21.0" 25 | 26 | axum = "0.7.5" 27 | axum-macros = "0.3.0" 28 | tokio = { version = "1.0", features = ["full"] } 29 | tower = { version = "0.4.13", features = [ 30 | "util", 31 | "timeout", 32 | "load-shed", 33 | "limit", 34 | ] } 35 | tower-http = { version = "0.5.2", features = [ 36 | "add-extension", 37 | "auth", 38 | "compression-full", 39 | "trace", 40 | "cors", 41 | "request-id", 42 | "util", 43 | ] } 44 | hyper = "1.2.0" 45 | http = "1.0.0" 46 | 47 | # Database 48 | wither = { git = "https://github.com/WalletConnect/wither.git", rev = "6a70e74", features = [ 49 | "bson-chrono-0_4", 50 | ] } 51 | wither_derive = { git = "https://github.com/WalletConnect/wither.git", rev = "6a70e74" } 52 | 53 | # Seralisation 54 | serde = { version = "1.0", features = ["derive"] } 55 | serde_json = "1.0" 56 | 57 | # JWT 58 | jsonwebtoken = "8" 59 | 60 | # Env Vars 61 | dotenv = "0.15" 62 | envy = "0.4" 63 | 64 | # Metrics & Traces 65 | prometheus-core = { package = "prometheus", version = "0.13" } 66 | opentelemetry = { version = "0.18", features = ["metrics", "rt-tokio"] } 67 | opentelemetry-prometheus = "0.11" 68 | opentelemetry-otlp = "0.11" 69 | 70 | # Logging 71 | tracing = "0.1" 72 | tracing-subscriber = { version = "0.3", features = [ 73 | "env-filter", 74 | "parking_lot", 75 | ] } 76 | tracing-appender = "0.2" 77 | tracing-opentelemetry = "0.18" 78 | atty = "0.2" 79 | 80 | # Misc 81 | build-info = "0.0.29" 82 | derive_more = "0.99" 83 | bs58 = "0.4" 84 | log = "0.4" 85 | thiserror = "1.0" 86 | async-trait = "0.1" 87 | anyhow = "1" 88 | validator = { version = "0.16", features = ["derive"] } 89 | data-encoding = "2.3" 90 | iri-string = "0.7.0" 91 | time = "0.3.17" 92 | k256 = "0.12.0" 93 | sha3 = "0.10.6" 
94 | hex = "0.4.3" 95 | 96 | [build-dependencies] 97 | build-info-build = "0.0.29" 98 | 99 | # [patch.'https://github.com/WalletConnect/WalletConnectRust.git'] 100 | # relay_rpc = { path = "../WalletConnectRust/relay_rpc" } 101 | # relay_client = { path = "../WalletConnectRust/relay_client" } 102 | # blockchain_api = { path = "../WalletConnectRust/blockchain_api" } 103 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # 3 | # Build args 4 | # 5 | ################################################################################ 6 | ARG base="rust:buster" 7 | ARG runtime="debian:buster-slim" 8 | ARG bin="keyserver" 9 | ARG version="unknown" 10 | ARG sha="unknown" 11 | ARG maintainer="WalletConnect" 12 | ARG release="" 13 | 14 | ################################################################################ 15 | # 16 | # Install cargo-chef 17 | # 18 | ################################################################################ 19 | FROM ${base} AS chef 20 | 21 | WORKDIR /app 22 | RUN cargo install cargo-chef 23 | 24 | ################################################################################ 25 | # 26 | # Generate recipe file 27 | # 28 | ################################################################################ 29 | FROM chef AS plan 30 | 31 | WORKDIR /app 32 | COPY Cargo.lock Cargo.toml ./ 33 | COPY src ./src 34 | RUN cargo chef prepare --recipe-path recipe.json 35 | 36 | ################################################################################ 37 | # 38 | # Build the binary 39 | # 40 | ################################################################################ 41 | FROM chef AS build 42 | 43 | ARG release 44 | ENV RELEASE=${release:+--release} 45 | 46 | # This is a build requirement of `opentelemetry-otlp`. 
Once the new version 47 | # is rolled out, which no longer requires the `protoc`, we'll be able to 48 | # get rid of this. 49 | RUN apt-get update \ 50 | && apt-get install -y --no-install-recommends protobuf-compiler 51 | 52 | WORKDIR /app 53 | # Cache dependancies 54 | COPY --from=plan /app/recipe.json recipe.json 55 | RUN cargo chef cook --recipe-path recipe.json ${RELEASE} 56 | # Build the local binary 57 | COPY . . 58 | RUN cargo build --bin keyserver ${RELEASE} 59 | # Certificate file required to use TLS with AWS DocumentDB. 60 | RUN wget https://s3.amazonaws.com/rds-downloads/rds-combined-ca-bundle.pem 61 | 62 | ################################################################################ 63 | # 64 | # Runtime image 65 | # 66 | ################################################################################ 67 | FROM ${runtime} AS runtime 68 | 69 | ARG bin 70 | ARG version 71 | ARG sha 72 | ARG maintainer 73 | ARG release 74 | ARG binpath=${release:+release} 75 | 76 | LABEL version=${version} 77 | LABEL sha=${sha} 78 | LABEL maintainer=${maintainer} 79 | 80 | WORKDIR /app 81 | COPY --from=build /app/target/${binpath:-debug}/keyserver /usr/local/bin/keyserver 82 | COPY --from=build /app/rds-combined-ca-bundle.pem /app/rds-combined-ca-bundle.pem 83 | 84 | RUN apt-get update \ 85 | && apt-get install -y --no-install-recommends ca-certificates libssl-dev \ 86 | && apt-get clean \ 87 | && rm -rf /var/lib/apt/lists/* 88 | 89 | USER 1001:1001 90 | ENTRYPOINT ["/usr/local/bin/keyserver"] -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 WalletConnect 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to 
use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Keys-Server 2 | 3 | ## API Guide 4 | 5 | For API see [WalletConnect Docs](https://docs.walletconnect.com/2.0/specs/servers/keys/keys-server-api) 6 | 7 | ## Running Locally 8 | 9 | Setup: 10 | - Install [`rust`](https://www.rust-lang.org/tools/install); 11 | - Install [`docker`](https://docs.docker.com/get-docker/); 12 | - Install [`just`](https://github.com/casey/just#packages); 13 | - Copy the env file: 14 | ```sh 15 | $ cp .env.example .env 16 | ``` 17 | - Fill `.env` file with necessary values 18 | 19 | Running the keys-server: 20 | ```sh 21 | $ source .env # make sure the env variables are set 22 | $ just run 23 | ``` 24 | 25 | Running the docker-compose set up (MongoDB + MongoExpress + Jaeger + Keys-Server): 26 | ```sh 27 | $ source .env # make sure the env variables are set 28 | $ just build-docker 29 | $ just run-docker 30 | ``` 31 | 32 | Running tests: 33 | ```sh 34 | $ just test 35 | ``` 36 | 
-------------------------------------------------------------------------------- /build.rs: -------------------------------------------------------------------------------- 1 | fn main() { 2 | build_info_build::build_script(); 3 | } 4 | -------------------------------------------------------------------------------- /ops/docker-compose.keyserver.yml: -------------------------------------------------------------------------------- 1 | version: '3.9' 2 | 3 | services: 4 | jaeger: 5 | image: jaegertracing/opentelemetry-all-in-one:latest 6 | ports: 7 | - "3001:16686" 8 | 9 | keyserver: 10 | build: 11 | dockerfile: ./Dockerfile 12 | context: ../ 13 | image: keyserver 14 | ports: 15 | - 8080:8080 16 | healthcheck: 17 | test: [ "CMD", "curl", "localhost:8080/health" ] 18 | interval: 5s 19 | timeout: 5s 20 | retries: 5 21 | depends_on: 22 | mongo: 23 | condition: service_healthy 24 | jaeger: 25 | condition: service_started 26 | environment: 27 | - RUST_BACKTRACE=1 28 | - PORT=8080 29 | - LOG_LEVEL=INFO 30 | - DATABASE_URL=mongodb://admin:admin@mongo:27017/keyserver?authSource=admin 31 | - TELEMETRY_ENABLED=true 32 | - TELEMETRY_GRPC_URL=http://jaeger:4317 -------------------------------------------------------------------------------- /ops/docker-compose.storage.yml: -------------------------------------------------------------------------------- 1 | version: '3.9' 2 | 3 | services: 4 | mongo: 5 | image: mongo:4 6 | ports: 7 | - 27017:27017 8 | healthcheck: 9 | test: 10 | [ 11 | "CMD", 12 | "mongo", 13 | "--eval", 14 | "'db.runCommand(\"ping\").ok'", 15 | "localhost:27017/test", 16 | "--quiet" 17 | ] 18 | interval: 5s 19 | timeout: 5s 20 | retries: 5 21 | environment: 22 | - MONGO_INITDB_ROOT_USERNAME=admin 23 | - MONGO_INITDB_ROOT_PASSWORD=admin 24 | - MONGO_INITDB_DATABASE=keyserver 25 | 26 | mongo-express: 27 | image: mongo-express 28 | ports: 29 | - 8085:8081 30 | depends_on: 31 | mongo: 32 | condition: service_healthy 33 | environment: 34 | - 
ME_CONFIG_MONGODB_ADMINUSERNAME=admin 35 | - ME_CONFIG_MONGODB_ADMINPASSWORD=admin 36 | - ME_CONFIG_MONGODB_URL="mongodb://admin:admin@mongo:27017" -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | edition = "2021" 2 | #fn_single_line = false 3 | #format_code_in_doc_comments = true 4 | #format_strings = true 5 | #imports_layout = "HorizontalVertical" 6 | #imports_granularity = "One" 7 | #normalize_comments = true 8 | #normalize_doc_attributes = true 9 | reorder_imports = true 10 | #reorder_impl_items = true 11 | #group_imports = "StdExternalCrate" 12 | use_try_shorthand = true 13 | #wrap_comments = true 14 | #overflow_delimited_expr = true 15 | remove_nested_parens = true 16 | reorder_modules = true 17 | #unstable_features = true 18 | use_field_init_shorthand = true 19 | -------------------------------------------------------------------------------- /src/auth/did.rs: -------------------------------------------------------------------------------- 1 | pub const DID_DELIMITER: &str = ":"; 2 | pub const DID_PREFIX: &str = "did"; 3 | pub const DID_METHOD_KEY: &str = "key"; 4 | pub const DID_METHOD_PKH: &str = "pkh"; 5 | 6 | use thiserror::Error as ThisError; 7 | 8 | #[derive(Debug, ThisError)] 9 | pub enum DidError { 10 | #[error("Invalid issuer DID prefix")] 11 | Prefix, 12 | 13 | #[error("Invalid issuer DID method")] 14 | Method, 15 | 16 | #[error("Invalid issuer format")] 17 | Format, 18 | } 19 | 20 | pub fn extract_did_data<'a>(did: &'a str, method: &'a str) -> Result<&'a str, DidError> { 21 | let data = did 22 | .strip_prefix(DID_PREFIX) 23 | .ok_or(DidError::Prefix)? 24 | .strip_prefix(DID_DELIMITER) 25 | .ok_or(DidError::Format)? 26 | .strip_prefix(method) 27 | .ok_or(DidError::Method)? 
28 | .strip_prefix(DID_DELIMITER) 29 | .ok_or(DidError::Format)?; 30 | Ok(data) 31 | } 32 | 33 | /// Checks whether the provided string is a valid `did` according to the 34 | /// X25519[1] spec. 35 | /// 36 | /// [1]: https://w3c-ccg.github.io/did-method-key/#x25519 37 | pub fn validate_x25519(did: &str) -> bool { 38 | did.starts_with("did:key:z6LS") 39 | } 40 | -------------------------------------------------------------------------------- /src/auth/jwt/tests.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | 3 | #[derive(Serialize, Deserialize, Debug)] 4 | struct TestClaims { 5 | pub iss: String, 6 | pub sub: String, 7 | pub aud: String, 8 | pub iat: u64, 9 | pub exp: u64, 10 | } 11 | 12 | impl JwtClaims for TestClaims { 13 | fn is_valid(&self) -> bool { 14 | true 15 | } 16 | } 17 | 18 | impl JwtVerifierByIssuer for TestClaims { 19 | fn get_iss(&self) -> &str { 20 | &self.iss 21 | } 22 | } 23 | 24 | #[derive(Serialize, Deserialize, Debug)] 25 | struct TestInviteKeyMockClaims { 26 | pub iss: String, 27 | pub sub: String, 28 | pub aud: String, 29 | pub iat: u64, 30 | pub exp: u64, 31 | pub pkh: String, 32 | } 33 | 34 | impl JwtClaims for TestInviteKeyMockClaims { 35 | fn is_valid(&self) -> bool { 36 | true 37 | } 38 | } 39 | 40 | impl JwtVerifierByIssuer for TestInviteKeyMockClaims { 41 | fn get_iss(&self) -> &str { 42 | &self.iss 43 | } 44 | } 45 | 46 | /// Test that we can decode a JWT 47 | #[test] 48 | #[should_panic] 49 | fn jwt_new_should_panic_with_invalid_token() { 50 | Jwt::::new("1.2.3").unwrap(); 51 | } 52 | 53 | /// Test that we can verify a JWT. 
54 | #[test] 55 | fn jwt_verify_success() { 56 | let payload = "eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJkaWQ6a2V5Ono2TWtvZEhad25lVlJTaHRhTGY4SktZa3hwREdwMXZHWm5wR21kQnBYOE0yZXh4SCIsInN1YiI6ImM0NzlmZTVkYzQ2NGU3NzFlNzhiMTkzZDIzOWE2NWI1OGQyNzhjYWQxYzM0YmZiMGI1NzE2ZTViYjUxNDkyOGUiLCJhdWQiOiJ3c3M6Ly9yZWxheS53YWxsZXRjb25uZWN0LmNvbSIsImlhdCI6MTY1NjkxMDA5NywiZXhwIjoxNjU2OTk2NDk3fQ.bAKl1swvwqqV_FgwvD4Bx3Yp987B9gTpZctyBviA-EkAuWc8iI8SyokOjkv9GJESgid4U8Tf2foCgrQp2qrxBA"; 57 | let jwt = Jwt::::new(payload).unwrap(); 58 | assert!(jwt.verify().is_ok()); 59 | } 60 | 61 | #[test] 62 | fn jwt_verify_invite_key_success() { 63 | let payload = "eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJkaWQ6a2V5Ono2TWt0bXRQY3JWdDZiQkRURFMzVVpOb3lMVGNNTTZjbVV2d1oyU0pVVGJtZlNaRyIsInN1YiI6ImQ0YzkyYWQ0MzA0YWVmOTJhMDM3MWRhZmUzMDFmOGU5YTg2NzQwNGVkM2EwNTM2NGY0NzM2ZDVkMTFhN2FjYzMiLCJhdWQiOiJodHRwczovL3N0YWdpbmcua2V5cy53YWxsZXRjb25uZWN0LmNvbSIsImlhdCI6MTY3NDc0MDQxOSwiZXhwIjoxNzYxMTQwNDE5LCJwa2giOiJkaWQ6cGtoOmVpcDE1NToxOjB4MmNGNjFEMTJhNzA3OGM3OTY1YjQ2NjRlMUM3NEI5ODNmMDNhODNCNiJ9.cjaoYZVsEAPN5oLlyPAHMLEMR7SIFOSLfin3APl8cPslIsx8h0XROA6Iz__dQo228DuE29G_iwaouzZptGgWDw"; 64 | let jwt = Jwt::::new(payload).unwrap(); 65 | assert!(jwt.verify().is_ok()); 66 | } 67 | 68 | /// Test that we can verify a JWT. 
69 | #[test] 70 | fn jwt_verify_fail() { 71 | let payload = "eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJkaWQ6a2V5Ono2TWtwSHlDalBqQWs5TmVGWlJuOFJGUVRiaGZ6TEs0Tm5ialJnTGNVVGdzU1RBQyIsInN1YiI6ImY3NjUyYWZiNmRjNGUwN2JmMWNlZTc2NzNkYTExMzI1M2U1NjcwNTJmZGVmZmFjYzdlOTQwNTZmMTQ3NDI1NzMiLCJhdWQiOiJodHRwOi8vMTAuMC4yLjI6ODA4MCIsImlhdCI6MTY3Mzk4NTg1MywiZXhwIjoxNjc0MDcyMjUzLCJwa2giOiJkaWQ6cGtoOmVpcDE1NToxOjB4ZTcyZjk4YWY3YmZlOWEzN2EwNmE2YmY2M2U2OTEyNTYzMTMxN2NlZCJ9.tIx08nEkoJ4M2VZ1uJI6SKSxKhZ31ANa7dXu_b07fXhmKYgujHEyyFk7Ge4OEIEtfH0wrLBOAbnpwEFY2JEwAQ"; 72 | let jwt = Jwt::::new(payload).unwrap(); 73 | assert!(jwt.verify().is_err()); 74 | } 75 | -------------------------------------------------------------------------------- /src/auth/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod did; 2 | pub mod jwt; 3 | pub mod public_key; 4 | -------------------------------------------------------------------------------- /src/auth/public_key.rs: -------------------------------------------------------------------------------- 1 | use { 2 | serde::{Deserialize, Serialize}, 3 | std::str::FromStr, 4 | }; 5 | 6 | pub const MULTICODEC_ED25519_BASE: &str = "z"; 7 | pub const MULTICODEC_ED25519_HEADER: [u8; 2] = [237, 1]; 8 | pub const MULTICODEC_ED25519_LENGTH: usize = 32; 9 | 10 | use { 11 | derive_more::{AsMut, AsRef}, 12 | thiserror::Error as ThisError, 13 | }; 14 | 15 | #[derive(Debug, ThisError)] 16 | pub enum PublicKeyDecodingError { 17 | #[error("Invalid issuer multicodec base")] 18 | Base, 19 | 20 | #[error("Invalid issuer base58")] 21 | Encoding, 22 | 23 | #[error("Invalid multicodec header")] 24 | Header, 25 | 26 | #[error("Invalid issuer pubkey length")] 27 | Length, 28 | } 29 | 30 | #[derive(Debug, Default, Clone, PartialEq, Eq, AsRef, AsMut, Serialize, Deserialize)] 31 | #[as_ref(forward)] 32 | #[as_mut(forward)] 33 | pub struct PublicKey(pub [u8; MULTICODEC_ED25519_LENGTH]); 34 | 35 | impl FromStr for PublicKey { 36 | type Err = 
PublicKeyDecodingError; 37 | 38 | fn from_str(val: &str) -> Result { 39 | const TOTAL_DECODED_LENGTH: usize = 40 | MULTICODEC_ED25519_HEADER.len() + MULTICODEC_ED25519_LENGTH; 41 | 42 | let stripped = val 43 | .strip_prefix(MULTICODEC_ED25519_BASE) 44 | .ok_or(PublicKeyDecodingError::Base)?; 45 | 46 | let mut decoded: [u8; TOTAL_DECODED_LENGTH] = [0; TOTAL_DECODED_LENGTH]; 47 | 48 | let decoded_len = bs58::decode(stripped) 49 | .into(&mut decoded) 50 | .map_err(|_| PublicKeyDecodingError::Encoding)?; 51 | 52 | if decoded_len != TOTAL_DECODED_LENGTH { 53 | return Err(PublicKeyDecodingError::Length); 54 | } 55 | 56 | let pub_key = decoded 57 | .strip_prefix(&MULTICODEC_ED25519_HEADER) 58 | .ok_or(PublicKeyDecodingError::Header)?; 59 | 60 | let mut data = Self::default(); 61 | data.0.copy_from_slice(pub_key); 62 | 63 | Ok(data) 64 | } 65 | } 66 | 67 | impl std::fmt::Display for PublicKey { 68 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 69 | const PREFIX_LEN: usize = MULTICODEC_ED25519_HEADER.len(); 70 | const TOTAL_LEN: usize = MULTICODEC_ED25519_LENGTH + PREFIX_LEN; 71 | 72 | let mut prefixed_data: [u8; TOTAL_LEN] = [0; TOTAL_LEN]; 73 | prefixed_data[..PREFIX_LEN].copy_from_slice(&MULTICODEC_ED25519_HEADER); 74 | prefixed_data[PREFIX_LEN..].copy_from_slice(&self.0); 75 | 76 | let encoded_data = bs58::encode(prefixed_data).into_string(); 77 | 78 | write!(f, "{MULTICODEC_ED25519_BASE}{encoded_data}") 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /src/config.rs: -------------------------------------------------------------------------------- 1 | use {crate::error, relay_rpc::domain::ProjectId, serde::Deserialize, std::str::FromStr}; 2 | 3 | #[derive(Deserialize, Debug, Clone, Eq, PartialEq)] 4 | pub struct Configuration { 5 | #[serde(default = "default_port")] 6 | pub port: u16, 7 | #[serde(default = "default_log_level")] 8 | pub log_level: String, 9 | pub database_url: String, 10 | pub 
project_id: ProjectId, 11 | 12 | #[serde(default = "default_blockchain_api_endpoint")] 13 | pub blockchain_api_endpoint: Option, 14 | 15 | // Telemetry 16 | pub telemetry_enabled: Option, 17 | pub telemetry_grpc_url: Option, 18 | pub telemetry_prometheus_port: Option, 19 | 20 | // AWS 21 | pub s3_endpoint: Option, 22 | 23 | // GeoIP 24 | pub geoip_db_bucket: Option, 25 | pub geoip_db_key: Option, 26 | 27 | // GeoBlocking 28 | pub blocked_countries: Vec, 29 | } 30 | 31 | impl Configuration { 32 | pub fn new() -> error::Result { 33 | let config = envy::from_env::()?; 34 | Ok(config) 35 | } 36 | 37 | pub fn log_level(&self) -> tracing::Level { 38 | tracing::Level::from_str(self.log_level.as_str()).expect("Invalid log level") 39 | } 40 | } 41 | 42 | fn default_port() -> u16 { 43 | 8080 44 | } 45 | 46 | fn default_log_level() -> String { 47 | "WARN".to_string() 48 | } 49 | 50 | fn default_blockchain_api_endpoint() -> Option { 51 | Some("https://rpc.walletconnect.com".to_string()) 52 | } 53 | -------------------------------------------------------------------------------- /src/error.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::{auth, handlers::ResponseError, stores::StoreError}, 3 | axum::response::{IntoResponse, Response}, 4 | hyper::StatusCode, 5 | relay_rpc::auth::cacao::CacaoError, 6 | tracing::{error, info, warn}, 7 | }; 8 | 9 | pub type Result = std::result::Result; 10 | 11 | #[derive(Debug, thiserror::Error)] 12 | pub enum Error { 13 | #[error(transparent)] 14 | Anyhow(#[from] anyhow::Error), 15 | 16 | #[error(transparent)] 17 | Envy(#[from] envy::Error), 18 | 19 | #[error(transparent)] 20 | Trace(#[from] opentelemetry::trace::TraceError), 21 | 22 | #[error(transparent)] 23 | Metrics(#[from] opentelemetry::metrics::MetricsError), 24 | 25 | #[error(transparent)] 26 | Prometheus(#[from] prometheus_core::Error), 27 | 28 | #[error(transparent)] 29 | Database(#[from] wither::mongodb::error::Error), 30 | 31 | 
#[error(transparent)] 32 | Store(#[from] crate::stores::StoreError), 33 | 34 | #[error(transparent)] 35 | Validation(#[from] validator::ValidationErrors), 36 | 37 | #[error(transparent)] 38 | JwtVerification(#[from] auth::jwt::JwtError), 39 | 40 | #[error(transparent)] 41 | Did(#[from] auth::did::DidError), 42 | 43 | #[error(transparent)] 44 | Cacao(#[from] CacaoError), 45 | 46 | #[error("Blockchain API error: {0}")] 47 | BlockchainApi(blockchain_api::Error), 48 | 49 | #[error("IO Error: {0}")] 50 | Io(#[from] std::io::Error), 51 | } 52 | 53 | impl IntoResponse for Error { 54 | fn into_response(self) -> Response { 55 | info!("Responding with error: {self:?}"); 56 | let response = match &self { 57 | Error::Database(e) => crate::handlers::Response::new_failure( 58 | StatusCode::INTERNAL_SERVER_ERROR, 59 | ResponseError { 60 | name: "mongodb".to_string(), 61 | message: e.to_string(), 62 | }, 63 | ), 64 | Error::Store(e) => match e { 65 | StoreError::Database(e) => crate::handlers::Response::new_failure( 66 | StatusCode::INTERNAL_SERVER_ERROR, 67 | ResponseError { 68 | name: "mongodb".to_string(), 69 | message: e.to_string(), 70 | }, 71 | ), 72 | StoreError::NotFound(entity, id) => crate::handlers::Response::new_failure( 73 | StatusCode::NOT_FOUND, 74 | ResponseError { 75 | name: format!("{} not found", &entity), 76 | message: format!("Cannot find {} with specified identifier {}", entity, id), 77 | }, 78 | ), 79 | }, 80 | Error::Validation(e) => crate::handlers::Response::new_failure( 81 | StatusCode::BAD_REQUEST, 82 | ResponseError { 83 | name: "validation".to_string(), 84 | message: e.to_string(), 85 | }), 86 | Error::JwtVerification(e) => crate::handlers::Response::new_failure( 87 | StatusCode::BAD_REQUEST, 88 | ResponseError { 89 | name: "jwt_verification".to_string(), 90 | message: e.to_string(), 91 | }), 92 | Error::Did(e) => crate::handlers::Response::new_failure( 93 | StatusCode::BAD_REQUEST, 94 | ResponseError { 95 | name: "did".to_string(), 96 | message: 
e.to_string(), 97 | }), 98 | Error::Cacao(e) => crate::handlers::Response::new_failure( 99 | StatusCode::BAD_REQUEST, 100 | ResponseError { 101 | name: "cacao".to_string(), 102 | message: e.to_string(), 103 | }), 104 | _ => crate::handlers::Response::new_failure( 105 | StatusCode::INTERNAL_SERVER_ERROR, 106 | ResponseError { 107 | name: "unknown_error".to_string(), 108 | message: "This error should not have occurred. Please file an issue at: https://github.com/walletconnect/keys-server".to_string(), 109 | } 110 | ), 111 | }.into_response(); 112 | 113 | if response.status().is_client_error() { 114 | warn!("HTTP Client Error: {self:?}"); 115 | } 116 | 117 | if response.status().is_server_error() { 118 | error!("HTTP Server Error: {self:?}"); 119 | } 120 | 121 | response 122 | } 123 | } 124 | -------------------------------------------------------------------------------- /src/handlers/health.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::state::AppState, 3 | axum::{extract::State, http::StatusCode, response::IntoResponse}, 4 | std::sync::Arc, 5 | }; 6 | 7 | pub async fn handler(State(state): State>) -> impl IntoResponse { 8 | ( 9 | StatusCode::OK, 10 | format!( 11 | "OK, {} v{}", 12 | state.build_info.crate_info.name, state.build_info.crate_info.version 13 | ), 14 | ) 15 | } 16 | -------------------------------------------------------------------------------- /src/handlers/identity/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod register; 2 | pub mod resolve; 3 | pub mod unregister; 4 | -------------------------------------------------------------------------------- /src/handlers/identity/register.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::super::{validate_caip10_account, validate_identity_key, Response}, 3 | crate::{ 4 | error::{self}, 5 | increment_counter, 6 | log::prelude::{info, warn}, 7 | 
state::AppState, 8 | }, 9 | axum::extract::{Json, State}, 10 | relay_rpc::auth::cacao::Cacao, 11 | serde::Deserialize, 12 | std::sync::Arc, 13 | validator::Validate, 14 | }; 15 | 16 | #[derive(Deserialize)] 17 | pub struct RegisterIdentityPayload { 18 | pub cacao: Cacao, 19 | } 20 | 21 | #[derive(Validate, Debug)] 22 | pub struct RegisterIdentityParams { 23 | #[validate(custom = "validate_caip10_account")] 24 | account: String, 25 | #[validate(custom = "validate_identity_key")] 26 | identity_key: String, 27 | cacao: Cacao, 28 | } 29 | 30 | pub async fn handler( 31 | State(state): State>, 32 | Json(payload): Json, 33 | ) -> error::Result { 34 | let cacao = payload.cacao.clone(); 35 | 36 | info!( 37 | "Handling - Register identity with cacao: {:?}", 38 | payload.cacao 39 | ); 40 | 41 | cacao 42 | .verify(state.provider.as_ref()) 43 | .await 44 | .map_err(|error| { 45 | increment_counter!(state.metrics, invalid_identity_register_cacao); 46 | info!( 47 | "Failure - Register identity with cacao: {:?}, error: {:?}", 48 | payload.cacao, error 49 | ); 50 | error 51 | })?; 52 | 53 | let identity_key = cacao.p.identity_key()?; 54 | let account = cacao.p.caip_10_address()?; 55 | let params = RegisterIdentityParams { 56 | account, 57 | identity_key, 58 | cacao, 59 | }; 60 | 61 | params.validate().map_err(|error| { 62 | info!( 63 | "Failure - Register identity with cacao: {:?}, error: {:?}", 64 | payload.cacao, error 65 | ); 66 | error 67 | })?; 68 | 69 | // Note to future: accounts can have both ERC-55 and lowercase variants, with duplicates. 
Make sure these are merged/treated as the same account 70 | // See for context: https://github.com/WalletConnect/keys-server/pull/173 71 | state 72 | .keys_persitent_storage 73 | .create_account_if_not_exists_and_add_identity_key( 74 | ¶ms.account, 75 | ¶ms.identity_key, 76 | ¶ms.cacao, 77 | ) 78 | .await 79 | .map_err(|error| { 80 | warn!( 81 | "Failure - Register identity with cacao: {:?}, error: {:?}", 82 | payload.cacao, error 83 | ); 84 | error 85 | })?; 86 | 87 | info!( 88 | "Success - Register identity with cacao: {:?}", 89 | payload.cacao 90 | ); 91 | increment_counter!(state.metrics, identity_register); 92 | 93 | Ok(Response::default()) 94 | } 95 | -------------------------------------------------------------------------------- /src/handlers/identity/resolve.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::super::Response, 3 | crate::{ 4 | error, 5 | handlers::validate_identity_key, 6 | increment_counter, 7 | log::prelude::{info, warn}, 8 | state::AppState, 9 | }, 10 | axum::extract::{Query, State}, 11 | http::StatusCode, 12 | relay_rpc::auth::cacao::Cacao, 13 | serde::{Deserialize, Serialize}, 14 | serde_json::{json, Value}, 15 | std::sync::Arc, 16 | tracing::instrument, 17 | validator::Validate, 18 | }; 19 | 20 | #[derive(Deserialize, Debug, Validate)] 21 | pub struct ResolveIdentityPayload { 22 | #[serde(rename = "publicKey")] 23 | #[validate(custom = "validate_identity_key")] 24 | public_key: String, 25 | } 26 | 27 | #[derive(Serialize)] 28 | pub struct ResolveIdentityResponse { 29 | cacao: Cacao, 30 | } 31 | 32 | impl From for Value { 33 | fn from(response: ResolveIdentityResponse) -> Self { 34 | json!(response) 35 | } 36 | } 37 | 38 | #[instrument(name = "resolve_handler", skip_all)] 39 | pub async fn handler( 40 | State(state): State>, 41 | Query(params): Query, 42 | ) -> error::Result { 43 | info!("Handling - Resolve identity with params: {:?}", params); 44 | 45 | info!("Timing - Validating 
params - Start"); 46 | params.validate().map_err(|error| { 47 | info!( 48 | "Failure - Resolve identity with params: {:?}, error: {:?}", 49 | params, error 50 | ); 51 | error 52 | })?; 53 | info!("Timing - Validating params - End"); 54 | 55 | info!("Timing - get_cacao_by_identity_key - Start"); 56 | let cacao = state 57 | .keys_persitent_storage 58 | .get_cacao_by_identity_key(¶ms.public_key) 59 | .await 60 | .map_err(|error| { 61 | warn!( 62 | "Failure - Resolve identity with params: {:?}, error: {:?}", 63 | params, error 64 | ); 65 | error 66 | })?; 67 | info!("Timing - get_cacao_by_identity_key - End"); 68 | 69 | let response = ResolveIdentityResponse { cacao }; 70 | 71 | info!("Success - Resolve identity with params: {:?}", params); 72 | increment_counter!(state.metrics, identity_resolved); 73 | info!("Incremented counter"); 74 | 75 | Ok(Response::new_success_with_value( 76 | StatusCode::OK, 77 | response.into(), 78 | )) 79 | } 80 | -------------------------------------------------------------------------------- /src/handlers/identity/unregister.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::super::{validate_caip10_account, validate_identity_key, Response}, 3 | crate::{ 4 | auth::{ 5 | did::{extract_did_data, DID_METHOD_KEY, DID_METHOD_PKH}, 6 | jwt::{Jwt, JwtClaims, JwtVerifierByIssuer}, 7 | }, 8 | error, increment_counter, 9 | log::prelude::{info, warn}, 10 | state::AppState, 11 | }, 12 | axum::{extract::State, Json}, 13 | serde::{Deserialize, Serialize}, 14 | std::sync::Arc, 15 | validator::Validate, 16 | }; 17 | 18 | #[derive(Deserialize)] 19 | pub struct UnregisterIdentityPayload { 20 | #[serde(rename = "idAuth")] 21 | id_auth: String, 22 | } 23 | 24 | #[derive(Validate, Debug)] 25 | pub struct UnregisterIdentityParams { 26 | #[validate(custom = "validate_caip10_account")] 27 | account: String, 28 | #[validate(custom = "validate_identity_key")] 29 | identity_key: String, 30 | } 31 | 32 | 
#[derive(Debug, Serialize, Deserialize)] 33 | pub struct UnregisterIdentityKeyClaims { 34 | aud: String, // keys server url used for registering 35 | exp: usize, // timestamp when jwt must expire TODO: Should be 1 hour 36 | iat: usize, // timestamp when jwt was issued 37 | iss: String, // public identity key in form of did:key, also used to verify jwt signature 38 | pkh: String, // corresponding blockchain account (did:pkh) 39 | act: String, // description of action intent. Must be equal to "unregister_identity" 40 | } 41 | 42 | impl JwtClaims for UnregisterIdentityKeyClaims { 43 | fn is_valid(&self) -> bool { 44 | // TODO: Add validation: 45 | // aud must be equal this dns? 46 | // exp must be in future 47 | // iat must be in past 48 | // iss must be valid did:key 49 | // pkh must be valid did:pkh 50 | self.act == "unregister_identity" 51 | } 52 | } 53 | 54 | impl JwtVerifierByIssuer for UnregisterIdentityKeyClaims { 55 | fn get_iss(&self) -> &str { 56 | &self.iss 57 | } 58 | } 59 | 60 | pub async fn handler( 61 | State(state): State>, 62 | Json(payload): Json, 63 | ) -> error::Result { 64 | info!( 65 | "Handling - Unregister identity with jwt: {:?}", 66 | payload.id_auth 67 | ); 68 | 69 | let jwt = Jwt::::new(&payload.id_auth).map_err(|error| { 70 | increment_counter!(state.metrics, invalid_identity_unregister_jwt); 71 | info!( 72 | "Failure - Unregister identity with jwt: {:?}, error: {:?}", 73 | payload.id_auth, error 74 | ); 75 | error 76 | })?; 77 | 78 | jwt.verify().map_err(|error| { 79 | increment_counter!(state.metrics, invalid_identity_unregister_jwt); 80 | info!( 81 | "Failure - Unregister identity with jwt: {:?}, error: {:?}", 82 | payload.id_auth, error 83 | ); 84 | error 85 | })?; 86 | 87 | let claims: UnregisterIdentityKeyClaims = jwt.claims; 88 | let account = extract_did_data(&claims.pkh, DID_METHOD_PKH)?; 89 | let identity_key = extract_did_data(&claims.iss, DID_METHOD_KEY)?; 90 | 91 | let params = UnregisterIdentityParams { 92 | account: 
account.to_string(), 93 | identity_key: identity_key.to_string(), 94 | }; 95 | 96 | params.validate().map_err(|error| { 97 | info!( 98 | "Failure - Unregister identity with jwt: {:?}, error: {:?}", 99 | payload.id_auth, error 100 | ); 101 | error 102 | })?; 103 | 104 | state 105 | .keys_persitent_storage 106 | .remove_identity_key(¶ms.account, ¶ms.identity_key) 107 | .await 108 | .map_err(|error| { 109 | warn!( 110 | "Failure - Unregister identity with jwt: {:?}, error: {:?}", 111 | payload.id_auth, error 112 | ); 113 | error 114 | })?; 115 | 116 | info!( 117 | "Success - Unregister identity with jwt: {:?}", 118 | payload.id_auth 119 | ); 120 | increment_counter!(state.metrics, identity_unregister); 121 | 122 | Ok(Response::default()) 123 | } 124 | -------------------------------------------------------------------------------- /src/handlers/invite/mod.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::auth::{ 3 | did, 4 | jwt::{JwtClaims, JwtVerifierByIssuer}, 5 | }, 6 | serde::{Deserialize, Serialize}, 7 | }; 8 | 9 | pub mod register; 10 | pub mod resolve; 11 | pub mod unregister; 12 | 13 | #[derive(Debug, Serialize, Deserialize)] 14 | pub struct InviteKeyClaims { 15 | aud: String, // keys server url used for registering 16 | exp: usize, // timestamp when jwt must expire TODO: Should be 1 hour 17 | iat: usize, // timestamp when jwt was issued 18 | 19 | /// Public identity key in form of did:key according to the [Ed25519][1] 20 | /// 21 | /// [1]: https://w3c-ccg.github.io/did-method-key/#ed25519-x25519 22 | iss: String, 23 | 24 | /// Public key for chat invite key in form of did:key according to the 25 | /// [X25519][1] 26 | /// 27 | /// [1]: https://w3c-ccg.github.io/did-method-key/#x25519 28 | sub: String, 29 | 30 | pkh: String, // corresponding blockchain account (did:pkh) 31 | } 32 | 33 | impl JwtClaims for InviteKeyClaims { 34 | fn is_valid(&self) -> bool { 35 | // TODO: Add validation: 36 | // aud must 
be equal this dns? 37 | // exp must be in future 38 | // iat must be in past 39 | // iss must be valid did:key 40 | // pkh must be valid did:pkh 41 | 42 | did::validate_x25519(&self.sub) 43 | } 44 | } 45 | 46 | impl JwtVerifierByIssuer for InviteKeyClaims { 47 | fn get_iss(&self) -> &str { 48 | &self.iss 49 | } 50 | } 51 | 52 | #[cfg(test)] 53 | mod test_claims_validation { 54 | use super::{InviteKeyClaims, JwtClaims as _}; 55 | 56 | fn default() -> InviteKeyClaims { 57 | InviteKeyClaims { 58 | aud: String::new(), 59 | exp: 0, 60 | iat: 0, 61 | iss: String::new(), 62 | sub: String::new(), 63 | pkh: String::new(), 64 | } 65 | } 66 | 67 | #[test] 68 | fn fails_on_incorrect_claims() { 69 | let mut claims = default(); 70 | assert!(!claims.is_valid()); 71 | 72 | claims.sub = "ababagalamaga".to_string(); 73 | assert!(!claims.is_valid()); 74 | 75 | claims.sub = "did:key:zQ3shokFTS3brHcDQrn82RUDfCZESWL1ZdCEJwekUDPQiYBme".to_string(); 76 | assert!(!claims.is_valid()); 77 | 78 | claims.sub = "did:abc".to_string(); 79 | assert!(!claims.is_valid()); 80 | } 81 | 82 | #[test] 83 | fn succeeds_on_correct_claims() { 84 | let mut claims = default(); 85 | 86 | claims.sub = "did:key:z6LSeu9HkTHSfLLeUs2nnzUSNedgDUevfNQgQjQC23ZCit6F".to_string(); 87 | assert!(claims.is_valid()); 88 | 89 | claims.sub = "did:key:z6LStiZsmxiK4odS4Sb6JmdRFuJ6e1SYP157gtiCyJKfrYha".to_string(); 90 | assert!(claims.is_valid()); 91 | 92 | claims.sub = "did:key:z6LSoMdmJz2Djah2P4L9taDmtqeJ6wwd2HhKZvNToBmvaczQ".to_string(); 93 | assert!(claims.is_valid()); 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /src/handlers/invite/register.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::{ 3 | super::{validate_caip10_account, Response}, 4 | InviteKeyClaims, 5 | }, 6 | crate::{ 7 | auth::{ 8 | did::{extract_did_data, DID_METHOD_PKH}, 9 | jwt::Jwt, 10 | }, 11 | error::{self}, 12 | increment_counter, 13 | 
log::prelude::{info, warn}, 14 | state::AppState, 15 | }, 16 | axum::extract::{Json, State}, 17 | serde::Deserialize, 18 | std::sync::Arc, 19 | validator::Validate, 20 | }; 21 | 22 | #[derive(Deserialize)] 23 | pub struct RegisterInviteKeyPayload { 24 | #[serde(rename = "idAuth")] 25 | id_auth: String, 26 | } 27 | 28 | #[derive(Validate, Debug)] 29 | pub struct RegisterInviteKeyParams { 30 | #[validate(custom = "validate_caip10_account")] 31 | account: String, 32 | invite_key: String, 33 | } 34 | 35 | /// Registers invite key for given account. 36 | pub async fn handler( 37 | State(state): State>, 38 | Json(payload): Json, 39 | ) -> error::Result { 40 | info!("Handling - Register invite with jwt: {:?}", payload.id_auth); 41 | 42 | let jwt = Jwt::::new(&payload.id_auth).map_err(|error| { 43 | increment_counter!(state.metrics, invalid_invite_register_jwt); 44 | info!( 45 | "Failure - Register invite with jwt: {:?}, error: {:?}", 46 | payload.id_auth, error 47 | ); 48 | error 49 | })?; 50 | 51 | jwt.verify().map_err(|error| { 52 | increment_counter!(state.metrics, invalid_invite_register_jwt); 53 | info!( 54 | "Failure - Register invite with jwt: {:?}, error: {:?}", 55 | payload.id_auth, error 56 | ); 57 | error 58 | })?; 59 | 60 | let claims: InviteKeyClaims = jwt.claims; 61 | let account = extract_did_data(&claims.pkh, DID_METHOD_PKH)?; 62 | let params = RegisterInviteKeyParams { 63 | account: account.to_string(), 64 | invite_key: claims.sub, 65 | }; 66 | 67 | params.validate().map_err(|error| { 68 | info!( 69 | "Failure - Register invite with jwt: {:?}, error: {:?}", 70 | payload.id_auth, error 71 | ); 72 | error 73 | })?; 74 | 75 | state 76 | .keys_persitent_storage 77 | .upsert_invite_key(¶ms.account, ¶ms.invite_key) 78 | .await 79 | .map_err(|error| { 80 | warn!( 81 | "Failure - Register invite with jwt: {:?}, error: {:?}", 82 | payload.id_auth, error 83 | ); 84 | error 85 | })?; 86 | 87 | info!("Success - Register invite with jwt: {:?}", payload.id_auth); 88 | 
increment_counter!(state.metrics, invite_register); 89 | 90 | Ok(Response::default()) 91 | } 92 | -------------------------------------------------------------------------------- /src/handlers/invite/resolve.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::super::{validate_caip10_account, Response}, 3 | crate::{ 4 | error, increment_counter, 5 | log::prelude::{info, warn}, 6 | state::AppState, 7 | }, 8 | axum::extract::{Query, State}, 9 | http::StatusCode, 10 | serde::{Deserialize, Serialize}, 11 | serde_json::{json, Value}, 12 | std::sync::Arc, 13 | validator::Validate, 14 | }; 15 | 16 | #[derive(Deserialize)] 17 | pub struct ResolveInvitePayload { 18 | account: String, 19 | } 20 | 21 | #[derive(Validate)] 22 | pub struct ResolveInviteParams { 23 | #[validate(custom = "validate_caip10_account")] 24 | account: String, 25 | } 26 | 27 | #[derive(Serialize)] 28 | pub struct ResolveInviteResponse { 29 | #[serde(rename = "inviteKey")] 30 | invite_key: String, 31 | } 32 | 33 | impl From for Value { 34 | fn from(response: ResolveInviteResponse) -> Self { 35 | json!(response) 36 | } 37 | } 38 | 39 | pub async fn handler( 40 | State(state): State>, 41 | Query(payload): Query, 42 | ) -> error::Result { 43 | info!( 44 | "Handling - Resolve invite of account: {:?}", 45 | payload.account 46 | ); 47 | 48 | let params = ResolveInviteParams { 49 | account: payload.account.clone(), 50 | }; 51 | params.validate().map_err(|error| { 52 | info!( 53 | "Failure - Resolve invite of account: {:?}, error: {:?}", 54 | payload.account, error 55 | ); 56 | error 57 | })?; 58 | 59 | let invite_key = state 60 | .keys_persitent_storage 61 | .retrieve_invite_key(¶ms.account) 62 | .await 63 | .map_err(|error| { 64 | warn!( 65 | "Failure - Resolve invite of account: {:?}, error: {:?}", 66 | payload.account, error 67 | ); 68 | error 69 | })?; 70 | 71 | let response = ResolveInviteResponse { invite_key }; 72 | 73 | info!("Success - Resolve invite 
of account: {:?}", payload.account); 74 | increment_counter!(state.metrics, invite_resolved); 75 | Ok(Response::new_success_with_value( 76 | StatusCode::OK, 77 | response.into(), 78 | )) 79 | } 80 | -------------------------------------------------------------------------------- /src/handlers/invite/unregister.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::{ 3 | super::{validate_caip10_account, Response}, 4 | InviteKeyClaims, 5 | }, 6 | crate::{ 7 | auth::{ 8 | did::{extract_did_data, DID_METHOD_PKH}, 9 | jwt::Jwt, 10 | }, 11 | error::{self}, 12 | increment_counter, 13 | log::prelude::{info, warn}, 14 | state::AppState, 15 | }, 16 | axum::extract::{Json, State}, 17 | serde::Deserialize, 18 | std::sync::Arc, 19 | validator::Validate, 20 | }; 21 | 22 | #[derive(Deserialize)] 23 | pub struct UnregisterInviteKeyPayload { 24 | #[serde(rename = "idAuth")] 25 | id_auth: String, 26 | } 27 | 28 | #[derive(Validate)] 29 | pub struct UnregisterInviteKeyParams { 30 | #[validate(custom = "validate_caip10_account")] 31 | account: String, 32 | } 33 | 34 | /// Unsets invite key for given account. 
35 | pub async fn handler( 36 | State(state): State>, 37 | Json(payload): Json, 38 | ) -> error::Result { 39 | info!( 40 | "Handling - Unregister invite with jwt: {:?}", 41 | payload.id_auth 42 | ); 43 | 44 | let jwt = Jwt::::new(&payload.id_auth).map_err(|error| { 45 | increment_counter!(state.metrics, invalid_identity_unregister_jwt); 46 | info!( 47 | "Failure - Unregister invite with jwt: {:?}, error: {:?}", 48 | payload.id_auth, error 49 | ); 50 | error 51 | })?; 52 | 53 | jwt.verify().map_err(|error| { 54 | increment_counter!(state.metrics, invalid_invite_unregister_jwt); 55 | info!( 56 | "Failure - Unregister invite with jwt: {:?}, error: {:?}", 57 | payload.id_auth, error 58 | ); 59 | error 60 | })?; 61 | 62 | let claims: InviteKeyClaims = jwt.claims; 63 | let account = extract_did_data(&claims.pkh, DID_METHOD_PKH)?; 64 | 65 | let params = UnregisterInviteKeyParams { 66 | account: account.to_string(), 67 | }; 68 | params.validate().map_err(|error| { 69 | warn!( 70 | "Failure - Unregister invite with jwt: {:?}, error: {:?}", 71 | payload.id_auth, error 72 | ); 73 | error 74 | })?; 75 | 76 | state 77 | .keys_persitent_storage 78 | .remove_invite_key(¶ms.account) 79 | .await 80 | .map_err(|error| { 81 | warn!( 82 | "Failure - Unregister invite with jwt: {:?}, error: {:?}", 83 | payload.id_auth, error 84 | ); 85 | error 86 | })?; 87 | 88 | info!( 89 | "Success - Unregister invite with jwt: {:?}", 90 | payload.id_auth 91 | ); 92 | increment_counter!(state.metrics, invite_unregister); 93 | 94 | Ok(Response::default()) 95 | } 96 | -------------------------------------------------------------------------------- /src/handlers/metrics.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::{error::Result, state::AppState}, 3 | axum::{extract::State, http::StatusCode}, 4 | std::sync::Arc, 5 | }; 6 | 7 | pub async fn handler(State(state): State>) -> Result<(StatusCode, String)> { 8 | if let Some(metrics) = &state.metrics 
{ 9 | let exported = metrics.export()?; 10 | 11 | Ok((StatusCode::OK, exported)) 12 | } else { 13 | // No Metrics! 14 | Ok(( 15 | StatusCode::INTERNAL_SERVER_ERROR, 16 | "Metrics not enabled.".to_string(), 17 | )) 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /src/handlers/mod.rs: -------------------------------------------------------------------------------- 1 | use { 2 | axum::{response::IntoResponse, Json}, 3 | hyper::StatusCode, 4 | serde_json::{json, Value}, 5 | validator::ValidationError, 6 | }; 7 | 8 | pub mod health; 9 | pub mod identity; 10 | pub mod invite; 11 | pub mod metrics; 12 | 13 | #[derive(serde::Serialize)] 14 | #[serde(rename_all = "UPPERCASE")] 15 | pub enum ResponseStatus { 16 | Success, 17 | Failure, 18 | } 19 | 20 | #[derive(serde::Serialize)] 21 | pub struct ResponseError { 22 | pub name: String, 23 | pub message: String, 24 | } 25 | 26 | #[derive(serde::Serialize)] 27 | pub struct Response { 28 | pub status: ResponseStatus, 29 | #[serde(skip_serializing)] 30 | pub status_code: StatusCode, 31 | pub error: Option, 32 | pub value: Option, 33 | } 34 | 35 | impl Response { 36 | pub fn new_success_with_value(status: StatusCode, value: Value) -> Self { 37 | Response { 38 | status: ResponseStatus::Success, 39 | status_code: status, 40 | error: None, 41 | value: Some(value), 42 | } 43 | } 44 | 45 | pub fn new_success(status: StatusCode) -> Self { 46 | Response { 47 | status: ResponseStatus::Success, 48 | status_code: status, 49 | error: None, 50 | value: None, 51 | } 52 | } 53 | 54 | pub fn new_failure(status: StatusCode, error: ResponseError) -> Self { 55 | Response { 56 | status: ResponseStatus::Failure, 57 | status_code: status, 58 | error: Some(error), 59 | value: None, 60 | } 61 | } 62 | } 63 | 64 | impl IntoResponse for Response { 65 | fn into_response(self) -> axum::response::Response { 66 | let status = self.status_code; 67 | let json: Json = self.into(); 68 | 69 | (status, 
json).into_response() 70 | } 71 | } 72 | 73 | impl From for Json { 74 | fn from(response: Response) -> Self { 75 | Json(json!(response)) 76 | } 77 | } 78 | 79 | impl Default for Response { 80 | fn default() -> Self { 81 | Response::new_success(StatusCode::OK) 82 | } 83 | } 84 | 85 | /// Minimum length of 5 characters as per CAIP-10 specs: https://github.com/ChainAgnostic/CAIPs/blob/master/CAIPs/caip-10.md 86 | fn validate_caip10_account(account: &str) -> Result<(), ValidationError> { 87 | if account.len() < 5 || account.len() > 168 { 88 | // BUGFIX: error code was misspelled "invalid lenght" — NOTE(review): confirm no client matches on the old code string before release. 88 | return Err(ValidationError::new("invalid length")); 89 | } 90 | 91 | Ok(()) 92 | } 93 | 94 | /// https://w3c-ccg.github.io/did-method-key/#ed25519-x25519 95 | fn validate_identity_key(identity_key: &str) -> Result<(), ValidationError> { 96 | if !identity_key.starts_with("z6Mk") { 97 | return Err(ValidationError::new("invalid prefix")); 98 | } 99 | 100 | Ok(()) 101 | } 102 | -------------------------------------------------------------------------------- /src/log/mod.rs: -------------------------------------------------------------------------------- 1 | //! This library serves as a thin, opinionated wrapper over the underlying 2 | //! logger apparatus. By default, this crate only exports the various macros, 3 | //! traits and types used in library logging. 4 | //! 5 | //! However, the top level binary may enable the "logger" feature to gain access 6 | //! to the machinery for initializing the global logger. 7 | //! 8 | //! There are also some other utility functions that may be accessed by their 9 | //! feature gate. See the [features] section of Cargo.toml for more. 10 | pub use tracing::{debug, error, info, trace, warn}; 11 | use { 12 | opentelemetry::sdk::trace, 13 | opentelemetry_otlp::WithExportConfig, 14 | tracing_appender::non_blocking::WorkerGuard, 15 | tracing_subscriber::{prelude::*, EnvFilter}, 16 | }; 17 | 18 | pub mod prelude { 19 | //! Reexport of the most common macros and traits used for logging. 20 | //! 21 | //!
Typically you may simply add `use log::prelude::*` and get access to all 22 | //! of the usual macros (info!, error!, debug!, etc). 23 | 24 | pub use tracing::{debug, error, info, trace, warn}; 25 | } 26 | 27 | /// The default log level for the stderr logger, which is used as a fallback if 28 | /// no other can be found. 29 | const DEFAULT_LOG_LEVEL_STDERR: tracing::Level = tracing::Level::WARN; 30 | 31 | /// The default log level for the telemetry logger, which is used as a fallback 32 | /// if no other can be found. 33 | const DEFAULT_LOG_LEVEL_OTEL: tracing::Level = tracing::Level::WARN; 34 | 35 | /// The environment variable used to control the stderr logger. 36 | const ENV_LOG_LEVEL_STDERR: &str = "LOG_LEVEL"; 37 | 38 | /// The environment variable used to control the telemetry logger. 39 | const ENV_LOG_LEVEL_OTEL: &str = "LOG_LEVEL_OTEL"; 40 | 41 | /// The endpoint for the OpenTelemetry gRPC collector, e.g. "localhost:4317". 42 | const OTEL_EXPORTER_OTLP_ENDPOINT: &str = "OTEL_EXPORTER_OTLP_ENDPOINT"; 43 | 44 | pub struct Logger { 45 | _guard: WorkerGuard, 46 | } 47 | 48 | impl Logger { 49 | pub fn init() -> crate::error::Result { 50 | let stderr_filter = EnvFilter::try_from_env(ENV_LOG_LEVEL_STDERR) 51 | .unwrap_or_else(|_| EnvFilter::new(DEFAULT_LOG_LEVEL_STDERR.to_string())); 52 | 53 | let (writer, guard) = tracing_appender::non_blocking(std::io::stderr()); 54 | 55 | let logger = tracing_subscriber::fmt::layer() 56 | .with_target(false) 57 | .with_ansi(atty::is(atty::Stream::Stderr)) 58 | .with_writer(writer) 59 | .with_filter(stderr_filter) 60 | .boxed(); 61 | 62 | let subscriber = tracing_subscriber::registry().with(logger); 63 | 64 | if std::env::var(OTEL_EXPORTER_OTLP_ENDPOINT).is_ok() { 65 | let telemetry = { 66 | let tracer = opentelemetry_otlp::new_pipeline() 67 | .tracing() 68 | .with_exporter(opentelemetry_otlp::new_exporter().tonic().with_env()) 69 | .with_trace_config( 70 | trace::config().with_id_generator(trace::XrayIdGenerator::default()), 
71 | ) 72 | .install_batch(opentelemetry::runtime::Tokio)?; 73 | 74 | tracing_opentelemetry::layer() 75 | .with_tracer(tracer) 76 | .with_filter( 77 | EnvFilter::try_from_env(ENV_LOG_LEVEL_OTEL) 78 | .unwrap_or_else(|_| EnvFilter::new(DEFAULT_LOG_LEVEL_OTEL.to_string())), 79 | ) 80 | .boxed() 81 | }; 82 | 83 | subscriber.with(telemetry).init(); 84 | } else { 85 | subscriber.init(); 86 | }; 87 | 88 | Ok(Self { _guard: guard }) 89 | } 90 | 91 | pub fn stop(self) { 92 | // Consume self to trigger drop. 93 | } 94 | } 95 | 96 | impl Drop for Logger { 97 | fn drop(&mut self) { 98 | opentelemetry::global::shutdown_tracer_provider(); 99 | } 100 | } 101 | -------------------------------------------------------------------------------- /src/macros.rs: -------------------------------------------------------------------------------- 1 | #[macro_export] 2 | macro_rules! increment_counter { 3 | ($state:ident$(.$property:ident)*, $metric:ident) => {{ 4 | use {opentelemetry::Context, tracing::debug}; 5 | 6 | if let Some(metrics) = &$state$(.$property)* { 7 | metrics.$metric.add(&Context::current(), 1, &[]); 8 | debug!("incremented `{}` counter", stringify!($metric)); 9 | } 10 | }}; 11 | } 12 | 13 | #[macro_export] 14 | macro_rules! 
increment_counter_with { 15 | ($state:ident$(.$property:ident)*, $metric:ident, $value:expr) => {{ 16 | use {opentelemetry::Context, tracing::debug}; 17 | 18 | if let Some(metrics) = &$state$(.$property)* { 19 | metrics.$metric.add(&Context::current(), $value, &[]); 20 | debug!("incremented `{}` counter", stringify!($metric)); 21 | } 22 | }}; 23 | } 24 | -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | use { 2 | dotenv::dotenv, 3 | keyserver::{bootstrap, config::Configuration, error, log::Logger}, 4 | tokio::sync::broadcast, 5 | }; 6 | 7 | #[tokio::main] 8 | async fn main() -> error::Result<()> { 9 | let logger = Logger::init().expect("Failed to start logging"); 10 | let (_signal, shutdown) = broadcast::channel(1); 11 | dotenv().ok(); 12 | let config = Configuration::new().expect("Failed to load config!"); 13 | let result = bootstrap(shutdown, config).await; 14 | 15 | logger.stop(); 16 | 17 | result 18 | } 19 | -------------------------------------------------------------------------------- /src/metrics/mod.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::error::{Error, Result}, 3 | opentelemetry::{ 4 | metrics::Counter, 5 | sdk::{ 6 | self, 7 | export::metrics::aggregation, 8 | metrics::{processors, selectors}, 9 | Resource, 10 | }, 11 | }, 12 | opentelemetry_prometheus::PrometheusExporter, 13 | prometheus_core::TextEncoder, 14 | }; 15 | 16 | #[derive(Clone)] 17 | pub struct Metrics { 18 | pub prometheus_exporter: PrometheusExporter, 19 | 20 | // Invite counters 21 | pub invite_register: Counter, 22 | pub invite_resolved: Counter, 23 | pub invite_unregister: Counter, 24 | 25 | // Identity counters 26 | pub identity_register: Counter, 27 | pub identity_resolved: Counter, 28 | pub identity_unregister: Counter, 29 | 30 | // Handlers params counters 31 | pub 
invalid_identity_register_cacao: Counter, 32 | pub invalid_identity_unregister_jwt: Counter, 33 | pub invalid_invite_register_jwt: Counter, 34 | pub invalid_invite_unregister_jwt: Counter, 35 | } 36 | 37 | impl Metrics { 38 | pub fn new(resource: Resource) -> Result { 39 | let controller = sdk::metrics::controllers::basic( 40 | processors::factory( 41 | selectors::simple::histogram(vec![]), 42 | aggregation::cumulative_temporality_selector(), 43 | ) 44 | .with_memory(true), 45 | ) 46 | .with_resource(resource) 47 | .build(); 48 | 49 | let prometheus_exporter = opentelemetry_prometheus::exporter(controller).init(); 50 | 51 | let meter = prometheus_exporter.meter_provider().unwrap(); 52 | 53 | opentelemetry::global::set_meter_provider(meter); 54 | 55 | let meter = opentelemetry::global::meter("keyserver"); 56 | 57 | let invite_register = meter 58 | .u64_counter("invite_register") 59 | .with_description("The number of invite keys registered") 60 | .init(); 61 | 62 | let invite_resolved = meter 63 | .u64_counter("invite_resolved") 64 | .with_description("The number of invite keys resolved") 65 | .init(); 66 | 67 | let invite_unregister = meter 68 | .u64_counter("invite_unregister") 69 | .with_description("The number of invite keys unregistered") 70 | .init(); 71 | 72 | let identity_register = meter 73 | .u64_counter("identity_register") 74 | .with_description("The number of identity keys registered") 75 | .init(); 76 | 77 | let identity_resolved = meter 78 | .u64_counter("identity_resolved") 79 | .with_description("The number of identity keys resolved") 80 | .init(); 81 | 82 | let identity_unregister = meter 83 | .u64_counter("identity_unregister") 84 | .with_description("The number of identity keys unregistered") 85 | .init(); 86 | 87 | let invalid_identity_register_cacao = meter 88 | .u64_counter("invalid_identity_register_cacao") 89 | .with_description("The number of invalid cacaos received for registering an identity") 90 | .init(); 91 | 92 | let 
invalid_identity_unregister_jwt = meter 93 | .u64_counter("invalid_identity_unregister_jwt") 94 | .with_description("The number of invalid jwt received for unregistering an identity") 95 | .init(); 96 | 97 | let invalid_invite_register_jwt = meter 98 | .u64_counter("invalid_invite_register_jwt") 99 | .with_description("The number of invalid jwt received for registering an invite") 100 | .init(); 101 | 102 | let invalid_invite_unregister_jwt = meter 103 | .u64_counter("invalid_invite_unregister_jwt") 104 | .with_description("The number of invalid jwt received for unregistering an invite") 105 | .init(); 106 | 107 | Ok(Metrics { 108 | prometheus_exporter, 109 | invite_register, 110 | invite_resolved, 111 | invite_unregister, 112 | identity_register, 113 | identity_resolved, 114 | identity_unregister, 115 | invalid_identity_register_cacao, 116 | invalid_identity_unregister_jwt, 117 | invalid_invite_register_jwt, 118 | invalid_invite_unregister_jwt, 119 | }) 120 | } 121 | 122 | pub fn export(&self) -> Result { 123 | let data = self.prometheus_exporter.registry().gather(); 124 | TextEncoder::new() 125 | .encode_to_string(&data) 126 | .map_err(Error::Prometheus) 127 | } 128 | } 129 | -------------------------------------------------------------------------------- /src/state.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::{config::Configuration, metrics::Metrics, stores::keys::KeysPersistentStorageArc}, 3 | blockchain_api::BlockchainApiProvider, 4 | build_info::BuildInfo, 5 | }; 6 | 7 | #[derive(Clone)] 8 | pub struct AppState { 9 | pub config: Configuration, 10 | pub build_info: BuildInfo, 11 | pub metrics: Option, 12 | pub keys_persitent_storage: KeysPersistentStorageArc, 13 | pub provider: Option, 14 | } 15 | 16 | build_info::build_info!(fn build_info); 17 | 18 | impl AppState { 19 | pub fn new( 20 | config: Configuration, 21 | keys_persitent_storage: KeysPersistentStorageArc, 22 | provider: Option, 23 | ) -> 
crate::error::Result { 24 | let build_info: &BuildInfo = build_info(); 25 | 26 | Ok(AppState { 27 | config, 28 | build_info: build_info.clone(), 29 | metrics: None, 30 | keys_persitent_storage, 31 | provider, 32 | }) 33 | } 34 | 35 | pub fn set_metrics(&mut self, metrics: Metrics) { 36 | self.metrics = Some(metrics); 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /src/stores/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod keys; 2 | 3 | #[derive(Debug, thiserror::Error)] 4 | pub enum StoreError { 5 | /// Not found error, params are entity name and identifier 6 | #[error("Cannot find {0} with specified identifier {1}")] 7 | NotFound(String, String), 8 | 9 | #[error(transparent)] 10 | Database(#[from] wither::WitherError), 11 | } 12 | -------------------------------------------------------------------------------- /terraform/.terraform-docs.yml: -------------------------------------------------------------------------------- 1 | formatter: 'markdown table' 2 | 3 | recursive: 4 | enabled: true 5 | path: . 6 | 7 | output: 8 | file: README.md 9 | mode: inject 10 | template: |- 11 | 12 | {{ .Content }} 13 | 14 | 15 | content: | 16 | {{ .Header }} 17 | {{ .Requirements }} 18 | {{ .Providers }} 19 | {{ .Modules }} 20 | 21 | ## Inputs 22 | {{- $hideInputs := list "namespace" "region" "stage" "name" "delimiter" "attributes" "tags" "regex_replace_chars" "id_length_limit" "label_key_case" "label_value_case" "label_order" }} 23 | {{- $filteredInputs := list -}} 24 | {{- range .Module.Inputs -}} 25 | {{- if not (has .Name $hideInputs) -}} 26 | {{- $filteredInputs = append $filteredInputs . -}} 27 | {{- end -}} 28 | {{- end -}} 29 | {{ if not $filteredInputs }} 30 | 31 | No inputs. 
32 | {{ else }} 33 | | Name | Description | Type | Default | Required | 34 | |------|-------------|------|---------|:--------:| 35 | {{- range $filteredInputs }} 36 | | {{ anchorNameMarkdown "input" .Name }} | {{ tostring .Description | sanitizeMarkdownTbl }} | {{ printf " " }}
{{ tostring .Type | sanitizeMarkdownTbl }}
| {{ printf " " }}
{{ .GetValue | sanitizeMarkdownTbl }}
| {{ printf " " }}{{ ternary .Required "yes" "no" }} | 37 | {{- end }} 38 | {{- end }} 39 | {{ .Outputs }} 40 | {{/** End of file fixer */}} 41 | -------------------------------------------------------------------------------- /terraform/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 3 | 4 | provider "registry.terraform.io/alxrem/jsonnet" { 5 | version = "2.2.0" 6 | constraints = "~> 2.2.0" 7 | hashes = [ 8 | "h1:618oQ4FUqJKIihf/Tmxl3Tu9MsiuUpHYwn5BH79SJ0Y=", 9 | "zh:36d073bffcbdc47a3e3d5b19f5c511f38e4075026b467d98395d27436aeb0234", 10 | "zh:3e252ca26d6a6dbad61a10d3a9231daf0cf565d418efbd651d4e67afe1d6f500", 11 | "zh:3e275f0ff014e7d32b3cc7d655b14a1ba82781757f65830d4e5b6349a82d1062", 12 | "zh:42ddeed65087338ec73724e5b211157a804d9ab9ef6913cbb48e362d30c6b5c0", 13 | "zh:5034cd7aaa3f27d914813eb3fb9c344a4670f3226476123379d9ec95d8a5381f", 14 | "zh:6a0650d9f4302f0b6107612b149ea55c22eb9d19a1483e08dacb2ba22b5be5d3", 15 | "zh:97e9f0b32e33d33d109b5e751342a6ba5949165c23d8a88dd147a6b082fee109", 16 | "zh:a10faaf69352ee9ecb9a68a7b4ceea647f6a10840ecdf0b1a4edd59fe755d600", 17 | "zh:c6bb0612a6eb489b74aa74dc5ff740b601bbdf2335a29f87b571c19cd232a62d", 18 | "zh:d061a59d5c11e6e7b167d0cf6795cdf7de199c926fe4cc96edd434de71374c61", 19 | "zh:da49f78a7ec1f598c2e4b5c3f84c2785fdb210ef61cfd70ae6d48b03143a416b", 20 | "zh:e5bb54e1737c196dc6f8a585251f51fdd717fdc24a480816e1b86958693b993b", 21 | "zh:f10ef2044905b08d9ed7c74a8b778f41ce48e86afd62c4119ab54a80810b795a", 22 | "zh:f787d511f302d5714cb6349fae5f44053c14ebf6cb0435c733e7c822c929fe36", 23 | ] 24 | } 25 | 26 | provider "registry.terraform.io/grafana/grafana" { 27 | version = "2.7.0" 28 | constraints = "~> 2.0, >= 2.1.0" 29 | hashes = [ 30 | "h1:JtEAv6ZvXvn0KWcSSHwfXSP0YAhTG8GxxSrlneznMYg=", 31 | "zh:0733a30042ed9c30620a27527940af0406c5f5da40dad3d9babe05dd87487c4c", 32 | 
"zh:5316d67a1c4732cc7817177fe45fc79b6d91164b28c33f95157adbf6b79116fc", 33 | "zh:5583340bb558e6ea0b18f04b44a989f128f80dc0d00e3f4bd86d4665537d6f2a", 34 | "zh:5eeebad608f1302bb28685a6398cf3e1cd885cb59c078ee25add0d5f02ca36da", 35 | "zh:606fe1904a1e0326d2e203c18543ba3378ba8b50de960d76bf6cee8356362b1f", 36 | "zh:64447abf224592d68c630ad80652bec2d25f5e2ba622b6bdf290fe07f08606eb", 37 | "zh:7278fea42f571c0879639079016e8fb6bb9cf20e97d95badd1de4c1d932360e9", 38 | "zh:765af3d815dea711ad8b8078008ca5e7185fa784820f6a9c1649a1ffc1f09632", 39 | "zh:c402167af23ffb8aa84ff03fa525921061fba74d089445e2bd72941067aa4aa1", 40 | "zh:e231c62e803c4679973d88a78594817f84f712852d29b5e4a985ce0826415f48", 41 | "zh:e6e7f58509521c783248d17b9b9618940ac0ef142a1842b4a24f31a67d92b413", 42 | "zh:e80d2caf1a34cf9cb4023fec22774db6912bd9bc5c49d0a8e1c12a27f6d1f2d6", 43 | "zh:edd538b9afa164b05b58f374fec7ca47408e6b978288daaf3cf2cc5bfeb6ec3c", 44 | "zh:f2fd599ab5bbf50654d0a1c63fe247aa6fc872d1eb052f80d548b837493cb2b9", 45 | ] 46 | } 47 | 48 | provider "registry.terraform.io/hashicorp/aws" { 49 | version = "5.28.0" 50 | constraints = ">= 4.9.0, >= 4.50.0, >= 5.0.0, >= 5.7.0, ~> 5.7" 51 | hashes = [ 52 | "h1:xIGISViKIAzm5yJ9PZejQDDxwkVMwp1CSxINPP18Fc8=", 53 | "zh:062171f23f3e9d09dde4bdef4e2e1be6c10ce5392e5acb2d5674ca8d18e4efe2", 54 | "zh:081f9aa09f571a95334c13eb11f7dd9e421250e5c64b2005509638eee382ccd7", 55 | "zh:115f73d02f240f6626e9e4b4551dab9618a713cc238e0340155b9468b16da785", 56 | "zh:1372084815a5f2e795edc1020969401786ca9032a510e0543d1e048fd699c565", 57 | "zh:177a2fd380bec9fcda440d028fdf13db701d054ca637cdc860b70d62d3caafcf", 58 | "zh:18274cf43f8bb0a48da25a8f511020aa4a3052582be4e48eeff4c914c0e10a31", 59 | "zh:2f9d8e5b5375da4528e9ae437bbf93c2be91a50f814ca61046f3b2d16aabb3cb", 60 | "zh:565a4d9e124f118fef41bd2c82e9ae3ea7316821db8f3a03838f84af7db72efb", 61 | "zh:62f9f297c0ce50720e2380bd36fa1f27a210cfac08e993b0dcdb85ecf6559e07", 62 | "zh:8a185766ecd16752aff72260e55e3df28a3d7e4bf28e357fbf9c0460b7ed5b39", 63 | 
"zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425", 64 | "zh:9b698d94915a5077d1c10a705b8d449f719eb87f25f6d46ff165b6bb9fb12778", 65 | "zh:ba2c2ad8f160d9f57eaefde2171cf35697e4912f15c5cafd0ef471d1f38531f9", 66 | "zh:d78d25aeed4851907817f6b281598ed853a60ca65c6bd711c8539ca3f55a841f", 67 | "zh:f743437743605727edcc77c02e3a60358c222311f7a3015e883601e4e4844c1e", 68 | ] 69 | } 70 | 71 | provider "registry.terraform.io/hashicorp/random" { 72 | version = "3.5.1" 73 | constraints = "~> 3.5, 3.5.1" 74 | hashes = [ 75 | "h1:IL9mSatmwov+e0+++YX2V6uel+dV6bn+fC/cnGDK3Ck=", 76 | "zh:04e3fbd610cb52c1017d282531364b9c53ef72b6bc533acb2a90671957324a64", 77 | "zh:119197103301ebaf7efb91df8f0b6e0dd31e6ff943d231af35ee1831c599188d", 78 | "zh:4d2b219d09abf3b1bb4df93d399ed156cadd61f44ad3baf5cf2954df2fba0831", 79 | "zh:6130bdde527587bbe2dcaa7150363e96dbc5250ea20154176d82bc69df5d4ce3", 80 | "zh:6cc326cd4000f724d3086ee05587e7710f032f94fc9af35e96a386a1c6f2214f", 81 | "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", 82 | "zh:b6d88e1d28cf2dfa24e9fdcc3efc77adcdc1c3c3b5c7ce503a423efbdd6de57b", 83 | "zh:ba74c592622ecbcef9dc2a4d81ed321c4e44cddf7da799faa324da9bf52a22b2", 84 | "zh:c7c5cde98fe4ef1143bd1b3ec5dc04baf0d4cc3ca2c5c7d40d17c0e9b2076865", 85 | "zh:dac4bad52c940cd0dfc27893507c1e92393846b024c5a9db159a93c534a3da03", 86 | "zh:de8febe2a2acd9ac454b844a4106ed295ae9520ef54dc8ed2faf29f12716b602", 87 | "zh:eab0d0495e7e711cca367f7d4df6e322e6c562fc52151ec931176115b83ed014", 88 | ] 89 | } 90 | -------------------------------------------------------------------------------- /terraform/.tflint.hcl: -------------------------------------------------------------------------------- 1 | config { 2 | format = "default" 3 | module = true 4 | } 5 | 6 | plugin "terraform" { 7 | enabled = true 8 | preset = "all" 9 | } 10 | 11 | plugin "aws" { 12 | enabled = true 13 | version = "0.18.0" 14 | source = "github.com/terraform-linters/tflint-ruleset-aws" 15 | } 16 | 17 | rule 
"terraform_workspace_remote" { 18 | enabled = false 19 | } 20 | -------------------------------------------------------------------------------- /terraform/README.md: -------------------------------------------------------------------------------- 1 | # Terraform Infrastructure 2 | 3 | Get yourself some AWS creds and then init your workspace: 4 | 5 | `terraform -chdir=terraform init -var-file="vars/dev.tfvars"` 6 | 7 | Use the dev workspace: 8 | 9 | `terraform -chdir=terraform workspace select dev` 10 | 11 | Now you can apply the changes: 12 | 13 | `terraform -chdir=terraform apply -var-file="vars/dev.tfvars"` 14 | 15 | 16 | 17 | ## Requirements 18 | 19 | | Name | Version | 20 | |------|---------| 21 | | [terraform](#requirement\_terraform) | >= 1.0 | 22 | | [aws](#requirement\_aws) | >= 5.7 | 23 | | [grafana](#requirement\_grafana) | >= 2.1 | 24 | | [random](#requirement\_random) | 3.5.1 | 25 | ## Providers 26 | 27 | | Name | Version | 28 | |------|---------| 29 | | [aws](#provider\_aws) | 5.17.0 | 30 | | [random](#provider\_random) | 3.5.1 | 31 | | [terraform](#provider\_terraform) | n/a | 32 | ## Modules 33 | 34 | | Name | Source | Version | 35 | |------|--------|---------| 36 | | [cloudwatch](#module\_cloudwatch) | ./cloudwatch | n/a | 37 | | [dns\_certificate](#module\_dns\_certificate) | app.terraform.io/wallet-connect/dns/aws | 0.1.3 | 38 | | [ecs](#module\_ecs) | ./ecs | n/a | 39 | | [keystore](#module\_keystore) | ./docdb | n/a | 40 | | [monitoring](#module\_monitoring) | ./monitoring | n/a | 41 | | [this](#module\_this) | app.terraform.io/wallet-connect/label/null | 0.3.2 | 42 | | [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | 5.1 | 43 | | [vpc\_endpoints](#module\_vpc\_endpoints) | terraform-aws-modules/vpc/aws//modules/vpc-endpoints | 5.1 | 44 | | [vpc\_flow\_s3\_bucket](#module\_vpc\_flow\_s3\_bucket) | terraform-aws-modules/s3-bucket/aws | ~> 3.14 | 45 | 46 | ## Inputs 47 | | Name | Description | Type | Default | Required | 48 | 
|------|-------------|------|---------|:--------:| 49 | | [betterstack\_cloudwatch\_webhook](#input\_betterstack\_cloudwatch\_webhook) | The BetterStack webhook to send CloudWatch alerts to |
string
|
n/a
| yes | 50 | | [betterstack\_prometheus\_webhook](#input\_betterstack\_prometheus\_webhook) | The BetterStack webhook to send Prometheus alerts to |
string
|
n/a
| yes | 51 | | [geoip\_db\_key](#input\_geoip\_db\_key) | The name to the GeoIP database |
string
|
"GeoLite2-City.mmdb"
| no | 52 | | [grafana\_auth](#input\_grafana\_auth) | The API Token for the Grafana instance |
string
|
""
| no | 53 | | [image\_version](#input\_image\_version) | The version of the image to deploy |
string
|
n/a
| yes | 54 | | [keystore\_primary\_instance\_class](#input\_keystore\_primary\_instance\_class) | The instance class of the primary docdb instances |
string
|
n/a
| yes | 55 | | [keystore\_primary\_instance\_count](#input\_keystore\_primary\_instance\_count) | The number of primary docdb instances to deploy |
number
|
n/a
| yes | 56 | | [keystore\_replica\_instance\_class](#input\_keystore\_replica\_instance\_class) | The instance class of the replica docdb instances |
string
|
n/a
| yes | 57 | | [keystore\_replica\_instance\_count](#input\_keystore\_replica\_instance\_count) | The number of replica docdb instances to deploy |
number
|
n/a
| yes | 58 | | [log\_level](#input\_log\_level) | Defines logging level for the application |
string
|
n/a
| yes | 59 | | [notification\_channels](#input\_notification\_channels) | The notification channels to send alerts to |
list(any)
|
[]
| no | 60 | | [ofac\_blocked\_countries](#input\_ofac\_blocked\_countries) | The list of countries to block |
string
|
""
| no | 61 | ## Outputs 62 | 63 | No outputs. 64 | 65 | 66 | 67 | -------------------------------------------------------------------------------- /terraform/cloudwatch/README.md: -------------------------------------------------------------------------------- 1 | # `cloudwatch` module 2 | 3 | This module configures the cloudwatch alarms and webhook forwarding. 4 | 5 | 6 | 7 | ## Requirements 8 | 9 | | Name | Version | 10 | |------|---------| 11 | | [terraform](#requirement\_terraform) | ~> 1.0 | 12 | | [aws](#requirement\_aws) | ~> 5.7 | 13 | ## Providers 14 | 15 | | Name | Version | 16 | |------|---------| 17 | | [aws](#provider\_aws) | ~> 5.7 | 18 | ## Modules 19 | 20 | | Name | Source | Version | 21 | |------|--------|---------| 22 | | [cloudwatch](#module\_cloudwatch) | app.terraform.io/wallet-connect/cloudwatch-constants/aws | 1.0.0 | 23 | | [this](#module\_this) | app.terraform.io/wallet-connect/label/null | 0.3.2 | 24 | 25 | ## Inputs 26 | | Name | Description | Type | Default | Required | 27 | |------|-------------|------|---------|:--------:| 28 | | [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes and tags, which are merged. |
any
|
n/a
| yes | 29 | | [docdb\_cluster\_id](#input\_docdb\_cluster\_id) | The DocumentDB cluster ID |
string
|
n/a
| yes | 30 | | [docdb\_cpu\_threshold](#input\_docdb\_cpu\_threshold) | The DocumentDB CPU utilization alarm threshold in percents |
number
|
80
| no | 31 | | [docdb\_low\_memory\_throttling\_threshold](#input\_docdb\_low\_memory\_throttling\_threshold) | The DocumentDB low memory throttling alarm threshold in number of operations per period |
number
|
2
| no | 32 | | [docdb\_memory\_threshold](#input\_docdb\_memory\_threshold) | The DocumentDB available memory alarm threshold in GiB |
number
|
3
| no | 33 | | [ecs\_cluster\_name](#input\_ecs\_cluster\_name) | The name of the ECS cluster running the application |
string
|
n/a
| yes | 34 | | [ecs\_cpu\_threshold](#input\_ecs\_cpu\_threshold) | The ECS CPU utilization alarm threshold in percents |
number
|
80
| no | 35 | | [ecs\_memory\_threshold](#input\_ecs\_memory\_threshold) | The ECS memory utilization alarm threshold in percents |
number
|
80
| no | 36 | | [ecs\_service\_name](#input\_ecs\_service\_name) | The name of the ECS service running the application |
string
|
n/a
| yes | 37 | | [webhook\_url](#input\_webhook\_url) | The URL of the webhook to be called on alarms |
string
|
n/a
| yes | 38 | ## Outputs 39 | 40 | No outputs. 41 | 42 | 43 | 44 | -------------------------------------------------------------------------------- /terraform/cloudwatch/alarms_docdb.tf: -------------------------------------------------------------------------------- 1 | resource "aws_cloudwatch_metric_alarm" "docdb_cpu_utilization" { 2 | alarm_name = "${local.alarm_prefix} - DocumentDB CPU Utilization" 3 | alarm_description = "${local.alarm_prefix} - DocumentDB CPU utilization is high (over ${var.docdb_cpu_threshold}%)" 4 | 5 | namespace = module.cloudwatch.namespaces.DocumentDB 6 | dimensions = { 7 | DBClusterIdentifier = var.docdb_cluster_id 8 | } 9 | metric_name = module.cloudwatch.metrics.DocumentDB.CPUUtilization 10 | 11 | evaluation_periods = local.evaluation_periods 12 | period = local.period 13 | 14 | statistic = module.cloudwatch.statistics.Average 15 | comparison_operator = module.cloudwatch.operators.GreaterThanOrEqualToThreshold 16 | threshold = var.docdb_cpu_threshold 17 | treat_missing_data = "breaching" 18 | 19 | alarm_actions = [aws_sns_topic.webhook.arn] 20 | insufficient_data_actions = [aws_sns_topic.webhook.arn] 21 | } 22 | 23 | resource "aws_cloudwatch_metric_alarm" "docdb_available_memory" { 24 | alarm_name = "${local.alarm_prefix} - DocumentDB Available Memory" 25 | alarm_description = "${local.alarm_prefix} - DocumentDB available memory is low (less than ${var.docdb_memory_threshold}GiB)" 26 | 27 | namespace = module.cloudwatch.namespaces.DocumentDB 28 | dimensions = { 29 | DBClusterIdentifier = var.docdb_cluster_id 30 | } 31 | metric_name = module.cloudwatch.metrics.DocumentDB.FreeableMemory 32 | 33 | evaluation_periods = local.evaluation_periods 34 | period = local.period 35 | 36 | statistic = module.cloudwatch.statistics.Average 37 | comparison_operator = module.cloudwatch.operators.LessThanOrEqualToThreshold 38 | threshold = var.docdb_memory_threshold * pow(1000, 3) 39 | treat_missing_data = "breaching" 40 | 41 | alarm_actions = 
[aws_sns_topic.webhook.arn] 42 | insufficient_data_actions = [aws_sns_topic.webhook.arn] 43 | } 44 | 45 | resource "aws_cloudwatch_metric_alarm" "docdb_low_memory_throttling" { 46 | alarm_name = "${local.alarm_prefix} - DocumentDB Low Memory Throttling" 47 | alarm_description = "${local.alarm_prefix} - DocumentDB is throttling operations due to low memory" 48 | 49 | namespace = module.cloudwatch.namespaces.DocumentDB 50 | dimensions = { 51 | DBClusterIdentifier = var.docdb_cluster_id 52 | } 53 | metric_name = module.cloudwatch.metrics.DocumentDB.LowMemNumOperationsThrottled 54 | 55 | evaluation_periods = local.evaluation_periods 56 | period = local.period 57 | 58 | statistic = module.cloudwatch.statistics.Maximum 59 | comparison_operator = module.cloudwatch.operators.GreaterThanThreshold 60 | threshold = var.docdb_low_memory_throttling_threshold 61 | treat_missing_data = "breaching" 62 | 63 | alarm_actions = [aws_sns_topic.webhook.arn] 64 | insufficient_data_actions = [aws_sns_topic.webhook.arn] 65 | } 66 | -------------------------------------------------------------------------------- /terraform/cloudwatch/alarms_ecs.tf: -------------------------------------------------------------------------------- 1 | resource "aws_cloudwatch_metric_alarm" "ecs_cpu_utilization" { 2 | alarm_name = "${local.alarm_prefix} - ECS CPU Utilization" 3 | alarm_description = "${local.alarm_prefix} - ECS CPU utilization is high (over ${var.ecs_cpu_threshold}%)" 4 | 5 | namespace = module.cloudwatch.namespaces.ECS 6 | dimensions = { 7 | ClusterName = var.ecs_cluster_name 8 | ServiceName = var.ecs_service_name 9 | } 10 | metric_name = module.cloudwatch.metrics.ECS.CPUUtilization 11 | 12 | evaluation_periods = local.evaluation_periods 13 | period = local.period 14 | 15 | statistic = module.cloudwatch.statistics.Average 16 | comparison_operator = module.cloudwatch.operators.GreaterThanOrEqualToThreshold 17 | threshold = var.ecs_cpu_threshold 18 | treat_missing_data = "breaching" 19 | 20 | 
alarm_actions = [aws_sns_topic.webhook.arn] 21 | insufficient_data_actions = [aws_sns_topic.webhook.arn] 22 | } 23 | 24 | resource "aws_cloudwatch_metric_alarm" "ecs_mem_utilization" { 25 | alarm_name = "${local.alarm_prefix} - ECS Memory Utilization" 26 | alarm_description = "${local.alarm_prefix} - ECS Memory utilization is high (over ${var.ecs_memory_threshold}%)" 27 | 28 | namespace = module.cloudwatch.namespaces.ECS 29 | dimensions = { 30 | ClusterName = var.ecs_cluster_name 31 | ServiceName = var.ecs_service_name 32 | } 33 | metric_name = module.cloudwatch.metrics.ECS.MemoryUtilization 34 | 35 | evaluation_periods = local.evaluation_periods 36 | period = local.period 37 | 38 | statistic = module.cloudwatch.statistics.Average 39 | comparison_operator = module.cloudwatch.operators.GreaterThanOrEqualToThreshold 40 | threshold = var.ecs_memory_threshold 41 | treat_missing_data = "breaching" 42 | 43 | alarm_actions = [aws_sns_topic.webhook.arn] 44 | insufficient_data_actions = [aws_sns_topic.webhook.arn] 45 | } 46 | -------------------------------------------------------------------------------- /terraform/cloudwatch/main.tf: -------------------------------------------------------------------------------- 1 | module "cloudwatch" { 2 | source = "app.terraform.io/wallet-connect/cloudwatch-constants/aws" 3 | version = "1.0.0" 4 | } 5 | 6 | locals { 7 | alarm_prefix = "${title(module.this.name)} - ${title(module.this.stage)}" 8 | evaluation_periods = 1 9 | period = 60 * 5 10 | } 11 | 12 | #tfsec:ignore:aws-sns-enable-topic-encryption 13 | resource "aws_sns_topic" "webhook" { 14 | name = "cloudwatch-webhook" 15 | display_name = "CloudWatch Webhook forwarding to BetterUptime" 16 | } 17 | 18 | resource "aws_sns_topic_subscription" "webhook" { 19 | endpoint = var.webhook_url 20 | protocol = "https" 21 | topic_arn = aws_sns_topic.webhook.arn 22 | } 23 | -------------------------------------------------------------------------------- /terraform/cloudwatch/terraform.tf: 
-------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "~> 1.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = "~> 5.7" 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /terraform/cloudwatch/variables.tf: -------------------------------------------------------------------------------- 1 | variable "webhook_url" { 2 | description = "The URL of the webhook to be called on alarms" 3 | type = string 4 | } 5 | 6 | #------------------------------------------------------------------------------- 7 | # ECS 8 | 9 | variable "ecs_cluster_name" { 10 | description = "The name of the ECS cluster running the application" 11 | type = string 12 | } 13 | 14 | variable "ecs_service_name" { 15 | description = "The name of the ECS service running the application" 16 | type = string 17 | } 18 | 19 | variable "ecs_cpu_threshold" { 20 | description = "The ECS CPU utilization alarm threshold in percents" 21 | type = number 22 | default = 80 23 | } 24 | 25 | variable "ecs_memory_threshold" { 26 | description = "The ECS memory utilization alarm threshold in percents" 27 | type = number 28 | default = 80 29 | } 30 | 31 | #------------------------------------------------------------------------------- 32 | # DocumentDB 33 | 34 | variable "docdb_cluster_id" { 35 | description = "The DocumentDB cluster ID" 36 | type = string 37 | } 38 | 39 | variable "docdb_cpu_threshold" { 40 | description = "The DocumentDB CPU utilization alarm threshold in percents" 41 | type = number 42 | default = 80 43 | } 44 | 45 | variable "docdb_memory_threshold" { 46 | description = "The DocumentDB available memory alarm threshold in GiB" 47 | type = number 48 | default = 3 49 | } 50 | 51 | variable "docdb_low_memory_throttling_threshold" { 52 | description = "The DocumentDB low memory throttling alarm threshold in number of operations per period" 53 | type = 
number 54 | default = 2 55 | } 56 | -------------------------------------------------------------------------------- /terraform/context.tf: -------------------------------------------------------------------------------- 1 | module "this" { 2 | source = "app.terraform.io/wallet-connect/label/null" 3 | version = "0.3.2" 4 | 5 | namespace = "walletconnect" 6 | region = var.region 7 | stage = local.stage 8 | name = var.name 9 | 10 | tags = { 11 | Application = "keyserver" 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /terraform/docdb/README.md: -------------------------------------------------------------------------------- 1 | # `docdb` module 2 | 3 | Creates a DocumentDB cluster with auto-scaled read replicas. 4 | 5 | 6 | 7 | ## Requirements 8 | 9 | | Name | Version | 10 | |------|---------| 11 | | [terraform](#requirement\_terraform) | ~> 1.0 | 12 | | [aws](#requirement\_aws) | ~> 5.7 | 13 | | [random](#requirement\_random) | ~> 3.5 | 14 | ## Providers 15 | 16 | | Name | Version | 17 | |------|---------| 18 | | [aws](#provider\_aws) | ~> 5.7 | 19 | | [random](#provider\_random) | ~> 3.5 | 20 | ## Modules 21 | 22 | | Name | Source | Version | 23 | |------|--------|---------| 24 | | [this](#module\_this) | app.terraform.io/wallet-connect/label/null | 0.3.2 | 25 | 26 | ## Inputs 27 | | Name | Description | Type | Default | Required | 28 | |------|-------------|------|---------|:--------:| 29 | | [allowed\_egress\_cidr\_blocks](#input\_allowed\_egress\_cidr\_blocks) | The CIDR blocks to allow egress to |
list(string)
|
n/a
| yes | 30 | | [allowed\_ingress\_cidr\_blocks](#input\_allowed\_ingress\_cidr\_blocks) | The CIDR blocks to allow ingress from |
list(string)
|
n/a
| yes | 31 | | [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes and tags, which are merged. |
any
|
n/a
| yes | 32 | | [db\_name](#input\_db\_name) | The name of the mongo database |
string
|
n/a
| yes | 33 | | [default\_database](#input\_default\_database) | The name of the default database to create |
string
|
n/a
| yes | 34 | | [port](#input\_port) | The port the mongo database will listen on |
number
|
27017
| no | 35 | | [primary\_instance\_class](#input\_primary\_instance\_class) | The instance class of the primary instances |
string
|
n/a
| yes | 36 | | [primary\_instance\_count](#input\_primary\_instance\_count) | The number of primary instances to create |
number
|
n/a
| yes | 37 | | [private\_subnet\_ids](#input\_private\_subnet\_ids) | The IDs of the private subnets to deploy to |
list(string)
|
n/a
| yes | 38 | | [replica\_instance\_class](#input\_replica\_instance\_class) | The instance class of the replica instances |
string
|
n/a
| yes | 39 | | [replica\_instance\_count](#input\_replica\_instance\_count) | The number of replica instances to create |
number
|
n/a
| yes | 40 | | [vpc\_id](#input\_vpc\_id) | The ID of the VPC to deploy to |
string
|
n/a
| yes | 41 | ## Outputs 42 | 43 | | Name | Description | 44 | |------|-------------| 45 | | [cluster\_id](#output\_cluster\_id) | The cluster identifier | 46 | | [connection\_url](#output\_connection\_url) | The connection url | 47 | | [endpoint](#output\_endpoint) | The connection endpoint | 48 | | [password](#output\_password) | The master password | 49 | | [port](#output\_port) | The connection port | 50 | | [username](#output\_username) | The master username | 51 | 52 | 53 | 54 | -------------------------------------------------------------------------------- /terraform/docdb/autoscaling.tf: -------------------------------------------------------------------------------- 1 | #module "docdb-autoscaling" { 2 | # source = "app.terraform.io/wallet-connect/docdb-autoscaling/aws" 3 | # version = "0.1.3" 4 | # context = module.this 5 | # 6 | # cluster_identifier = aws_docdb_cluster.main.id 7 | # min_capacity = 1 8 | # max_capacity = 6 9 | # 10 | # scale_out_policy = [{ 11 | # metric_name = "CPUUtilization" 12 | # target = 80 13 | # statistic = "Average" 14 | # cooldown = 900 15 | # }] 16 | # 17 | # scale_in_policy = [{ 18 | # metric_name = "CPUUtilization" 19 | # target = 20 20 | # statistic = "Average" 21 | # cooldown = 900 22 | # }] 23 | #} 24 | -------------------------------------------------------------------------------- /terraform/docdb/kms.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | master_password = aws_secretsmanager_secret_version.master_password.secret_string 3 | } 4 | 5 | resource "random_password" "master_password" { 6 | length = 16 7 | special = false 8 | } 9 | 10 | #tfsec:ignore:aws-ssm-secret-use-customer-key 11 | resource "aws_secretsmanager_secret" "master_password" { 12 | name = "${local.name_prefix}-master-password" 13 | } 14 | 15 | resource "aws_secretsmanager_secret_version" "master_password" { 16 | secret_id = aws_secretsmanager_secret.master_password.id 17 | secret_string = 
random_password.master_password.result 18 | } 19 | 20 | resource "aws_kms_key" "docdb_encryption" { 21 | enable_key_rotation = true 22 | } 23 | -------------------------------------------------------------------------------- /terraform/docdb/main.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | name_prefix = replace("${module.this.stage}-${module.this.name}-${var.db_name}", "_", "-") 3 | } 4 | 5 | resource "aws_docdb_cluster" "main" { 6 | cluster_identifier = local.name_prefix 7 | master_username = "keyserver" 8 | master_password = local.master_password 9 | port = var.port 10 | db_subnet_group_name = aws_docdb_subnet_group.private_subnets.name 11 | storage_encrypted = true 12 | kms_key_id = aws_kms_key.docdb_encryption.arn 13 | enabled_cloudwatch_logs_exports = ["audit"] 14 | 15 | vpc_security_group_ids = [ 16 | aws_security_group.service_security_group.id 17 | ] 18 | skip_final_snapshot = true 19 | } 20 | 21 | #tfsec:ignore:aws-documentdb-encryption-customer-key 22 | resource "aws_docdb_cluster_instance" "primary" { 23 | count = var.primary_instance_count 24 | identifier = "${local.name_prefix}-primary-instance-${count.index}" 25 | cluster_identifier = aws_docdb_cluster.main.id 26 | instance_class = var.primary_instance_class 27 | promotion_tier = 0 28 | } 29 | 30 | #tfsec:ignore:aws-documentdb-encryption-customer-key 31 | resource "aws_docdb_cluster_instance" "replica" { 32 | count = var.replica_instance_count 33 | identifier = "${local.name_prefix}-replica-instance-${count.index}" 34 | cluster_identifier = aws_docdb_cluster.main.id 35 | instance_class = var.replica_instance_class 36 | promotion_tier = 1 37 | } 38 | -------------------------------------------------------------------------------- /terraform/docdb/network.tf: -------------------------------------------------------------------------------- 1 | resource "aws_docdb_subnet_group" "private_subnets" { 2 | name = 
"${local.name_prefix}-private-subnet-group" 3 | subnet_ids = var.private_subnet_ids 4 | } 5 | 6 | resource "aws_security_group" "service_security_group" { 7 | name = "${local.name_prefix}-service" 8 | description = "Allow ingress from the application" 9 | vpc_id = var.vpc_id 10 | 11 | ingress { 12 | description = "Allow inbound traffic to the DocDB cluster" 13 | from_port = 27017 14 | to_port = 27017 15 | protocol = "TCP" 16 | cidr_blocks = var.allowed_ingress_cidr_blocks 17 | } 18 | 19 | egress { 20 | description = "Allow outbound traffic from the DocDB cluster" 21 | from_port = 0 # Allowing any incoming port 22 | to_port = 0 # Allowing any outgoing port 23 | protocol = "-1" # Allowing any outgoing protocol 24 | cidr_blocks = var.allowed_egress_cidr_blocks 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /terraform/docdb/outputs.tf: -------------------------------------------------------------------------------- 1 | output "endpoint" { 2 | description = "The connection endpoint" 3 | value = aws_docdb_cluster.main.endpoint 4 | } 5 | 6 | output "username" { 7 | description = "The master username" 8 | value = aws_docdb_cluster.main.master_username 9 | } 10 | 11 | output "password" { 12 | description = "The master password" 13 | value = aws_docdb_cluster.main.master_password 14 | } 15 | 16 | output "port" { 17 | description = "The connection port" 18 | value = aws_docdb_cluster.main.port 19 | } 20 | 21 | output "connection_url" { 22 | description = "The connection url" 23 | value = "mongodb://${aws_docdb_cluster.main.master_username}:${aws_docdb_cluster.main.master_password}@${aws_docdb_cluster.main.endpoint}:${aws_docdb_cluster.main.port}/${var.default_database}?tls=true&tlsCaFile=rds-combined-ca-bundle.pem&tlsAllowInvalidCertificates=true&replicaSet=rs0&readPreference=secondaryPreferred&retryWrites=false&minPoolSize=32&maxPoolSize=256&maxIdleTimeMS=30000&connectTimeoutMS=30000" 24 | } 25 | 26 | output "cluster_id" { 
27 | description = "The cluster identifier" 28 | value = aws_docdb_cluster.main.cluster_identifier 29 | } 30 | -------------------------------------------------------------------------------- /terraform/docdb/terraform.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "~> 1.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = "~> 5.7" 8 | } 9 | random = { 10 | source = "hashicorp/random" 11 | version = "~> 3.5" 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /terraform/docdb/variables.tf: -------------------------------------------------------------------------------- 1 | #------------------------------------------------------------------------------- 2 | # DocDB Cluster 3 | 4 | variable "db_name" { 5 | description = "The name of the mongo database" 6 | type = string 7 | } 8 | 9 | variable "port" { 10 | description = "The port the mongo database will listen on" 11 | type = number 12 | default = 27017 13 | } 14 | 15 | variable "default_database" { 16 | description = "The name of the default database to create" 17 | type = string 18 | } 19 | 20 | variable "primary_instance_count" { 21 | description = "The number of primary instances to create" 22 | type = number 23 | } 24 | 25 | variable "primary_instance_class" { 26 | description = "The instance class of the primary instances" 27 | type = string 28 | } 29 | 30 | variable "replica_instance_count" { 31 | description = "The number of replica instances to create" 32 | type = number 33 | } 34 | 35 | variable "replica_instance_class" { 36 | description = "The instance class of the replica instances" 37 | type = string 38 | } 39 | 40 | #------------------------------------------------------------------------------- 41 | # Networking 42 | 43 | variable "vpc_id" { 44 | description = "The ID of the VPC to deploy to" 45 | type = string 46 | } 47 | 48 | variable 
"private_subnet_ids" { 49 | description = "The IDs of the private subnets to deploy to" 50 | type = list(string) 51 | } 52 | 53 | variable "allowed_ingress_cidr_blocks" { 54 | description = "The CIDR blocks to allow ingress from" 55 | type = list(string) 56 | } 57 | 58 | variable "allowed_egress_cidr_blocks" { 59 | description = "The CIDR blocks to allow egress to" 60 | type = list(string) 61 | } 62 | -------------------------------------------------------------------------------- /terraform/ecs/cluster_autoscaling.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | autoscaling_min_capacity = module.this.stage == "prod" ? var.autoscaling_min_capacity : 1 3 | } 4 | 5 | resource "aws_iam_role" "ecs_autoscaling_role" { 6 | name = "${module.this.name}-ecs-scale-application" 7 | 8 | assume_role_policy = jsonencode({ 9 | Version = "2012-10-17" 10 | Statement = [ 11 | { 12 | Sid = "GrantAssumeRole" 13 | Action = "sts:AssumeRole" 14 | Effect = "Allow" 15 | Principal = { 16 | Service = "application-autoscaling.amazonaws.com" 17 | } 18 | }, 19 | ] 20 | }) 21 | } 22 | 23 | resource "aws_appautoscaling_target" "ecs_target" { 24 | min_capacity = local.autoscaling_min_capacity 25 | max_capacity = var.autoscaling_max_capacity 26 | resource_id = "service/${aws_ecs_cluster.app_cluster.name}/${aws_ecs_service.app_service.name}" 27 | scalable_dimension = "ecs:service:DesiredCount" 28 | service_namespace = "ecs" 29 | } 30 | 31 | resource "aws_appautoscaling_policy" "ecs_target_cpu" { 32 | name = "application-scaling-policy-cpu" 33 | policy_type = "TargetTrackingScaling" 34 | resource_id = aws_appautoscaling_target.ecs_target.resource_id 35 | scalable_dimension = aws_appautoscaling_target.ecs_target.scalable_dimension 36 | service_namespace = aws_appautoscaling_target.ecs_target.service_namespace 37 | 38 | target_tracking_scaling_policy_configuration { 39 | predefined_metric_specification { 40 | predefined_metric_type = 
"ECSServiceAverageCPUUtilization" 41 | } 42 | target_value = 80 43 | } 44 | depends_on = [aws_appautoscaling_target.ecs_target] 45 | } 46 | 47 | resource "aws_appautoscaling_policy" "ecs_target_memory" { 48 | name = "application-scaling-policy-memory" 49 | policy_type = "TargetTrackingScaling" 50 | resource_id = aws_appautoscaling_target.ecs_target.resource_id 51 | scalable_dimension = aws_appautoscaling_target.ecs_target.scalable_dimension 52 | service_namespace = aws_appautoscaling_target.ecs_target.service_namespace 53 | 54 | target_tracking_scaling_policy_configuration { 55 | predefined_metric_specification { 56 | predefined_metric_type = "ECSServiceAverageMemoryUtilization" 57 | } 58 | target_value = 80 59 | } 60 | depends_on = [aws_appautoscaling_target.ecs_target] 61 | } 62 | -------------------------------------------------------------------------------- /terraform/ecs/cluster_iam.tf: -------------------------------------------------------------------------------- 1 | #------------------------------------------------------------------------------- 2 | # Task execution role 3 | 4 | resource "aws_iam_role" "ecs_task_execution_role" { 5 | name = "${module.this.stage}_${module.this.name}_ecs_task_execution_role" 6 | assume_role_policy = data.aws_iam_policy_document.ecs_task_assume_role_policy.json 7 | } 8 | data "aws_iam_policy_document" "ecs_task_assume_role_policy" { 9 | statement { 10 | actions = ["sts:AssumeRole"] 11 | 12 | principals { 13 | type = "Service" 14 | identifiers = ["ecs-tasks.amazonaws.com"] 15 | } 16 | } 17 | } 18 | 19 | # GeoIP Bucket Access 20 | resource "aws_iam_policy" "geoip_bucket_access" { 21 | name = "${module.this.id}-geoip-bucket_access" 22 | path = "/" 23 | description = "Allows ${module.this.id} to read from ${var.geoip_db_bucket_name}" 24 | 25 | policy = jsonencode({ 26 | "Version" : "2012-10-17", 27 | "Statement" : [ 28 | { 29 | "Sid" : "ListObjectsInGeoipBucket", 30 | "Effect" : "Allow", 31 | "Action" : ["s3:ListBucket"], 32 | 
"Resource" : ["arn:aws:s3:::${var.geoip_db_bucket_name}"] 33 | }, 34 | { 35 | "Sid" : "AllObjectActionsInGeoipBucket", 36 | "Effect" : "Allow", 37 | "Action" : ["s3:CopyObject", "s3:GetObject", "s3:HeadObject"], 38 | "Resource" : ["arn:aws:s3:::${var.geoip_db_bucket_name}/*"] 39 | }, 40 | ] 41 | }) 42 | } 43 | resource "aws_iam_role_policy_attachment" "geoip_bucket_access" { 44 | role = aws_iam_role.ecs_task_execution_role.name 45 | policy_arn = aws_iam_policy.geoip_bucket_access.arn 46 | } 47 | 48 | 49 | resource "aws_iam_role_policy_attachment" "ecs_task_execution_role_policy" { 50 | role = aws_iam_role.ecs_task_execution_role.name 51 | policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy" 52 | } 53 | 54 | resource "aws_iam_role_policy_attachment" "cloudwatch_write_policy" { 55 | role = aws_iam_role.ecs_task_execution_role.name 56 | policy_arn = "arn:aws:iam::aws:policy/CloudWatchLogsFullAccess" 57 | } 58 | 59 | resource "aws_iam_role_policy_attachment" "prometheus_write_policy" { 60 | role = aws_iam_role.ecs_task_execution_role.name 61 | policy_arn = "arn:aws:iam::aws:policy/AmazonPrometheusRemoteWriteAccess" 62 | } 63 | 64 | resource "aws_iam_role_policy_attachment" "ssm_read_only_policy" { 65 | role = aws_iam_role.ecs_task_execution_role.name 66 | policy_arn = "arn:aws:iam::aws:policy/AmazonSSMReadOnlyAccess" 67 | } 68 | 69 | resource "aws_iam_policy" "otel" { 70 | name = "${module.this.name}-otel" 71 | path = "/" 72 | policy = data.aws_iam_policy_document.otel.json 73 | } 74 | #tfsec:ignore:aws-iam-no-policy-wildcards 75 | data "aws_iam_policy_document" "otel" { 76 | statement { 77 | actions = [ 78 | "logs:PutLogEvents", 79 | "logs:CreateLogGroup", 80 | "logs:CreateLogStream", 81 | "logs:DescribeLogStreams", 82 | "logs:DescribeLogGroups", 83 | "ssm:GetParameters", 84 | ] 85 | resources = [ 86 | "*" 87 | ] 88 | } 89 | } 90 | resource "aws_iam_role_policy_attachment" "ecs_task_execution_fetch_ghcr_secret_policy" { 91 | role = 
aws_iam_role.ecs_task_execution_role.name 92 | policy_arn = aws_iam_policy.otel.arn 93 | } 94 | -------------------------------------------------------------------------------- /terraform/ecs/dns.tf: -------------------------------------------------------------------------------- 1 | # DNS Records 2 | resource "aws_route53_record" "dns_load_balancer" { 3 | for_each = var.route53_zones 4 | 5 | zone_id = each.key 6 | name = each.value 7 | type = "A" 8 | 9 | alias { 10 | name = aws_lb.load_balancer.dns_name 11 | zone_id = aws_lb.load_balancer.zone_id 12 | evaluate_target_health = true 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /terraform/ecs/main.tf: -------------------------------------------------------------------------------- 1 | resource "random_pet" "this" { 2 | length = 2 3 | } 4 | -------------------------------------------------------------------------------- /terraform/ecs/network.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | lb_name = substr(replace("${module.this.stage}-${module.this.name}-${random_pet.this.id}", "_", "-"), 0, 32) 3 | } 4 | 5 | #tfsec:ignore:aws-elb-drop-invalid-headers 6 | #tfsec:ignore:aws-elb-alb-not-public 7 | resource "aws_lb" "load_balancer" { 8 | name = local.lb_name 9 | load_balancer_type = "application" 10 | subnets = var.public_subnets 11 | 12 | security_groups = [aws_security_group.lb_ingress.id] 13 | 14 | lifecycle { 15 | create_before_destroy = true 16 | } 17 | } 18 | 19 | locals { 20 | main_certificate_key = keys(var.route53_zones_certificates)[0] 21 | main_certificate = var.route53_zones_certificates[local.main_certificate_key] 22 | additional_certificates = { for k, v in var.route53_zones_certificates : k => v if k != local.main_certificate_key } 23 | } 24 | 25 | resource "aws_lb_listener" "listener-https" { 26 | load_balancer_arn = aws_lb.load_balancer.arn 27 | port = "443" 28 | protocol = "HTTPS" 29 | 
certificate_arn = local.main_certificate 30 | ssl_policy = "ELBSecurityPolicy-TLS13-1-2-2021-06" 31 | 32 | default_action { 33 | type = "forward" 34 | target_group_arn = aws_lb_target_group.target_group.arn 35 | } 36 | 37 | lifecycle { 38 | create_before_destroy = true 39 | } 40 | } 41 | 42 | resource "aws_lb_listener_certificate" "listener-https" { 43 | for_each = local.additional_certificates 44 | listener_arn = aws_lb_listener.listener-https.arn 45 | certificate_arn = each.value 46 | } 47 | 48 | resource "aws_lb_listener" "listener-http" { 49 | load_balancer_arn = aws_lb.load_balancer.arn 50 | port = "80" 51 | protocol = "HTTP" 52 | 53 | default_action { 54 | type = "redirect" 55 | 56 | redirect { 57 | port = "443" 58 | protocol = "HTTPS" 59 | status_code = "HTTP_301" 60 | } 61 | } 62 | 63 | lifecycle { 64 | create_before_destroy = true 65 | } 66 | } 67 | 68 | resource "aws_lb_target_group" "target_group" { 69 | name = local.lb_name 70 | port = var.port 71 | protocol = "HTTP" 72 | target_type = "ip" 73 | vpc_id = var.vpc_id 74 | slow_start = 30 75 | 76 | health_check { 77 | protocol = "HTTP" 78 | path = "/health" # KeysServer's health path 79 | port = var.port 80 | interval = 15 81 | timeout = 10 82 | healthy_threshold = 2 83 | unhealthy_threshold = 2 84 | } 85 | 86 | lifecycle { 87 | create_before_destroy = true 88 | } 89 | } 90 | 91 | # Security Groups 92 | 93 | #tfsec:ignore:aws-ec2-no-public-ingress-sgr 94 | resource "aws_security_group" "lb_ingress" { 95 | name = "${local.lb_name}-lb-ingress" 96 | description = "Allow app port ingress from vpc" 97 | vpc_id = var.vpc_id 98 | 99 | ingress { 100 | from_port = 443 101 | to_port = 443 102 | protocol = "tcp" 103 | cidr_blocks = ["0.0.0.0/0"] 104 | description = "Allow HTTPS traffic from anywhere" 105 | } 106 | 107 | ingress { 108 | from_port = 80 109 | to_port = 80 110 | protocol = "tcp" 111 | cidr_blocks = ["0.0.0.0/0"] 112 | description = "Allow HTTP traffic from anywhere" 113 | } 114 | 115 | egress { 116 | 
from_port = 0 117 | to_port = 0 118 | protocol = "-1" 119 | cidr_blocks = [var.allowed_lb_ingress_cidr_blocks] 120 | description = "Allow traffic out to all VPC IP addresses" 121 | } 122 | 123 | lifecycle { 124 | create_before_destroy = true 125 | } 126 | } 127 | 128 | #tfsec:ignore:aws-ec2-no-public-egress-sgr 129 | resource "aws_security_group" "app_ingress" { 130 | name = "${local.lb_name}-app-ingress" 131 | description = "Allow app port ingress" 132 | vpc_id = var.vpc_id 133 | 134 | ingress { 135 | from_port = 0 136 | to_port = 0 137 | protocol = "-1" 138 | security_groups = [aws_security_group.lb_ingress.id] 139 | description = "Allow traffic from load balancer" 140 | } 141 | 142 | ingress { 143 | from_port = 0 144 | to_port = 0 145 | protocol = "-1" 146 | cidr_blocks = [var.allowed_app_ingress_cidr_blocks] 147 | description = "Allow traffic from allowed CIDR blocks" 148 | } 149 | 150 | egress { 151 | from_port = 0 152 | to_port = 0 153 | protocol = "-1" 154 | cidr_blocks = ["0.0.0.0/0"] 155 | description = "Allow traffic out to all IP addresses" 156 | } 157 | 158 | lifecycle { 159 | create_before_destroy = true 160 | } 161 | } 162 | -------------------------------------------------------------------------------- /terraform/ecs/outputs.tf: -------------------------------------------------------------------------------- 1 | output "service_name" { 2 | description = "The name of the service" 3 | value = aws_ecs_service.app_service.name 4 | } 5 | 6 | output "service_security_group_id" { 7 | description = "The ID of the security group for the service" 8 | value = aws_security_group.app_ingress.id 9 | } 10 | 11 | output "target_group_arn" { 12 | description = "The ARN of the target group" 13 | value = aws_lb_target_group.target_group.arn 14 | } 15 | 16 | output "load_balancer_arn" { 17 | description = "The ARN of the load balancer" 18 | value = aws_lb.load_balancer.arn 19 | } 20 | 21 | output "load_balancer_arn_suffix" { 22 | description = "The ARN suffix of the 
load balancer" 23 | value = aws_lb.load_balancer.arn_suffix 24 | } 25 | 26 | output "ecs_cluster_name" { 27 | description = "The name of the ECS cluster" 28 | value = aws_ecs_cluster.app_cluster.name 29 | } 30 | 31 | output "ecs_service_name" { 32 | description = "The name of the ECS service" 33 | value = aws_ecs_service.app_service.name 34 | } 35 | 36 | output "ecs_task_family" { 37 | description = "The family of the task definition" 38 | value = aws_ecs_task_definition.app_task.family 39 | } 40 | -------------------------------------------------------------------------------- /terraform/ecs/terraform.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "~> 1.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = "~> 5.7" 8 | } 9 | random = { 10 | source = "hashicorp/random" 11 | version = "3.5.1" 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /terraform/ecs/variables.tf: -------------------------------------------------------------------------------- 1 | #--------------------------------------- 2 | # Cluster 3 | 4 | variable "ecr_repository_url" { 5 | description = "The URL of the ECR repository where the app image is stored" 6 | type = string 7 | } 8 | 9 | variable "image_version" { 10 | description = "The version of the app image to deploy" 11 | type = string 12 | } 13 | 14 | variable "task_cpu" { 15 | description = "The number of CPU units to reserve for the container." 16 | type = number 17 | } 18 | 19 | variable "task_memory" { 20 | description = "The amount of memory (in MiB) to reserve for the container." 
21 | type = number 22 | } 23 | 24 | variable "autoscaling_desired_count" { 25 | description = "Minimum number of instances in the autoscaling group" 26 | type = number 27 | default = 2 28 | } 29 | 30 | variable "autoscaling_min_capacity" { 31 | description = "Minimum number of instances in the autoscaling group" 32 | type = number 33 | default = 2 34 | } 35 | 36 | variable "autoscaling_max_capacity" { 37 | description = "Maximum number of instances in the autoscaling group" 38 | type = number 39 | default = 8 40 | } 41 | 42 | #--------------------------------------- 43 | # DNS 44 | 45 | variable "route53_zones" { 46 | description = "The FQDNs to use for the app" 47 | type = map(string) 48 | } 49 | 50 | variable "route53_zones_certificates" { 51 | description = "The ARNs of the ACM certificates to use for HTTPS" 52 | type = map(string) 53 | } 54 | 55 | #--------------------------------------- 56 | # Network 57 | 58 | variable "vpc_id" { 59 | description = "The ID of the VPC to deploy to" 60 | type = string 61 | } 62 | 63 | variable "public_subnets" { 64 | description = "The IDs of the public subnets to deploy to" 65 | type = list(string) 66 | } 67 | 68 | variable "private_subnets" { 69 | description = "The IDs of the private subnets to deploy to" 70 | type = list(string) 71 | } 72 | 73 | variable "allowed_app_ingress_cidr_blocks" { 74 | description = "A list of CIDR blocks to allow ingress access to the application." 75 | type = string 76 | } 77 | 78 | variable "allowed_lb_ingress_cidr_blocks" { 79 | description = "A list of CIDR blocks to allow ingress access to the load-balancer." 
80 | type = string 81 | } 82 | #--------------------------------------- 83 | # Application 84 | 85 | variable "port" { 86 | description = "The port the app listens on" 87 | type = number 88 | } 89 | 90 | variable "keystore_addr" { 91 | description = "The address of the MongoDB instance to use for the persistent keystore" 92 | type = string 93 | } 94 | 95 | variable "log_level" { 96 | description = "Defines logging level for the application" 97 | type = string 98 | } 99 | 100 | variable "ofac_blocked_countries" { 101 | description = "The list of countries to block" 102 | type = string 103 | } 104 | 105 | variable "project_id" { 106 | description = "Project ID for Blockchain API" 107 | type = string 108 | } 109 | 110 | #--------------------------------------- 111 | # Monitoring 112 | 113 | variable "prometheus_endpoint" { 114 | description = "The endpoint of the Prometheus server to use for monitoring" 115 | type = string 116 | } 117 | 118 | #--------------------------------------- 119 | # GeoIP 120 | 121 | variable "geoip_db_bucket_name" { 122 | description = "The name of the S3 bucket where the GeoIP database is stored" 123 | type = string 124 | } 125 | 126 | variable "geoip_db_key" { 127 | description = "The key of the GeoIP database in the S3 bucket" 128 | type = string 129 | } 130 | -------------------------------------------------------------------------------- /terraform/inputs.tf: -------------------------------------------------------------------------------- 1 | data "terraform_remote_state" "org" { 2 | backend = "remote" 3 | config = { 4 | organization = "wallet-connect" 5 | workspaces = { 6 | name = "aws-org" 7 | } 8 | } 9 | } 10 | 11 | #data "terraform_remote_state" "datalake" { 12 | # backend = "remote" 13 | # config = { 14 | # organization = "wallet-connect" 15 | # workspaces = { 16 | # name = "data-lake-${module.this.stage}" 17 | # } 18 | # } 19 | #} 20 | 21 | data "terraform_remote_state" "infra_aws" { 22 | backend = "remote" 23 | config = { 24 | 
organization = "wallet-connect" 25 | workspaces = { 26 | name = "infra-aws" 27 | } 28 | } 29 | } 30 | 31 | data "terraform_remote_state" "monitoring" { 32 | backend = "remote" 33 | config = { 34 | organization = "wallet-connect" 35 | workspaces = { 36 | name = "monitoring" 37 | } 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /terraform/main.tf: -------------------------------------------------------------------------------- 1 | resource "random_pet" "this" { 2 | length = 2 3 | } 4 | 5 | locals { 6 | keystore_name = "keystore" 7 | ecr_repository_url = data.terraform_remote_state.org.outputs.accounts.wl.keys[local.stage].ecr-url 8 | 9 | stage = lookup({ 10 | "keyserver-wl-staging" = "staging", 11 | "keyserver-wl-prod" = "prod", 12 | "keyserver-staging" = "staging", 13 | "keyserver-prod" = "prod", 14 | "wl-staging" = "staging", 15 | "wl-prod" = "prod", 16 | "staging" = "staging", 17 | "prod" = "prod", 18 | }, terraform.workspace, terraform.workspace) 19 | } 20 | -------------------------------------------------------------------------------- /terraform/monitoring/README.md: -------------------------------------------------------------------------------- 1 | # `monitoring` module 2 | 3 | Configure the Grafana dashboards for the application 4 | 5 | 6 | 7 | ## Requirements 8 | 9 | | Name | Version | 10 | |------|---------| 11 | | [terraform](#requirement\_terraform) | >= 1.0 | 12 | | [grafana](#requirement\_grafana) | ~> 2.0 | 13 | | [jsonnet](#requirement\_jsonnet) | ~> 2.2.0 | 14 | ## Providers 15 | 16 | | Name | Version | 17 | |------|---------| 18 | | [grafana](#provider\_grafana) | ~> 2.0 | 19 | | [jsonnet](#provider\_jsonnet) | ~> 2.2.0 | 20 | ## Modules 21 | 22 | | Name | Source | Version | 23 | |------|--------|---------| 24 | | [monitoring-role](#module\_monitoring-role) | app.terraform.io/wallet-connect/monitoring-role/aws | 1.0.2 | 25 | | [this](#module\_this) | app.terraform.io/wallet-connect/label/null | 
0.3.2 | 26 | 27 | ## Inputs 28 | | Name | Description | Type | Default | Required | 29 | |------|-------------|------|---------|:--------:| 30 | | [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes and tags, which are merged. |
any
|
n/a
| yes | 31 | | [ecs\_service\_name](#input\_ecs\_service\_name) | The name of the ECS service. |
string
|
n/a
| yes | 32 | | [ecs\_target\_group\_arn](#input\_ecs\_target\_group\_arn) | The ARN of the ECS LB target group. |
string
|
n/a
| yes | 33 | | [ecs\_task\_family](#input\_ecs\_task\_family) | The name of the ECS task family. |
string
|
n/a
| yes | 34 | | [keystore\_cluster\_id](#input\_keystore\_cluster\_id) | The ID of the keystore DocDB cluster. |
string
|
n/a
| yes | 35 | | [load\_balancer\_arn](#input\_load\_balancer\_arn) | The ARN of the load balancer. |
string
|
n/a
| yes | 36 | | [monitoring\_role\_arn](#input\_monitoring\_role\_arn) | The ARN of the monitoring role. |
string
|
n/a
| yes | 37 | | [notification\_channels](#input\_notification\_channels) | The notification channels to send alerts to |
list(any)
|
n/a
| yes | 38 | | [prometheus\_endpoint](#input\_prometheus\_endpoint) | The endpoint for the Prometheus server. |
string
|
n/a
| yes | 39 | ## Outputs 40 | 41 | No outputs. 42 | 43 | 44 | 45 | -------------------------------------------------------------------------------- /terraform/monitoring/dashboard.jsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import 'grafonnet-lib/grafana.libsonnet'; 2 | local panels = import 'panels/panels.libsonnet'; 3 | 4 | local dashboard = grafana.dashboard; 5 | local row = grafana.row; 6 | 7 | local ds = { 8 | prometheus: { 9 | type: 'prometheus', 10 | uid: std.extVar('prometheus_uid'), 11 | }, 12 | cloudwatch: { 13 | type: 'cloudwatch', 14 | uid: std.extVar('cloudwatch_uid'), 15 | } 16 | }; 17 | local vars = { 18 | namespace: 'Keys', 19 | environment: std.extVar('environment'), 20 | notifications: std.parseJson(std.extVar('notifications')), 21 | 22 | ecs_service_name: std.extVar('ecs_service_name'), 23 | ecs_task_family: std.extVar('ecs_task_family'), 24 | load_balancer: std.extVar('load_balancer'), 25 | target_group: std.extVar('target_group'), 26 | docdb_cluster_id: std.extVar('docdb_cluster_id'), 27 | }; 28 | 29 | //////////////////////////////////////////////////////////////////////////////// 30 | 31 | local height = 8; 32 | local pos = grafana.layout.pos(height); 33 | 34 | //////////////////////////////////////////////////////////////////////////////// 35 | 36 | dashboard.new( 37 | title = std.extVar('dashboard_title'), 38 | uid = std.extVar('dashboard_uid'), 39 | editable = true, 40 | graphTooltip = dashboard.graphTooltips.sharedCrosshair, 41 | ) 42 | .addAnnotation( 43 | grafana.annotation.new( 44 | target = { 45 | limit: 100, 46 | matchAny: false, 47 | tags: [], 48 | type: 'dashboard', 49 | }, 50 | ) 51 | ) 52 | .addPanels(grafana.layout.generate_grid([ 53 | row.new('Application'), 54 | panels.app.identity.resolved(ds, vars) { gridPos: pos._3 }, 55 | panels.app.identity.register(ds, vars) { gridPos: pos._3 }, 56 | panels.app.identity.unregister(ds, vars) { gridPos: pos._3 }, 57 | 58 | 
panels.app.identity.invalid_register_cacao(ds, vars) { gridPos: pos._2 }, 59 | panels.app.identity.invalid_unregister_jwt(ds, vars) { gridPos: pos._2 }, 60 | 61 | panels.app.invite.resolved(ds, vars) { gridPos: pos._3 }, 62 | panels.app.invite.register(ds, vars) { gridPos: pos._3 }, 63 | panels.app.invite.unregister(ds, vars) { gridPos: pos._3 }, 64 | 65 | panels.app.invite.invalid_register_jwt(ds, vars) { gridPos: pos._2 }, 66 | panels.app.invite.invalid_unregister_jwt(ds, vars) { gridPos: pos._2 }, 67 | 68 | row.new('ECS'), 69 | panels.ecs.cpu(ds, vars) { gridPos: pos._2 }, 70 | panels.ecs.memory(ds, vars) { gridPos: pos._2 }, 71 | 72 | row.new('Load Balancer'), 73 | panels.lb.active_connections(ds, vars) { gridPos: pos._2 }, 74 | panels.lb.healthy_hosts(ds, vars) { gridPos: pos._2 }, 75 | 76 | panels.lb.requests(ds, vars) { gridPos: pos._3 }, 77 | panels.lb.error_4xx(ds, vars) { gridPos: pos._3 }, 78 | panels.lb.error_5xx(ds, vars) { gridPos: pos._3 }, 79 | 80 | row.new('DocumentDB'), 81 | panels.docdb.cpu(ds, vars) { gridPos: pos._3 }, 82 | panels.docdb.available_memory(ds, vars) { gridPos: pos._3 }, 83 | panels.docdb.connections(ds, vars) { gridPos: pos._3 }, 84 | 85 | panels.docdb.low_mem_op_throttled(ds, vars) { gridPos: pos._3 }, 86 | panels.docdb.volume(ds, vars) { gridPos: pos._3 }, 87 | panels.docdb.buffer_cache_hit_ratio(ds, vars) { gridPos: pos._3 }, 88 | ])) 89 | -------------------------------------------------------------------------------- /terraform/monitoring/dashboard.tf: -------------------------------------------------------------------------------- 1 | data "jsonnet_file" "dashboard" { 2 | source = "${path.module}/dashboard.jsonnet" 3 | 4 | ext_str = { 5 | dashboard_title = "Keys-Server - ${title(module.this.stage)}" 6 | dashboard_uid = "keyserver-${module.this.stage}" 7 | 8 | prometheus_uid = grafana_data_source.prometheus.uid 9 | cloudwatch_uid = grafana_data_source.cloudwatch.uid 10 | 11 | environment = module.this.stage 12 | notifications 
= jsonencode(var.notification_channels) 13 | 14 | ecs_service_name = var.ecs_service_name 15 | ecs_task_family = var.ecs_task_family 16 | load_balancer = var.load_balancer_arn 17 | target_group = var.ecs_target_group_arn 18 | docdb_cluster_id = var.keystore_cluster_id 19 | } 20 | } 21 | 22 | resource "grafana_dashboard" "main" { 23 | overwrite = true 24 | message = "Updated by Terraform" 25 | config_json = data.jsonnet_file.dashboard.rendered 26 | } 27 | -------------------------------------------------------------------------------- /terraform/monitoring/data_sources.tf: -------------------------------------------------------------------------------- 1 | module "monitoring-role" { 2 | source = "app.terraform.io/wallet-connect/monitoring-role/aws" 3 | version = "1.0.2" 4 | context = module.this 5 | remote_role_arn = var.monitoring_role_arn 6 | } 7 | 8 | resource "grafana_data_source" "prometheus" { 9 | type = "prometheus" 10 | name = "${module.this.stage}-${module.this.name}-amp" 11 | url = var.prometheus_endpoint 12 | 13 | json_data_encoded = jsonencode({ 14 | httpMethod = "GET" 15 | sigV4Auth = true 16 | sigV4AuthType = "ec2_iam_role" 17 | sigV4Region = module.this.region 18 | sigV4AssumeRoleArn = module.monitoring-role.iam_role_arn 19 | }) 20 | 21 | depends_on = [module.monitoring-role] 22 | } 23 | 24 | resource "grafana_data_source" "cloudwatch" { 25 | type = "cloudwatch" 26 | name = "${module.this.stage}-${module.this.name}-cloudwatch" 27 | 28 | json_data_encoded = jsonencode({ 29 | defaultRegion = module.this.region 30 | assumeRoleArn = module.monitoring-role.iam_role_arn 31 | }) 32 | 33 | depends_on = [module.monitoring-role] 34 | } 35 | -------------------------------------------------------------------------------- /terraform/monitoring/outputs.tf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reown-com/keys-server/b6afb9a4b91e85e21c3e0828cbf6d165886576ea/terraform/monitoring/outputs.tf 
-------------------------------------------------------------------------------- /terraform/monitoring/panels/README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ## Requirements 4 | 5 | No requirements. 6 | ## Providers 7 | 8 | No providers. 9 | ## Modules 10 | 11 | No modules. 12 | 13 | ## Inputs 14 | 15 | No inputs. 16 | 17 | ## Outputs 18 | 19 | No outputs. 20 | 21 | 22 | 23 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/app/app_metric.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local panels = grafana.panels; 3 | local targets = grafana.targets; 4 | 5 | local defaults = import '../defaults.libsonnet'; 6 | 7 | { 8 | new(ds, vars, title, metric_name, metric_label):: 9 | panels.timeseries( 10 | title = title, 11 | datasource = ds.prometheus, 12 | ) 13 | .configure(defaults.configuration.timeseries) 14 | 15 | .addTarget(targets.prometheus( 16 | legendFormat = metric_label, 17 | datasource = ds.prometheus, 18 | expr = 'sum(avg_over_time(increase(%s{aws_ecs_task_family="%s"}[$__rate_interval])[5m:30s]))' % [metric_name, vars.ecs_task_family], 19 | refId = metric_name, 20 | exemplar = true, 21 | )) 22 | } 23 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/app/identity/invalid_register_cacao.libsonnet: -------------------------------------------------------------------------------- 1 | local app_metric = import '../app_metric.libsonnet'; 2 | 3 | { 4 | new(ds, vars):: app_metric.new(ds, vars, 'Identity - Invalid CACAO during Registration', 'invalid_identity_register_cacao_total', 'Invalid CACAOs') 5 | } 6 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/app/identity/invalid_unregister_jwt.libsonnet: 
-------------------------------------------------------------------------------- 1 | local app_metric = import '../app_metric.libsonnet'; 2 | 3 | { 4 | new(ds, vars):: app_metric.new(ds, vars, 'Identity - Invalid JWT during Unregistration', 'invalid_identity_unregister_jwt_total', 'Invalid JWTs') 5 | } 6 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/app/identity/register.libsonnet: -------------------------------------------------------------------------------- 1 | local app_metric = import '../app_metric.libsonnet'; 2 | 3 | { 4 | new(ds, vars):: app_metric.new(ds, vars, 'Identity - Registrations', 'identity_register_total', 'Registrations') 5 | } 6 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/app/identity/resolved.libsonnet: -------------------------------------------------------------------------------- 1 | local app_metric = import '../app_metric.libsonnet'; 2 | 3 | { 4 | new(ds, vars):: app_metric.new(ds, vars, 'Identity - Resolutions', 'identity_resolved_total', 'Resolutions') 5 | } 6 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/app/identity/unregister.libsonnet: -------------------------------------------------------------------------------- 1 | local app_metric = import '../app_metric.libsonnet'; 2 | 3 | { 4 | new(ds, vars):: app_metric.new(ds, vars, 'Identity - Unregistrations', 'identity_unregister_total', 'Unregistrations') 5 | } 6 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/app/invite/invalid_register_jwt.libsonnet: -------------------------------------------------------------------------------- 1 | local app_metric = import '../app_metric.libsonnet'; 2 | 3 | { 4 | new(ds, vars):: app_metric.new(ds, vars, 'Invite - Invalid JWT during Registration', 'invalid_invite_register_jwt_total', 
'Invalid JWTs') 5 | } 6 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/app/invite/invalid_unregister_jwt.libsonnet: -------------------------------------------------------------------------------- 1 | local app_metric = import '../app_metric.libsonnet'; 2 | 3 | { 4 | new(ds, vars):: app_metric.new(ds, vars, 'Invite - Invalid JWT during Unregistration', 'invalid_invite_unregister_jwt_total', 'Invalid JWTs') 5 | } 6 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/app/invite/register.libsonnet: -------------------------------------------------------------------------------- 1 | local app_metric = import '../app_metric.libsonnet'; 2 | 3 | { 4 | new(ds, vars):: app_metric.new(ds, vars, 'Invite - Registrations', 'invite_register_total', 'Registrations') 5 | } 6 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/app/invite/resolved.libsonnet: -------------------------------------------------------------------------------- 1 | local app_metric = import '../app_metric.libsonnet'; 2 | 3 | { 4 | new(ds, vars):: app_metric.new(ds, vars, 'Invite - Resolutions', 'invite_resolved_total', 'Resolutions') 5 | } 6 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/app/invite/unregister.libsonnet: -------------------------------------------------------------------------------- 1 | local app_metric = import '../app_metric.libsonnet'; 2 | 3 | { 4 | new(ds, vars):: app_metric.new(ds, vars, 'Invite - Unregistrations', 'invite_unregister_total', 'Unregistrations') 5 | } 6 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/defaults.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import 
'../grafonnet-lib/grafana.libsonnet'; 2 | 3 | { 4 | configuration:: { 5 | timeseries:: 6 | grafana.panels.timeseries().createConfiguration( 7 | scaleDistribution = { 8 | type : 'linear' 9 | }, 10 | stacking = { 11 | group: 'A', 12 | mode: 'none' 13 | }, 14 | legend = grafana.common.legend(), 15 | tooltip = grafana.common.tooltip(), 16 | ), 17 | 18 | timeseries_resource:: 19 | grafana.panels.timeseries().createConfiguration( 20 | axisSoftMin = 0, 21 | axisSoftMax = 100, 22 | thresholdsStyle = { 23 | mode: 'area', 24 | }, 25 | scaleDistribution = { 26 | type: 'linear' 27 | }, 28 | stacking = { 29 | group: 'A', 30 | mode: 'none' 31 | }, 32 | legend = grafana.common.legend(), 33 | tooltip = grafana.common.tooltip(), 34 | ) 35 | .addOverride(grafana.override.new( 36 | name = 'CPU_Max', 37 | properties = [{ 38 | id: 'color', 39 | value: { 40 | mode: 'fixed', 41 | fixedColor: 'dark-blue' 42 | } 43 | }], 44 | )) 45 | .addOverride(grafana.override.new( 46 | name = 'CPU_Avg', 47 | properties = [{ 48 | id: 'color', 49 | value: { 50 | mode: 'fixed', 51 | fixedColor: 'blue' 52 | } 53 | }], 54 | )) 55 | .addOverride(grafana.override.new( 56 | name = 'Mem_Max', 57 | properties = [{ 58 | id: 'color', 59 | value: { 60 | mode: 'fixed', 61 | fixedColor: 'dark-purple' 62 | } 63 | }], 64 | )) 65 | .addOverride(grafana.override.new( 66 | name = 'Mem_Avg', 67 | properties = [{ 68 | id: 'color', 69 | value: { 70 | mode: 'fixed', 71 | fixedColor: 'purple' 72 | } 73 | }], 74 | )) 75 | .addThreshold({ 76 | color : 'red', 77 | value : 50 78 | }), 79 | 80 | timeseries_tr80:: self.timeseries 81 | .addThreshold({ 82 | color : 'red', 83 | value : 80 84 | }), 85 | }, 86 | } 87 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/docdb/available_memory.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import 
'../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | local alert = grafana.alert; 7 | local alertCondition = grafana.alertCondition; 8 | 9 | local mem_threshold = 400000000; // 0.400GiB 10 | local max_memory = 16000000000; // 16GiB (AWS DocDB max on db.r6g.large) 11 | 12 | local _configuration = defaults.configuration.timeseries 13 | .withThresholdStyle('area') 14 | .setThresholds( 15 | baseColor = 'red', 16 | steps = [ 17 | { value: mem_threshold, color: 'green' }, 18 | ] 19 | ) 20 | .withUnit('decbytes') 21 | .addOverride(grafana.override.new( 22 | name = 'Mem_Min', 23 | properties = [{ 24 | id: 'color', 25 | value: { 26 | mode: 'fixed', 27 | fixedColor: 'dark-purple' 28 | } 29 | }], 30 | )) 31 | .addOverride(grafana.override.new( 32 | name = 'Mem_Avg', 33 | properties = [{ 34 | id: 'color', 35 | value: { 36 | mode: 'fixed', 37 | fixedColor: 'purple' 38 | } 39 | }], 40 | )) 41 | .withSoftLimit( 42 | axisSoftMin = 0, 43 | axisSoftMax = max_memory, 44 | ); 45 | 46 | 47 | local mem_alert(vars) = alert.new( 48 | namespace = vars.namespace, 49 | name = "%s DocumentDB Freeable Memory Alert" % vars.environment, 50 | message = "%s DocumentDB Freeable Memory" % vars.environment, 51 | period = '5m', 52 | frequency = '1m', 53 | notifications = vars.notifications, 54 | conditions = [ 55 | alertCondition.new( 56 | evaluatorParams = [ mem_threshold ], 57 | evaluatorType = 'lt', 58 | operatorType = 'and', 59 | queryRefId = 'Mem_Avg', 60 | queryTimeStart = '5m', 61 | queryTimeEnd = 'now', 62 | reducerType = 'min', 63 | ), 64 | ] 65 | ); 66 | 67 | { 68 | new(ds, vars):: 69 | panels.timeseries( 70 | title = 'Available Memory', 71 | datasource = ds.cloudwatch, 72 | ) 73 | .configure(_configuration) 74 | .addPanelThreshold( 75 | op = 'lt', 76 | value = mem_threshold, 77 | ) 78 | 79 | .setAlert(mem_alert(vars)) 80 | 81 | .addTarget(targets.cloudwatch( 82 | refId = 'Mem_Min', 83 | alias = 'Freeable Memory (Min)', 84 
| datasource = ds.cloudwatch, 85 | namespace = 'AWS/DocDB', 86 | metricName = 'FreeableMemory', 87 | statistic = 'Minimum', 88 | dimensions = { 89 | DBClusterIdentifier: vars.docdb_cluster_id 90 | }, 91 | matchExact = true, 92 | )) 93 | .addTarget(targets.cloudwatch( 94 | refId = 'Mem_Avg', 95 | alias = 'Freeable Memory (Avg)', 96 | datasource = ds.cloudwatch, 97 | namespace = 'AWS/DocDB', 98 | metricName = 'FreeableMemory', 99 | statistic = 'Average', 100 | dimensions = { 101 | DBClusterIdentifier: vars.docdb_cluster_id 102 | }, 103 | matchExact = true, 104 | )) 105 | } 106 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/docdb/buffer_cache_hit_ratio.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | local _configuration = defaults.configuration.timeseries 8 | .withUnit('percent') 9 | .withSoftLimit( 10 | axisSoftMin = 90, 11 | axisSoftMax = 100, 12 | ); 13 | 14 | { 15 | new(ds, vars):: 16 | panels.timeseries( 17 | title = 'DocumentDB Buffer Cache Hit Ratio', 18 | description = 'See https://docs.aws.amazon.com/documentdb/latest/developerguide/best_practices.html', 19 | datasource = ds.cloudwatch, 20 | ) 21 | .configure(_configuration) 22 | 23 | .addTarget(targets.cloudwatch( 24 | alias = 'Average Cache Hit %', 25 | datasource = ds.cloudwatch, 26 | namespace = 'AWS/DocDB', 27 | metricName = 'BufferCacheHitRatio', 28 | statistic = 'Average', 29 | dimensions = { 30 | DBClusterIdentifier: vars.docdb_cluster_id 31 | }, 32 | matchExact = true, 33 | )) 34 | } 35 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/docdb/connections.libsonnet: 
-------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'Database Connections', 11 | datasource = ds.cloudwatch, 12 | ) 13 | .configure(defaults.configuration.timeseries) 14 | 15 | .addTarget(targets.cloudwatch( 16 | alias = 'Database Connections', 17 | datasource = ds.cloudwatch, 18 | namespace = 'AWS/DocDB', 19 | metricName = 'DatabaseConnections', 20 | statistic = 'Average', 21 | dimensions = { 22 | DBClusterIdentifier: vars.docdb_cluster_id 23 | }, 24 | matchExact = true, 25 | )) 26 | } 27 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/docdb/cpu.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | local alert = grafana.alert; 7 | local alertCondition = grafana.alertCondition; 8 | 9 | local cpu_alert(vars) = alert.new( 10 | namespace = vars.namespace, 11 | name = "%s DocumentDB CPU alert" % vars.environment, 12 | message = "%s DocumentDB CPU alert" % vars.environment, 13 | period = '5m', 14 | frequency = '1m', 15 | notifications = vars.notifications, 16 | conditions = [ 17 | alertCondition.new( 18 | evaluatorParams = [ 50 ], 19 | evaluatorType = 'gt', 20 | operatorType = 'or', 21 | queryRefId = 'CPU_Max', 22 | queryTimeStart = '5m', 23 | queryTimeEnd = 'now', 24 | reducerType = 'avg', 25 | ), 26 | ] 27 | ); 28 | 29 | { 30 | new(ds, vars):: 31 | panels.timeseries( 32 | title = 'CPU Utilization', 33 | datasource = ds.cloudwatch, 34 | ) 35 | 
.configure(defaults.configuration.timeseries_resource) 36 | .setAlert(cpu_alert(vars)) 37 | 38 | .addTarget(targets.cloudwatch( 39 | refId = 'CPU_Max', 40 | alias = 'CPU (Max)', 41 | datasource = ds.cloudwatch, 42 | namespace = 'AWS/DocDB', 43 | metricName = 'CPUUtilization', 44 | statistic = 'Maximum', 45 | dimensions = { 46 | DBClusterIdentifier: vars.docdb_cluster_id 47 | }, 48 | matchExact = true, 49 | )) 50 | .addTarget(targets.cloudwatch( 51 | refId = 'CPU_Avg', 52 | alias = 'CPU (Avg)', 53 | datasource = ds.cloudwatch, 54 | namespace = 'AWS/DocDB', 55 | metricName = 'CPUUtilization', 56 | statistic = 'Average', 57 | dimensions = { 58 | DBClusterIdentifier: vars.docdb_cluster_id 59 | }, 60 | matchExact = true, 61 | )) 62 | } 63 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/docdb/low_mem_op_throttled.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | local alert = grafana.alert; 7 | local alertCondition = grafana.alertCondition; 8 | 9 | local ops_threshold = 2; 10 | 11 | local _configuration = defaults.configuration.timeseries 12 | .withSoftLimit( 13 | axisSoftMin = 0, 14 | axisSoftMax = 10, 15 | ) 16 | .withThresholdStyle('area') 17 | .addOverride(grafana.override.new( 18 | name = 'Ops_Max', 19 | properties = [{ 20 | id: 'color', 21 | value: { 22 | mode: 'fixed', 23 | fixedColor: 'red' 24 | } 25 | }], 26 | )) 27 | .addThreshold({ 28 | color : 'red', 29 | value : ops_threshold 30 | }); 31 | 32 | 33 | local ops_alert(vars) = alert.new( 34 | namespace = vars.namespace, 35 | name = "%s DocumentDB LowMem Num Operations Throttled Alert" % vars.environment, 36 | message = "%s DocumentDB LowMem Num Operations Throttled" % vars.environment, 37 | 
period = '5m', 38 | frequency = '1m', 39 | notifications = vars.notifications, 40 | conditions = [ 41 | alertCondition.new( 42 | evaluatorParams = [ ops_threshold ], 43 | evaluatorType = 'gt', 44 | operatorType = 'and', 45 | queryRefId = 'Ops_Max', 46 | queryTimeStart = '5m', 47 | queryTimeEnd = 'now', 48 | reducerType = 'max', 49 | ), 50 | ] 51 | ); 52 | 53 | { 54 | new(ds, vars):: 55 | panels.timeseries( 56 | title = 'LowMem Num Operations Throttled', 57 | datasource = ds.cloudwatch, 58 | ) 59 | .configure(_configuration) 60 | 61 | .setAlert(ops_alert(vars)) 62 | 63 | .addTarget(targets.cloudwatch( 64 | alias = 'LowMem Num Operations Throttled (Max)', 65 | datasource = ds.cloudwatch, 66 | dimensions = { 67 | DBClusterIdentifier: vars.docdb_cluster_id 68 | }, 69 | matchExact = true, 70 | namespace = 'AWS/DocDB', 71 | metricName = 'LowMemNumOperationsThrottled', 72 | statistic = 'Maximum', 73 | refId = 'Ops_Max', 74 | )) 75 | } 76 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/docdb/volume.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | local _configuration = defaults.configuration.timeseries 8 | .withUnit('decbytes') 9 | .addThreshold({ 10 | color : 'red', 11 | value : 40000000000000, // 40TB, Max is 64TB. 
12 | }) 13 | .withSpanNulls(true); 14 | 15 | { 16 | new(ds, vars):: 17 | panels.timeseries( 18 | title = 'DocumentDB Volume', 19 | description = 'Max is 64TB', 20 | datasource = ds.cloudwatch, 21 | ) 22 | .configure(_configuration) 23 | 24 | .addTarget(targets.cloudwatch( 25 | datasource = ds.cloudwatch, 26 | namespace = 'AWS/DocDB', 27 | metricName = 'VolumeBytesUsed', 28 | statistic = 'Maximum', 29 | dimensions = { 30 | DBClusterIdentifier: vars.docdb_cluster_id 31 | }, 32 | )) 33 | } 34 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/ecs/cpu.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | local overrides = defaults.overrides; 7 | 8 | { 9 | new(ds, vars):: 10 | panels.timeseries( 11 | title = 'CPU Utilization', 12 | datasource = ds.cloudwatch, 13 | ) 14 | .configure(overrides.cpu(defaults.configuration.timeseries_resource)) 15 | .setAlert(defaults.alerts.cpu( 16 | namespace = vars.namespace, 17 | title = 'ECS', 18 | env = vars.environment, 19 | notifications = vars.notifications, 20 | )) 21 | 22 | .addTarget(targets.cloudwatch( 23 | alias = 'CPU (Max)', 24 | datasource = ds.cloudwatch, 25 | dimensions = { 26 | ServiceName: vars.ecs_service_name 27 | }, 28 | metricName = 'CPUUtilization', 29 | namespace = 'AWS/ECS', 30 | statistic = 'Maximum', 31 | refId = 'CPU_Max', 32 | )) 33 | .addTarget(targets.cloudwatch( 34 | alias = 'CPU (Avg)', 35 | datasource = ds.cloudwatch, 36 | dimensions = { 37 | ServiceName: vars.ecs_service_name 38 | }, 39 | metricName = 'CPUUtilization', 40 | namespace = 'AWS/ECS', 41 | statistic = 'Average', 42 | refId = 'CPU_Avg', 43 | )) 44 | } 45 | 
-------------------------------------------------------------------------------- /terraform/monitoring/panels/ecs/memory.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'Memory Utilization', 11 | datasource = ds.cloudwatch, 12 | ) 13 | .configure(defaults.overrides.memory(defaults.configuration.timeseries_resource)) 14 | 15 | .setAlert(defaults.alerts.memory( 16 | namespace = vars.namespace, 17 | title = 'ECS', 18 | env = vars.environment, 19 | notifications = vars.notifications, 20 | )) 21 | 22 | .addTarget(targets.cloudwatch( 23 | alias = 'Memory (Max)', 24 | datasource = ds.cloudwatch, 25 | namespace = 'AWS/ECS', 26 | metricName = 'MemoryUtilization', 27 | dimensions = { 28 | ServiceName: vars.ecs_service_name 29 | }, 30 | statistic = 'Maximum', 31 | refId = 'Mem_Max', 32 | )) 33 | .addTarget(targets.cloudwatch( 34 | alias = 'Memory (Avg)', 35 | datasource = ds.cloudwatch, 36 | namespace = 'AWS/ECS', 37 | metricName = 'MemoryUtilization', 38 | dimensions = { 39 | ServiceName: vars.ecs_service_name 40 | }, 41 | statistic = 'Average', 42 | refId = 'Mem_Avg', 43 | )) 44 | } 45 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/lb/active_connections.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'Active Connections', 11 | datasource = ds.cloudwatch, 12 | ) 13 | 
.configure(defaults.configuration.timeseries) 14 | 15 | .addTarget(targets.cloudwatch( 16 | datasource = ds.cloudwatch, 17 | namespace = 'AWS/ApplicationELB', 18 | metricName = 'ActiveConnectionCount', 19 | dimensions = { 20 | LoadBalancer: vars.load_balancer 21 | }, 22 | statistic = 'Average', 23 | )) 24 | } 25 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/lb/error_4xx.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | local threshold = 100; 8 | 9 | local _configuration = defaults.configuration.timeseries 10 | .withSoftLimit( 11 | axisSoftMin = 0, 12 | axisSoftMax = threshold * 1.2, 13 | ) 14 | .withThresholdStyle(grafana.fieldConfig.thresholdStyle.dashed) 15 | .addThreshold({ 16 | color : defaults.values.colors.critical, 17 | value : threshold, 18 | }); 19 | 20 | { 21 | new(ds, vars):: 22 | panels.timeseries( 23 | title = '4XX', 24 | datasource = ds.cloudwatch, 25 | ) 26 | .configure( 27 | defaults.configuration.timeseries 28 | .withSoftLimit( 29 | axisSoftMin = 0, 30 | axisSoftMax = threshold * 1.2, 31 | ) 32 | .withThresholdStyle(grafana.fieldConfig.thresholdStyle.dashed) 33 | .addThreshold({ 34 | color : defaults.values.colors.critical, 35 | value : threshold, 36 | }) 37 | ) 38 | .addPanelThreshold( 39 | op = 'gt', 40 | value = threshold, 41 | ) 42 | 43 | .addTarget(targets.cloudwatch( 44 | alias = 'ELB', 45 | datasource = ds.cloudwatch, 46 | namespace = 'AWS/ApplicationELB', 47 | metricName = 'HTTPCode_ELB_4XX_Count', 48 | dimensions = { 49 | LoadBalancer: vars.load_balancer 50 | }, 51 | matchExact = true, 52 | statistic = 'Sum', 53 | refId = 'ELB', 54 | )) 55 | .addTarget(targets.cloudwatch( 56 | alias = 'Target', 57 | datasource = 
ds.cloudwatch, 58 | namespace = 'AWS/ApplicationELB', 59 | metricName = 'HTTPCode_Target_4XX_Count', 60 | dimensions = { 61 | LoadBalancer: vars.load_balancer 62 | }, 63 | matchExact = true, 64 | statistic = 'Sum', 65 | refId = 'Target', 66 | )) 67 | } 68 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/lb/error_5xx.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | local threshold = 5; 8 | 9 | local _configuration = defaults.configuration.timeseries 10 | .withSoftLimit( 11 | axisSoftMin = 0, 12 | axisSoftMax = threshold * 1.2, 13 | ) 14 | .withThresholdStyle(grafana.fieldConfig.thresholdStyle.dashed) 15 | .addThreshold({ 16 | color : defaults.values.colors.critical, 17 | value : threshold, 18 | }); 19 | 20 | { 21 | new(ds, vars):: 22 | panels.timeseries( 23 | title = '5XX', 24 | datasource = ds.cloudwatch, 25 | ) 26 | .configure(_configuration) 27 | .addPanelThreshold( 28 | op = 'gt', 29 | value = threshold, 30 | ) 31 | 32 | .setAlert( 33 | grafana.alert.new( 34 | namespace = vars.namespace, 35 | name = "%(env)s - 5XX alert" % { env: grafana.utils.strings.capitalize(vars.environment) }, 36 | message = '%(env)s - Notify - 5XX alert' % { env: grafana.utils.strings.capitalize(vars.environment) }, 37 | notifications = vars.notifications, 38 | noDataState = 'no_data', 39 | period = '0m', 40 | conditions = [ 41 | grafana.alertCondition.new( 42 | evaluatorParams = [ 5 ], 43 | evaluatorType = 'gt', 44 | operatorType = 'or', 45 | queryRefId = 'ELB', 46 | queryTimeStart = '5m', 47 | queryTimeEnd = 'now', 48 | reducerType = grafana.alert_reducers.Avg 49 | ), 50 | grafana.alertCondition.new( 51 | evaluatorParams = [ threshold ], 52 | evaluatorType = 'gt', 
53 | operatorType = 'or', 54 | queryRefId = 'Target', 55 | queryTimeStart = '5m', 56 | queryTimeEnd = 'now', 57 | reducerType = grafana.alert_reducers.Avg 58 | ), 59 | ], 60 | ) 61 | ) 62 | 63 | .addTarget(targets.cloudwatch( 64 | alias = 'ELB', 65 | datasource = ds.cloudwatch, 66 | namespace = 'AWS/ApplicationELB', 67 | metricName = 'HTTPCode_ELB_5XX_Count', 68 | dimensions = { 69 | LoadBalancer: vars.load_balancer 70 | }, 71 | statistic = 'Sum', 72 | refId = 'ELB', 73 | )) 74 | .addTarget(targets.cloudwatch( 75 | alias = 'Target', 76 | datasource = ds.cloudwatch, 77 | namespace = 'AWS/ApplicationELB', 78 | metricName = 'HTTPCode_Target_5XX_Count', 79 | dimensions = { 80 | LoadBalancer: vars.load_balancer 81 | }, 82 | statistic = 'Sum', 83 | refId = 'Target', 84 | )) 85 | } 86 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/lb/healthy_hosts.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | local _configuration = defaults.configuration.timeseries 8 | .withSoftLimit( 9 | axisSoftMin = 0, 10 | axisSoftMax = 5, 11 | ); 12 | 13 | { 14 | new(ds, vars):: 15 | panels.timeseries( 16 | title = 'Healthy Hosts', 17 | datasource = ds.cloudwatch, 18 | ) 19 | .configure(_configuration) 20 | 21 | .addTarget(targets.cloudwatch( 22 | datasource = ds.cloudwatch, 23 | metricQueryType = grafana.target.cloudwatch.metricQueryTypes.query, 24 | 25 | dimensions = { 26 | TargetGroup: vars.target_group 27 | }, 28 | metricName = 'HealthyHostCount', 29 | namespace = 'AWS/ApplicationELB', 30 | sql = { 31 | from: { 32 | property: { 33 | name: "AWS/ApplicationELB", 34 | type: "string" 35 | }, 36 | type: "property" 37 | }, 38 | select: { 39 | name: "MAX", 40 | parameters: [ 
41 | { 42 | name: "HealthyHostCount", 43 | type: "functionParameter" 44 | } 45 | ], 46 | type: "function" 47 | }, 48 | where: { 49 | expressions: [ 50 | { 51 | operator: { 52 | name: "=", 53 | value: vars.load_balancer 54 | }, 55 | property: { 56 | name: "LoadBalancer", 57 | type: "string" 58 | }, 59 | type: "operator" 60 | } 61 | ], 62 | type: "and" 63 | } 64 | }, 65 | sqlExpression = "SELECT MAX(HealthyHostCount) FROM \"AWS/ApplicationELB\" WHERE LoadBalancer = '%s'" % [vars.load_balancer], 66 | statistic = 'Maximum', 67 | )) 68 | } 69 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/lb/requests.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'Requests', 11 | datasource = ds.cloudwatch, 12 | ) 13 | .configure(defaults.configuration.timeseries) 14 | 15 | .addTarget(targets.cloudwatch( 16 | alias = 'Requests', 17 | datasource = ds.cloudwatch, 18 | namespace = 'AWS/ApplicationELB', 19 | metricName = 'RequestCount', 20 | dimensions = { 21 | LoadBalancer: vars.load_balancer 22 | }, 23 | matchExact = true, 24 | statistic = 'Sum', 25 | refId = 'Requests', 26 | )) 27 | } 28 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/panels.libsonnet: -------------------------------------------------------------------------------- 1 | { 2 | ecs: { 3 | cpu: (import 'ecs/cpu.libsonnet').new, 4 | memory: (import 'ecs/memory.libsonnet').new, 5 | }, 6 | 7 | app: { 8 | invite: { 9 | register: (import 'app/invite/register.libsonnet').new, 10 | resolved: (import 'app/invite/resolved.libsonnet').new, 11 | unregister: (import 
'app/invite/unregister.libsonnet').new, 12 | invalid_register_jwt: (import 'app/invite/invalid_register_jwt.libsonnet').new, 13 | invalid_unregister_jwt: (import 'app/invite/invalid_unregister_jwt.libsonnet').new, 14 | }, 15 | identity: { 16 | register: (import 'app/identity/register.libsonnet').new, 17 | resolved: (import 'app/identity/resolved.libsonnet').new, 18 | unregister: (import 'app/identity/unregister.libsonnet').new, 19 | invalid_register_cacao: (import 'app/identity/invalid_register_cacao.libsonnet').new, 20 | invalid_unregister_jwt: (import 'app/identity/invalid_unregister_jwt.libsonnet').new, 21 | }, 22 | }, 23 | 24 | lb: { 25 | active_connections: (import 'lb/active_connections.libsonnet').new, 26 | error_4xx: (import 'lb/error_4xx.libsonnet').new, 27 | error_5xx: (import 'lb/error_5xx.libsonnet').new, 28 | healthy_hosts: (import 'lb/healthy_hosts.libsonnet').new, 29 | requests: (import 'lb/requests.libsonnet').new, 30 | }, 31 | 32 | docdb: { 33 | buffer_cache_hit_ratio: (import 'docdb/buffer_cache_hit_ratio.libsonnet').new, 34 | cpu: (import 'docdb/cpu.libsonnet').new, 35 | volume: (import 'docdb/volume.libsonnet').new, 36 | available_memory: (import 'docdb/available_memory.libsonnet').new, 37 | connections: (import 'docdb/connections.libsonnet').new, 38 | low_mem_op_throttled: (import 'docdb/low_mem_op_throttled.libsonnet').new, 39 | }, 40 | } 41 | -------------------------------------------------------------------------------- /terraform/monitoring/terraform.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0" 3 | 4 | required_providers { 5 | grafana = { 6 | source = "grafana/grafana" 7 | version = "~> 2.0" 8 | } 9 | jsonnet = { 10 | source = "alxrem/jsonnet" 11 | version = "~> 2.2.0" 12 | } 13 | } 14 | } 15 | 16 | #provider "jsonnet" { 17 | # jsonnet_path = "./grafonnet-lib,./panels" 18 | #} 19 | 
-------------------------------------------------------------------------------- /terraform/monitoring/variables.tf: -------------------------------------------------------------------------------- 1 | variable "notification_channels" { 2 | description = "The notification channels to send alerts to" 3 | type = list(any) 4 | } 5 | 6 | variable "prometheus_endpoint" { 7 | description = "The endpoint for the Prometheus server." 8 | type = string 9 | } 10 | 11 | variable "ecs_service_name" { 12 | description = "The name of the ECS service." 13 | type = string 14 | } 15 | 16 | variable "ecs_target_group_arn" { 17 | description = "The ARN of the ECS LB target group." 18 | type = string 19 | } 20 | 21 | variable "load_balancer_arn" { 22 | description = "The ARN of the load balancer." 23 | type = string 24 | } 25 | 26 | variable "keystore_cluster_id" { 27 | description = "The ID of the keystore DocDB cluster." 28 | type = string 29 | } 30 | 31 | variable "monitoring_role_arn" { 32 | description = "The ARN of the monitoring role." 33 | type = string 34 | } 35 | 36 | variable "ecs_task_family" { 37 | description = "The name of the ECS task family." 
38 | type = string 39 | } 40 | -------------------------------------------------------------------------------- /terraform/outputs.tf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reown-com/keys-server/b6afb9a4b91e85e21c3e0828cbf6d165886576ea/terraform/outputs.tf -------------------------------------------------------------------------------- /terraform/providers.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | region = var.region 3 | 4 | default_tags { 5 | tags = module.this.tags 6 | } 7 | } 8 | 9 | provider "grafana" { 10 | url = "https://${data.terraform_remote_state.monitoring.outputs.grafana_workspaces.central.grafana_endpoint}" 11 | auth = var.grafana_auth 12 | } 13 | -------------------------------------------------------------------------------- /terraform/res_application.tf: -------------------------------------------------------------------------------- 1 | data "aws_s3_bucket" "geoip" { 2 | bucket = data.terraform_remote_state.infra_aws.outputs.geoip_bucked_id 3 | } 4 | 5 | # ECS Cluster, Task, Service, and Load Balancer for our app 6 | module "ecs" { 7 | source = "./ecs" 8 | context = module.this 9 | 10 | # Cluster 11 | ecr_repository_url = local.ecr_repository_url 12 | image_version = var.image_version 13 | task_cpu = 256 14 | task_memory = 512 15 | autoscaling_desired_count = 2 16 | autoscaling_min_capacity = 2 17 | autoscaling_max_capacity = 8 18 | 19 | # DNS 20 | route53_zones = local.zones 21 | route53_zones_certificates = local.zones_certificates 22 | 23 | # Network 24 | vpc_id = module.vpc.vpc_id 25 | public_subnets = module.vpc.public_subnets 26 | private_subnets = module.vpc.private_subnets 27 | allowed_app_ingress_cidr_blocks = module.vpc.vpc_cidr_block 28 | allowed_lb_ingress_cidr_blocks = module.vpc.vpc_cidr_block 29 | 30 | # Application 31 | port = 8080 32 | keystore_addr = module.keystore.connection_url 33 | 
log_level = var.log_level 34 | ofac_blocked_countries = var.ofac_blocked_countries 35 | project_id = var.project_id 36 | 37 | # Monitoring 38 | prometheus_endpoint = aws_prometheus_workspace.prometheus.prometheus_endpoint 39 | 40 | # GeoIP 41 | geoip_db_bucket_name = data.aws_s3_bucket.geoip.id 42 | geoip_db_key = var.geoip_db_key 43 | } 44 | -------------------------------------------------------------------------------- /terraform/res_cloudwatch.tf: -------------------------------------------------------------------------------- 1 | module "cloudwatch" { 2 | source = "./cloudwatch" 3 | context = module.this.context 4 | 5 | webhook_url = var.betterstack_cloudwatch_webhook 6 | 7 | ecs_cluster_name = module.ecs.ecs_cluster_name 8 | ecs_service_name = module.ecs.ecs_service_name 9 | 10 | docdb_cluster_id = module.keystore.cluster_id 11 | } 12 | -------------------------------------------------------------------------------- /terraform/res_dns.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | zones = { for k, v in tomap(data.terraform_remote_state.infra_aws.outputs.zones.keyserver[local.stage]) : v.id => v.name } 3 | zones_certificates = { for k, v in module.dns_certificate : v.zone_id => v.certificate_arn } 4 | } 5 | 6 | module "dns_certificate" { 7 | for_each = local.zones 8 | source = "app.terraform.io/wallet-connect/dns/aws" 9 | version = "0.1.3" 10 | context = module.this 11 | hosted_zone_name = each.value 12 | fqdn = each.value 13 | } 14 | -------------------------------------------------------------------------------- /terraform/res_keystore.tf: -------------------------------------------------------------------------------- 1 | module "keystore" { 2 | source = "./docdb" 3 | context = module.this 4 | 5 | db_name = local.keystore_name 6 | default_database = local.keystore_name 7 | port = local.ports.docdb 8 | primary_instance_count = var.keystore_primary_instance_count 9 | primary_instance_class = 
var.keystore_primary_instance_class 10 | replica_instance_count = var.keystore_replica_instance_count 11 | replica_instance_class = var.keystore_replica_instance_class 12 | 13 | vpc_id = module.vpc.vpc_id 14 | private_subnet_ids = module.vpc.intra_subnets 15 | allowed_ingress_cidr_blocks = [module.vpc.vpc_cidr_block] 16 | allowed_egress_cidr_blocks = [module.vpc.vpc_cidr_block] 17 | } 18 | -------------------------------------------------------------------------------- /terraform/res_monitoring.tf: -------------------------------------------------------------------------------- 1 | module "monitoring" { 2 | source = "./monitoring" 3 | context = module.this 4 | 5 | notification_channels = var.notification_channels 6 | prometheus_endpoint = aws_prometheus_workspace.prometheus.prometheus_endpoint 7 | ecs_service_name = module.ecs.service_name 8 | ecs_target_group_arn = module.ecs.target_group_arn 9 | load_balancer_arn = module.ecs.load_balancer_arn_suffix 10 | keystore_cluster_id = module.keystore.cluster_id 11 | monitoring_role_arn = data.terraform_remote_state.monitoring.outputs.grafana_workspaces.central.iam_role_arn 12 | ecs_task_family = module.ecs.ecs_task_family 13 | } 14 | -------------------------------------------------------------------------------- /terraform/res_network.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | ports = { 3 | http = 80 4 | https = 443 5 | ssh = 22 6 | docdb = 27017 7 | } 8 | 9 | vpc_cidr = "10.0.0.0/16" 10 | vpc_azs = slice(data.aws_availability_zones.available.names, 0, 3) 11 | vpc_flow_s3_bucket_name = "vpc-flow-logs-${random_pet.this.id}" 12 | } 13 | 14 | #------------------------------------------------------------------------------- 15 | # VPC 16 | 17 | data "aws_availability_zones" "available" {} 18 | 19 | #tfsec:ignore:aws-ec2-no-public-ingress-acl 20 | #tfsec:ignore:aws-ec2-require-vpc-flow-logs-for-all-vpcs 21 | #tfsec:ignore:aws-ec2-no-excessive-port-access 22 | module 
"vpc" { 23 | source = "terraform-aws-modules/vpc/aws" 24 | version = "5.1" 25 | 26 | name = module.this.name 27 | 28 | cidr = local.vpc_cidr 29 | azs = local.vpc_azs 30 | manage_default_network_acl = true 31 | intra_subnets = [for k, v in local.vpc_azs : cidrsubnet(local.vpc_cidr, 8, k)] 32 | public_subnets = [for k, v in local.vpc_azs : cidrsubnet(local.vpc_cidr, 8, k + 4)] 33 | private_subnets = [for k, v in local.vpc_azs : cidrsubnet(local.vpc_cidr, 8, k + 8)] 34 | 35 | intra_subnet_tags = { 36 | Visibility = "intra" 37 | } 38 | public_subnet_tags = { 39 | Visibility = "public" 40 | } 41 | private_subnet_tags = { 42 | Visibility = "private" 43 | } 44 | 45 | enable_dns_support = true 46 | enable_dns_hostnames = true 47 | enable_nat_gateway = true 48 | single_nat_gateway = true 49 | one_nat_gateway_per_az = false 50 | 51 | enable_flow_log = true 52 | flow_log_file_format = "parquet" 53 | flow_log_destination_type = "s3" 54 | flow_log_destination_arn = module.vpc_flow_s3_bucket.s3_bucket_arn 55 | vpc_flow_log_tags = module.this.tags 56 | } 57 | 58 | module "vpc_endpoints" { 59 | source = "terraform-aws-modules/vpc/aws//modules/vpc-endpoints" 60 | version = "5.1" 61 | 62 | vpc_id = module.vpc.vpc_id 63 | 64 | endpoints = { 65 | cloudwatch = { 66 | service = "monitoring" 67 | }, 68 | cloudwatch-events = { 69 | service = "events" 70 | }, 71 | cloudwatch-logs = { 72 | service = "logs" 73 | }, 74 | ecs = { 75 | service = "ecs" 76 | }, 77 | ecs-agent = { 78 | service = "ecs-agent" 79 | }, 80 | ecs-telemetry = { 81 | service = "ecs-telemetry" 82 | }, 83 | elastic-load-balancing = { 84 | service = "elasticloadbalancing" 85 | }, 86 | kms = { 87 | service = "kms" 88 | }, 89 | s3 = { 90 | service = "s3" 91 | }, 92 | } 93 | } 94 | 95 | #------------------------------------------------------------------------------- 96 | # VPC Flow S3 Bucket 97 | 98 | #TODO: Enable bucket logging and send logs to bucket on security account. 
99 | #tfsec:ignore:aws-s3-enable-versioning 100 | #tfsec:ignore:aws-s3-enable-bucket-logging 101 | #tfsec:ignore:aws-s3-enable-bucket-encryption 102 | #tfsec:ignore:aws-s3-encryption-customer-key 103 | module "vpc_flow_s3_bucket" { 104 | source = "terraform-aws-modules/s3-bucket/aws" 105 | version = "~> 3.14" 106 | 107 | bucket = local.vpc_flow_s3_bucket_name 108 | policy = data.aws_iam_policy_document.vpc_flow_log_s3.json 109 | force_destroy = true 110 | 111 | lifecycle_rule = [ 112 | { 113 | id = "transition-old-logs" 114 | enabled = true 115 | 116 | transition = [ 117 | { 118 | days = 30 119 | storage_class = "ONEZONE_IA" 120 | }, 121 | { 122 | days = 60 123 | storage_class = "GLACIER" 124 | } 125 | ] 126 | } 127 | ] 128 | } 129 | 130 | data "aws_iam_policy_document" "vpc_flow_log_s3" { 131 | statement { 132 | sid = "AWSLogDeliveryWrite" 133 | 134 | principals { 135 | type = "Service" 136 | identifiers = ["delivery.logs.amazonaws.com"] 137 | } 138 | 139 | actions = ["s3:PutObject"] 140 | 141 | resources = ["arn:aws:s3:::${local.vpc_flow_s3_bucket_name}/AWSLogs/*"] 142 | } 143 | 144 | statement { 145 | sid = "AWSLogDeliveryAclCheck" 146 | 147 | principals { 148 | type = "Service" 149 | identifiers = ["delivery.logs.amazonaws.com"] 150 | } 151 | 152 | actions = ["s3:GetBucketAcl"] 153 | 154 | resources = ["arn:aws:s3:::${local.vpc_flow_s3_bucket_name}"] 155 | } 156 | } 157 | -------------------------------------------------------------------------------- /terraform/res_prometheus.tf: -------------------------------------------------------------------------------- 1 | resource "aws_prometheus_workspace" "prometheus" { 2 | alias = "prometheus-${module.this.id}" 3 | } 4 | 5 | resource "aws_prometheus_alert_manager_definition" "prometheus_alerts" { 6 | workspace_id = aws_prometheus_workspace.prometheus.id 7 | definition = <<-EOF 8 | alertmanager_config: | 9 | route: 10 | receiver: 'BetterUptime' 11 | receivers: 12 | - name: 'BetterUptime' 13 | sns_configs: 14 | - 
topic_arn: ${aws_sns_topic.prometheus_webhook.arn} 15 | EOF 16 | } 17 | 18 | #tfsec:ignore:aws-sns-enable-topic-encryption 19 | resource "aws_sns_topic" "prometheus_webhook" { 20 | name = "prometheus-webhook" 21 | display_name = "Prometheus Webhook forwarding to BetterUptime" 22 | } 23 | 24 | resource "aws_sns_topic_subscription" "prometheus_webhook" { 25 | endpoint = var.betterstack_prometheus_webhook 26 | protocol = "https" 27 | topic_arn = aws_sns_topic.prometheus_webhook.arn 28 | } 29 | -------------------------------------------------------------------------------- /terraform/terraform.tf: -------------------------------------------------------------------------------- 1 | # Terraform Configuration 2 | terraform { 3 | required_version = ">= 1.0" 4 | 5 | backend "remote" { 6 | hostname = "app.terraform.io" 7 | organization = "wallet-connect" 8 | workspaces { 9 | prefix = "keyserver-" 10 | } 11 | } 12 | 13 | required_providers { 14 | aws = { 15 | source = "hashicorp/aws" 16 | version = ">= 5.7" 17 | } 18 | grafana = { 19 | source = "grafana/grafana" 20 | version = ">= 2.1" 21 | } 22 | random = { 23 | source = "hashicorp/random" 24 | version = "3.5.1" 25 | } 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /terraform/variables.tf: -------------------------------------------------------------------------------- 1 | #------------------------------------------------------------------------------- 2 | # Configuration 3 | 4 | variable "grafana_auth" { 5 | description = "The API Token for the Grafana instance" 6 | type = string 7 | default = "" 8 | } 9 | 10 | #------------------------------------------------------------------------------- 11 | # Application 12 | 13 | variable "name" { 14 | description = "The name of the application" 15 | type = string 16 | default = "keyserver" 17 | } 18 | 19 | variable "region" { 20 | description = "AWS region to deploy to" 21 | type = string 22 | } 23 | 24 | variable "image_version" 
{ 25 | description = "The version of the image to deploy" 26 | type = string 27 | } 28 | 29 | variable "log_level" { 30 | description = "Defines logging level for the application" 31 | type = string 32 | } 33 | 34 | variable "ofac_blocked_countries" { 35 | description = "The list of countries to block" 36 | type = string 37 | default = "" 38 | } 39 | 40 | variable "project_id" { 41 | description = "Project ID for Blockchain API" 42 | type = string 43 | } 44 | 45 | #------------------------------------------------------------------------------- 46 | # Keystore 47 | 48 | variable "keystore_primary_instance_count" { 49 | description = "The number of primary docdb instances to deploy" 50 | type = number 51 | } 52 | 53 | variable "keystore_primary_instance_class" { 54 | description = "The instance class of the primary docdb instances" 55 | type = string 56 | } 57 | 58 | variable "keystore_replica_instance_count" { 59 | description = "The number of replica docdb instances to deploy" 60 | type = number 61 | } 62 | 63 | variable "keystore_replica_instance_class" { 64 | description = "The instance class of the replica docdb instances" 65 | type = string 66 | } 67 | 68 | #------------------------------------------------------------------------------- 69 | # Monitoring 70 | 71 | variable "notification_channels" { 72 | description = "The notification channels to send alerts to" 73 | type = list(any) 74 | default = [] 75 | } 76 | 77 | variable "betterstack_prometheus_webhook" { 78 | description = "The BetterStack webhook to send Prometheus alerts to" 79 | type = string 80 | sensitive = true 81 | } 82 | 83 | variable "betterstack_cloudwatch_webhook" { 84 | description = "The BetterStack webhook to send CloudWatch alerts to" 85 | type = string 86 | sensitive = true 87 | } 88 | 89 | #--------------------------------------- 90 | # GeoIP 91 | 92 | variable "geoip_db_key" { 93 | description = "The name of the GeoIP database" 94 | type = string 95 | default = "GeoLite2-City.mmdb" 96 | 
} 97 | --------------------------------------------------------------------------------