├── .dockerignore ├── .editorconfig ├── .env.example ├── .env.multi-tenant-example ├── .env.single-tenant-example ├── .github ├── ISSUE_TEMPLATE │ ├── bug.yml │ └── feature_request.yml ├── SECRETS.md ├── codeowners ├── pull_request_template.md └── workflows │ ├── auto_deploy.yml │ ├── cd.yml │ ├── ci.yml │ ├── ci_terraform.yml │ ├── intake.yml │ ├── release.yml │ └── validate.yml ├── .gitignore ├── .gitmodules ├── .pre-commit-config.yaml ├── CHANGELOG.md ├── Cargo.lock ├── Cargo.toml ├── Contributors.md ├── Dockerfile ├── LICENSE ├── README.md ├── RELEASE.md ├── build.rs ├── cog.toml ├── crates └── is-variant-derive │ ├── .gitignore │ ├── Cargo.lock │ ├── Cargo.toml │ ├── README.md │ └── src │ └── lib.rs ├── docker-compose.multi-tenant.yml ├── docker-compose.storage.yml ├── docker-compose.yml ├── integration ├── .prettierrc ├── integration.test.ts └── jestconfig.integration.json ├── justfile ├── migrations ├── 1664117744_init.sql ├── 1664117746_create-clients.sql ├── 1664117751_create-notifications.sql ├── 1667510128_add-tenant-id.sql ├── 1675269994_add-sandbox-to-enum.sql ├── 1695381202_alter_notifications_constraint.sql ├── 1695631804_add-unique-device-tokens.sql ├── 1699887339_notifications-primary-key-includes-client-id.sql ├── 1699982551_add_always_raw_to_clients.sql ├── README.md └── new.sh ├── package.json ├── rustfmt.toml ├── shell.nix ├── slim.Dockerfile ├── src ├── analytics │ ├── client_info.rs │ ├── message_info.rs │ └── mod.rs ├── blob.rs ├── config.rs ├── error.rs ├── handlers │ ├── create_tenant.rs │ ├── delete_apns.rs │ ├── delete_client.rs │ ├── delete_fcm.rs │ ├── delete_fcm_v1.rs │ ├── delete_tenant.rs │ ├── get_tenant.rs │ ├── health.rs │ ├── metrics.rs │ ├── mod.rs │ ├── push_message.rs │ ├── rate_limit_test.rs │ ├── register_client.rs │ ├── single_tenant_wrappers.rs │ ├── update_apns.rs │ ├── update_fcm.rs │ └── update_fcm_v1.rs ├── jwt_validation │ └── mod.rs ├── lib.rs ├── log │ └── mod.rs ├── macros.rs ├── main.rs ├── metrics │ └── mod.rs 
├── middleware │ ├── mod.rs │ ├── rate_limit.rs │ └── validate_signature.rs ├── networking.rs ├── providers │ ├── apns.rs │ ├── fcm.rs │ ├── fcm_v1.rs │ ├── mod.rs │ └── noop.rs ├── relay │ └── mod.rs ├── state.rs └── stores │ ├── client.rs │ ├── mod.rs │ ├── notification.rs │ └── tenant.rs ├── tenant_migrations ├── 1667510326_initial.sql ├── 1667510351_create-tenants.sql ├── 1674744346_deprecate-sandbox-flag.sql ├── 1676394119_apns-team-tokens.sql ├── 1676813285_fix-apns-team-tokens.sql ├── 1691518766_add-suspension.sql ├── 1713226293_fcm-v1.sql ├── README.md └── new.sh ├── terraform ├── .terraform-docs.yml ├── .terraform.lock.hcl ├── README.md ├── backend.tf ├── ecs │ ├── README.md │ ├── iam.tf │ ├── main.tf │ ├── outputs.tf │ ├── terraform.tf │ └── variables.tf ├── inputs.tf ├── main.tf ├── monitoring │ ├── README.md │ ├── context.tf │ ├── dashboard.jsonnet │ ├── main.tf │ ├── panels │ │ ├── app │ │ │ ├── postgres_query_latency.libsonnet │ │ │ └── postgres_query_rate.libsonnet │ │ └── panels.libsonnet │ ├── terraform.tf │ └── variables.tf ├── provider.tf ├── variables.tf └── vars │ ├── README.md │ ├── dev.tfvars │ ├── prod.tfvars │ └── staging.tfvars ├── tests ├── context │ ├── mod.rs │ ├── server.rs │ └── stores.rs ├── functional │ ├── mod.rs │ ├── multitenant │ │ ├── apns.rs │ │ ├── fcm.rs │ │ ├── fcm_v1.rs │ │ ├── mod.rs │ │ └── tenancy.rs │ ├── singletenant │ │ ├── mod.rs │ │ ├── push.rs │ │ └── registration.rs │ └── stores │ │ ├── client.rs │ │ ├── mod.rs │ │ ├── notification.rs │ │ └── tenant.rs ├── integration.rs └── unit │ ├── messages.rs │ ├── middleware │ ├── mod.rs │ └── validate_signature.rs │ └── mod.rs └── yarn.lock /.dockerignore: -------------------------------------------------------------------------------- 1 | /.git 2 | /**/node_modules 3 | /**/target 4 | /terraform -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 
| root = true 2 | 3 | [*] 4 | charset = utf-8 5 | end_of_line = lf 6 | indent_size = 2 7 | tab_width = 2 8 | indent_style = space 9 | insert_final_newline = true 10 | max_line_length = 80 11 | trim_trailing_whitespace = true 12 | 13 | [*.rs] 14 | indent_size = 4 15 | tab_width = 4 16 | -------------------------------------------------------------------------------- /.env.example: -------------------------------------------------------------------------------- 1 | PORT=3000 2 | PUBLIC_URL=https://echo.walletconnect.com 3 | DATABASE_URL=postgres://user:pass@host:port/database 4 | DISABLE_HEADER=false 5 | 6 | # Public key can be obtained from https://relay.walletconnect.com/public-key 7 | RELAY_PUBLIC_KEY= 8 | 9 | # Should Echo Server validate messages it receives are from the Relay when attempting to send a push notification 10 | VALIDATE_SIGNATURES=true 11 | 12 | # Filter irrelevant logs from other crates, but enable traces for the relay. 13 | # We're using separate log levels for stderr and telemetry. Note: telemetry 14 | # exports require 'trace' log level. 
15 | LOG_LEVEL=info,echo-server=info 16 | 17 | # Multi-Tenancy 18 | TENANT_DATABASE_URL= 19 | DEFAULT_TENANT_ID= # This has a default value and doesn't hold much impact to the running of echo-server 20 | JWT_SECRET= 21 | 22 | # CORS 23 | CORS_ALLOWED_ORIGINS=* 24 | 25 | # Telemetry 26 | TELEMETRY_PROMETHEUS_PORT=3001 27 | 28 | # FCM 29 | FCM_API_KEY= 30 | FCM_V1_CREDENTIALS= 31 | 32 | # APNS 33 | APNS_CERTIFICATE= # base64 encoded .p12 APNS Certificate 34 | APNS_CERTIFICATE_PASSWORD= # Password for provided certificate 35 | APNS_TOPIC= # bundle ID/app ID 36 | 37 | # Analytics 38 | ANALYTICS_S3_ENDPOINT= 39 | ANALYTICS_EXPORT_BUCKET= 40 | ANALYTICS_GEOIP_DB_BUCKET= 41 | ANALYTICS_GEOIP_DB_KEY= 42 | -------------------------------------------------------------------------------- /.env.multi-tenant-example: -------------------------------------------------------------------------------- 1 | PORT=3000 2 | PUBLIC_URL=http://localhost:3000 3 | DATABASE_URL=postgres://user:pass@host:port/database 4 | LOG_LEVEL=debug,echo-server=debug 5 | 6 | # Public key can be obtained from https://relay.walletconnect.com/public-key 7 | RELAY_PUBLIC_KEY= 8 | 9 | # Don't validate signatures - allows for users to send push notifications from 10 | # HTTP clients e.g. 
curl, insomnia, postman, etc 11 | VALIDATE_SIGNATURES=false 12 | 13 | # Multi-Tenancy 14 | TENANT_DATABASE_URL=postgres://user:pass@host:port/tenant-database 15 | JWT_SECRET=example-secret 16 | 17 | # Telemetry 18 | TELEMETRY_PROMETHEUS_PORT=3001 19 | 20 | # CORS 21 | CORS_ALLOWED_ORIGINS=* 22 | -------------------------------------------------------------------------------- /.env.single-tenant-example: -------------------------------------------------------------------------------- 1 | PORT=3000 2 | PUBLIC_URL=http://localhost:3000 3 | DATABASE_URL=postgres://user:pass@host:port/database 4 | LOG_LEVEL=debug,echo-server=debug 5 | 6 | # Public key can be obtained from https://relay.walletconnect.com/public-key 7 | RELAY_PUBLIC_KEY= 8 | 9 | # Don't validate signatures - allows for users to send push notifications from 10 | # HTTP clients e.g. curl, insomnia, postman, etc 11 | VALIDATE_SIGNATURES=false 12 | 13 | # CORS 14 | CORS_ALLOWED_ORIGINS=* 15 | 16 | # Telemetry 17 | TELEMETRY_PROMETHEUS_PORT=3001 18 | 19 | # FCM 20 | FCM_API_KEY= # Firebase Cloud Messaging Server Key 21 | FCM_V1_CREDENTIALS= # Firebase Cloud Messaging Service Account Credentials 22 | 23 | # APNS 24 | APNS_CERTIFICATE= # base64 encoded .p12 APNS Certificate 25 | APNS_CERTIFICATE_PASSWORD= # Password for provided certificate 26 | APNS_TOPIC= # bundle ID/app ID 27 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug.yml: -------------------------------------------------------------------------------- 1 | name: Bug Report 2 | description: File a bug report 3 | title: "bug: " 4 | labels: 5 | - bug 6 | body: 7 | - type: markdown 8 | attributes: 9 | value: | 10 | Thanks for taking the time to fill out this bug report! 🐛 11 | - type: checkboxes 12 | attributes: 13 | label: Is there an existing issue for this? 14 | description: Please search to see if an issue already exists for the bug you encountered. 
15 | options: 16 | - label: I have searched the existing issues 17 | required: true 18 | - type: textarea 19 | attributes: 20 | label: Current Behavior 21 | description: A concise description of what you're experiencing. 22 | validations: 23 | required: true 24 | - type: textarea 25 | attributes: 26 | label: Expected Behavior 27 | description: A concise description of what you expected to happen. 28 | validations: 29 | required: true 30 | - type: textarea 31 | attributes: 32 | label: Steps To Reproduce 33 | description: Steps to reproduce the behavior. 34 | placeholder: | 35 | 1. In this environment... 36 | 2. With this config... 37 | 3. Run '...' 38 | 4. See error... 39 | validations: 40 | required: true 41 | - type: textarea 42 | attributes: 43 | label: Environment 44 | description: | 45 | examples: 46 | - **OS**: MacOS Monterey 12.5 47 | - **rustc**: rustc 1.62.1 (e092d0b6b 2022-07-16) 48 | - **cargo**: cargo 1.62.1 (a748cf5a3 2022-06-08) 49 | 50 | > **Note** 51 | > If using docker image please provide docker version and the image's tag 52 | value: | 53 | - OS: 54 | - rustc: 55 | - cargo: 56 | render: markdown 57 | validations: 58 | required: false 59 | - type: textarea 60 | id: logs 61 | attributes: 62 | label: Relevant log output 63 | description: Please copy and paste any relevant log output. This will be automatically formatted into code, so no need for backticks. 64 | render: shell 65 | validations: 66 | required: false 67 | - type: textarea 68 | attributes: 69 | label: Anything else? 70 | description: | 71 | Links? References? Anything that will give us more context about the issue you are encountering! 72 | 73 | Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in. 
74 | validations: 75 | required: false -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.yml: -------------------------------------------------------------------------------- 1 | name: Feature Request 2 | description: Request a new feature be added 3 | title: "feat: " 4 | labels: 5 | - enhancement 6 | body: 7 | - type: markdown 8 | attributes: 9 | value: | 10 | Thanks for taking the time to suggest a new feature for Echo Server! ✨ 11 | - type: checkboxes 12 | attributes: 13 | label: Is there an existing issue for this? 14 | description: Please search to see if an issue already exists for the feature you would like. 15 | options: 16 | - label: I have searched the existing issues 17 | required: true 18 | - type: textarea 19 | attributes: 20 | label: Current Behavior 21 | description: A concise description of what you're experiencing. 22 | validations: 23 | required: true 24 | - type: textarea 25 | attributes: 26 | label: Requested Behavior 27 | description: A concise description of what you expected to happen. 28 | validations: 29 | required: true 30 | - type: textarea 31 | attributes: 32 | label: Anything else? 33 | description: | 34 | Links? References? Anything that will give us more context about the issue you are encountering! 35 | 36 | Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in. 37 | validations: 38 | required: false -------------------------------------------------------------------------------- /.github/SECRETS.md: -------------------------------------------------------------------------------- 1 | ## `PROD_JWT_SECRET` & `STAGING_JWT_SECRET` 2 | 3 | From 1Password: `cloud/push-server-jwt/prod` and `cloud/push-server-jwt/staging` 4 | 5 | Generated randomly and used by Cloud app to sign JWTs. 
6 | 7 | ## `ECHO_TEST_FCM_V1_CREDENTIALS` 8 | 9 | From 1Password: `Firebase Push Server Tests Service Account` 10 | 11 | FCM v1 service account credentials for test cases. 12 | 13 | Setup: 14 | - Go to the Push Server Tests Firebase project: https://console.firebase.google.com/project/push-server-tests-cc0f7/settings/cloudmessaging 15 | - On Cloud Messaging tab, under the "Firebase Cloud Messaging API (V1)" header, click the "Manage Service Accounts" link 16 | - Select the service account and click "Manage keys" 17 | - Click "Add key" and select "Create new key" and pick JSON 18 | -------------------------------------------------------------------------------- /.github/codeowners: -------------------------------------------------------------------------------- 1 | * @geekbrother 2 | * @chris13524 -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | # Description 2 | 3 | 8 | 9 | Resolves # (issue) 10 | 11 | ## How Has This Been Tested? 
12 | 13 | 18 | 19 | 20 | 21 | ## Due Diligence 22 | 23 | * [ ] Breaking change 24 | * [ ] Requires a documentation update 25 | * [ ] Requires an e2e/integration test update -------------------------------------------------------------------------------- /.github/workflows/auto_deploy.yml: -------------------------------------------------------------------------------- 1 | name: auto_deploy 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | 8 | permissions: 9 | contents: write 10 | packages: write 11 | 12 | jobs: 13 | build-container: 14 | runs-on: ubuntu-latest 15 | strategy: 16 | matrix: 17 | file: [Dockerfile, slim.Dockerfile] 18 | steps: 19 | - uses: actions/checkout@v3 20 | with: 21 | fetch-depth: 0 22 | submodules: recursive 23 | token: ${{ secrets.RELEASE_PAT }} 24 | 25 | - name: Configure AWS Credentials 26 | uses: aws-actions/configure-aws-credentials@v1 27 | with: 28 | aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} 29 | aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} 30 | aws-region: eu-central-1 31 | 32 | # Authenticate with ECR 33 | - name: Login to Amazon ECR 34 | id: login-ecr 35 | uses: aws-actions/amazon-ecr-login@v1 36 | 37 | - name: Login to GitHub Container Registry 38 | uses: docker/login-action@v2 39 | with: 40 | registry: ghcr.io 41 | username: ${{ github.actor }} 42 | password: ${{ secrets.GITHUB_TOKEN }} 43 | logout: false 44 | 45 | - name: Set tag suffix 46 | id: suffix 47 | run: | 48 | if [[ "${{ matrix.file }}" == *.* ]]; then 49 | echo "::set-output name=suffix:::$(echo "${{ matrix.file }}" | cut -d'.' 
-f1)" 50 | else 51 | echo "::set-output name=suffix::" 52 | fi 53 | 54 | - name: Docker meta 55 | id: meta 56 | uses: docker/metadata-action@v4 57 | with: 58 | images: | 59 | ${{ steps.login-ecr.outputs.registry }}/echo-server 60 | ghcr.io/${{ github.repository }} 61 | walletconnect/echo-server,enable=false 62 | flavor: | 63 | latest=auto 64 | tags: | 65 | type=raw,value=${{ github.sha }},suffix=${{ steps.suffix.outputs.suffix }} 66 | 67 | # Setup Buildkit 68 | - name: Set up Docker Buildx 69 | uses: docker/setup-buildx-action@v2 70 | 71 | - name: Build, tag, and push image 72 | uses: docker/build-push-action@v3 73 | with: 74 | context: . 75 | file: ${{ matrix.file }} 76 | push: true 77 | tags: ${{ steps.meta.outputs.tags }} 78 | labels: ${{ steps.meta.outputs.labels }} 79 | cache-from: type=gha 80 | cache-to: type=gha,mode=max 81 | run-ci: 82 | uses: ./.github/workflows/ci.yml 83 | secrets: inherit 84 | run-cd: 85 | needs: 86 | - build-container 87 | - run-ci 88 | # call the cd.yml file with image tag as the commit hash 89 | uses: ./.github/workflows/cd.yml 90 | with: 91 | image_tag: ${{ github.sha }} 92 | deploy_to_staging: true 93 | deploy_to_prod: false 94 | secrets: inherit 95 | -------------------------------------------------------------------------------- /.github/workflows/cd.yml: -------------------------------------------------------------------------------- 1 | name: cd 2 | on: 3 | workflow_dispatch: 4 | inputs: 5 | deploy_to_staging: 6 | description: "Deploy to staging" 7 | type: boolean 8 | required: true 9 | default: true 10 | deploy_to_prod: 11 | description: "Deploy to production" 12 | type: boolean 13 | required: true 14 | default: false 15 | image_tag: 16 | description: "App image tag. 
Default: latest release" 17 | type: string 18 | required: false 19 | default: "" 20 | workflow_call: 21 | inputs: 22 | deploy_to_staging: 23 | type: boolean 24 | required: true 25 | default: true 26 | deploy_to_prod: 27 | type: boolean 28 | required: false 29 | default: false 30 | image_tag: 31 | type: string 32 | required: true 33 | 34 | concurrency: 35 | # Only allow for one action to run at once, queue any others 36 | group: cd 37 | # Don't cancel existing 38 | cancel-in-progress: false 39 | 40 | jobs: 41 | deploy-infra-staging: 42 | if: ${{ inputs.deploy_to_staging }} 43 | runs-on: ubuntu-latest 44 | environment: 45 | name: staging 46 | url: https://staging.echo.walletconnect.com/health 47 | steps: 48 | - name: Checkout 49 | uses: actions/checkout@v3 50 | with: 51 | submodules: recursive 52 | token: ${{ secrets.RELEASE_PAT }} 53 | 54 | - name: Configure AWS Credentials 55 | uses: aws-actions/configure-aws-credentials@v1 56 | with: 57 | aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} 58 | aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} 59 | aws-region: eu-central-1 60 | 61 | - name: Setup Terraform 62 | uses: hashicorp/setup-terraform@v2 63 | with: 64 | cli_config_credentials_token: ${{ secrets.TF_API_TOKEN }} 65 | 66 | - name: Get Grafana details 67 | id: grafana-get-details 68 | uses: WalletConnect/actions/aws/grafana/get-details/@1.0.3 69 | 70 | - name: Get Grafana key 71 | id: grafana-get-key 72 | uses: WalletConnect/actions/aws/grafana/get-key/@1.0.3 73 | with: 74 | key-prefix: ${{ github.event.repository.name }} 75 | workspace-id: ${{ steps.grafana-get-details.outputs.workspace-id }} 76 | 77 | - name: Init Terraform 78 | id: tf-init 79 | uses: WalletConnect/actions/terraform/init/@1.0.3 80 | with: 81 | environment: "staging" 82 | 83 | - name: Deploy Terraform to Staging 84 | id: tf-apply 85 | uses: WalletConnect/actions/terraform/apply/@1.0.3 86 | env: 87 | TF_VAR_grafana_auth: ${{ steps.grafana-get-key.outputs.key }} 88 | 
TF_VAR_grafana_endpoint: ${{ steps.grafana-get-details.outputs.endpoint }} 89 | TF_VAR_jwt_secret: ${{ secrets.STAGING_JWT_SECRET }} 90 | TF_VAR_image_version: ${{ inputs.image_tag }} 91 | TF_VAR_relay_public_key: ${{ secrets.RELAY_PUBLIC_KEY }} 92 | with: 93 | environment: "staging" 94 | 95 | - name: Delete Grafana key 96 | id: grafana-delete-key 97 | uses: WalletConnect/actions/aws/grafana/delete-key/@1.0.3 98 | if: ${{ success() || failure() || cancelled() }} # don't use always() since it creates non-cancellable jobs 99 | with: 100 | key-name: ${{ steps.grafana-get-key.outputs.key-name }} 101 | workspace-id: ${{ steps.grafana-get-details.outputs.workspace-id }} 102 | 103 | validate_staging: 104 | if: ${{ inputs.deploy_to_staging }} 105 | needs: [deploy-infra-staging] 106 | uses: ./.github/workflows/validate.yml 107 | with: 108 | environment: 'staging' 109 | secrets: 110 | TEST_TENANT_ID: ${{ secrets.TEST_TENANT_ID }} 111 | 112 | deploy-infra-prod: 113 | if: ${{ inputs.deploy_to_prod }} 114 | runs-on: ubuntu-latest 115 | environment: 116 | name: prod 117 | url: https://echo.walletconnect.com/health 118 | needs: 119 | - validate_staging 120 | steps: 121 | - name: Checkout 122 | uses: actions/checkout@v3 123 | with: 124 | submodules: recursive 125 | token: ${{ secrets.RELEASE_PAT }} 126 | 127 | - name: Configure AWS Credentials 128 | uses: aws-actions/configure-aws-credentials@v1 129 | with: 130 | aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} 131 | aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} 132 | aws-region: eu-central-1 133 | 134 | - name: Setup Terraform 135 | uses: hashicorp/setup-terraform@v2 136 | with: 137 | cli_config_credentials_token: ${{ secrets.TF_API_TOKEN }} 138 | 139 | - name: Get Grafana details 140 | id: grafana-get-details 141 | uses: WalletConnect/actions/aws/grafana/get-details/@1.0.3 142 | 143 | - name: Get Grafana key 144 | id: grafana-get-key 145 | uses: WalletConnect/actions/aws/grafana/get-key/@1.0.3 146 | with: 147 | 
key-prefix: ${{ github.event.repository.name }} 148 | workspace-id: ${{ steps.grafana-get-details.outputs.workspace-id }} 149 | 150 | - name: Init Terraform 151 | id: tf-init 152 | uses: WalletConnect/actions/terraform/init/@1.0.3 153 | with: 154 | environment: "prod" 155 | 156 | - name: Deploy Terraform to Production 157 | id: tf-apply 158 | uses: WalletConnect/actions/terraform/apply/@1.0.3 159 | env: 160 | TF_VAR_grafana_auth: ${{ steps.grafana-get-key.outputs.key }} 161 | TF_VAR_grafana_endpoint: ${{ steps.grafana-get-details.outputs.endpoint }} 162 | TF_VAR_jwt_secret: ${{ secrets.PROD_JWT_SECRET }} 163 | TF_VAR_image_version: ${{ inputs.image_tag }} 164 | TF_VAR_relay_public_key: ${{ secrets.RELAY_PUBLIC_KEY }} 165 | with: 166 | environment: "prod" 167 | 168 | - name: Delete Grafana key 169 | id: grafana-delete-key 170 | uses: WalletConnect/actions/aws/grafana/delete-key/@1.0.3 171 | if: ${{ success() || failure() || cancelled() }} # don't use always() since it creates non-cancellable jobs 172 | with: 173 | key-name: ${{ steps.grafana-get-key.outputs.key-name }} 174 | workspace-id: ${{ steps.grafana-get-details.outputs.workspace-id }} 175 | -------------------------------------------------------------------------------- /.github/workflows/ci_terraform.yml: -------------------------------------------------------------------------------- 1 | name: ci_terraform 2 | on: 3 | push: 4 | branches: 5 | - main 6 | paths: 7 | - "terraform/**" 8 | pull_request: 9 | paths: 10 | - "terraform/**" 11 | 12 | concurrency: 13 | # Support push/pr as event types with different behaviors each: 14 | # 1. push: queue up builds 15 | # 2. 
pr: only allow one run per PR 16 | group: ${{ github.workflow }}-${{ github.event.type }}${{ github.event.pull_request.number }} 17 | # If there is already a workflow running for the same pull request, cancel it 18 | cancel-in-progress: ${{ github.event.type == 'PullRequest' }} 19 | 20 | permissions: 21 | contents: write 22 | 23 | jobs: 24 | check-fmt: 25 | runs-on: ubuntu-latest 26 | steps: 27 | - name: Checkout 28 | uses: actions/checkout@v4 29 | with: 30 | submodules: recursive 31 | token: ${{ secrets.RELEASE_TOKEN_V2 }} 32 | 33 | - name: Setup Terraform 34 | uses: hashicorp/setup-terraform@v2 35 | with: 36 | cli_config_credentials_token: ${{ secrets.TF_API_TOKEN }} 37 | 38 | - name: Init Terraform 39 | id: tf-init 40 | uses: WalletConnect/actions/terraform/init/@2.5.4 41 | with: 42 | environment: staging 43 | 44 | - name: Check Terraform Formatting 45 | id: tf-fmt 46 | uses: WalletConnect/actions/terraform/check-fmt/@2.5.4 47 | 48 | get-version: 49 | if: github.event_name == 'pull_request' 50 | runs-on: ubuntu-latest 51 | outputs: 52 | version: ${{ steps.clean_version.outputs.version }} 53 | steps: 54 | #TODO: Authenticate to avoid API rate limit exceeded errors. 
55 | - name: Get latest release for image version 56 | id: latest_release 57 | uses: pozetroninc/github-action-get-latest-release@master 58 | with: 59 | repository: ${{ github.repository }} 60 | excludes: draft 61 | 62 | - name: Get release value 63 | id: get_value 64 | uses: actions/github-script@v6 65 | env: 66 | LATEST_TAG: ${{ steps.latest_release.outputs.release }} 67 | with: 68 | result-encoding: string 69 | script: | 70 | if (context.eventName == "release") { 71 | return context.payload.release.tag_name 72 | } else { 73 | return process.env.LATEST_TAG 74 | } 75 | 76 | - name: Clean version 77 | id: clean_version 78 | run: | 79 | version=$(echo "${{ steps.get_value.outputs.result }}" | sed 's/v//g') 80 | echo "version=$version" >> $GITHUB_OUTPUT 81 | 82 | plan-staging: 83 | if: github.event_name == 'pull_request' 84 | needs: 85 | - get-version 86 | runs-on: ubuntu-latest 87 | environment: 88 | name: staging 89 | url: https://staging.echo.walletconnect.com/health 90 | 91 | steps: 92 | - name: Checkout 93 | uses: actions/checkout@v4 94 | with: 95 | submodules: recursive 96 | token: ${{ secrets.RELEASE_TOKEN_V2 }} 97 | 98 | - name: Configure AWS Credentials 99 | uses: aws-actions/configure-aws-credentials@v1 100 | with: 101 | aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} 102 | aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} 103 | aws-region: eu-central-1 104 | 105 | - name: Setup Terraform 106 | uses: hashicorp/setup-terraform@v2 107 | with: 108 | cli_config_credentials_token: ${{ secrets.TF_API_TOKEN }} 109 | 110 | - name: Get Grafana details 111 | id: grafana-get-details 112 | uses: WalletConnect/actions/aws/grafana/get-details/@2.5.4 113 | 114 | - name: Get Grafana key 115 | id: grafana-get-key 116 | uses: WalletConnect/actions/aws/grafana/get-key/@2.5.4 117 | with: 118 | key-prefix: ${{ github.event.repository.name }}-staging 119 | workspace-id: ${{ steps.grafana-get-details.outputs.workspace-id }} 120 | 121 | - name: Init Terraform 122 | 
id: tf-init 123 | uses: WalletConnect/actions/terraform/init/@2.5.4 124 | with: 125 | environment: staging 126 | 127 | - name: Run Terraform Plan 128 | id: tf-plan-staging 129 | uses: WalletConnect/actions/terraform/plan/@2.5.4 130 | env: 131 | TF_VAR_grafana_auth: ${{ steps.grafana-get-key.outputs.key }} 132 | TF_VAR_grafana_endpoint: ${{ steps.grafana-get-details.outputs.endpoint }} 133 | TF_VAR_jwt_secret: ${{ secrets.JWT_SECRET }} 134 | TF_VAR_relay_public_key: ${{ secrets.RELAY_PUBLIC_KEY }} 135 | with: 136 | github-token: ${{ secrets.GITHUB_TOKEN }} 137 | environment: staging 138 | 139 | - name: Delete Grafana key 140 | id: grafana-delete-key 141 | uses: WalletConnect/actions/aws/grafana/delete-key/@2.5.4 142 | if: ${{ success() || failure() || cancelled() }} # don't use always() since it creates non-cancellable jobs 143 | with: 144 | key-name: ${{ steps.grafana-get-key.outputs.key-name }} 145 | workspace-id: ${{ steps.grafana-get-details.outputs.workspace-id }} 146 | -------------------------------------------------------------------------------- /.github/workflows/intake.yml: -------------------------------------------------------------------------------- 1 | # This workflow moves issues to the Project board when they receive the "accepted" label 2 | # When WalletConnect Org members create issues they are automatically "accepted". 3 | # Otherwise, they need to manually receive that label during intake. 
4 | name: intake 5 | 6 | on: 7 | issues: 8 | types: [opened, labeled] 9 | pull_request: 10 | types: [opened, labeled] 11 | 12 | jobs: 13 | add-to-project: 14 | name: Add issue to board 15 | if: github.event_name == 'issues' && github.event.action == 'labeled' && github.event.label.name == 'accepted' 16 | runs-on: ubuntu-latest 17 | steps: 18 | - uses: actions/add-to-project@v0.1.0 19 | with: 20 | project-url: https://github.com/orgs/WalletConnect/projects/20 21 | github-token: ${{ secrets.ASSIGN_TO_PROJECT_GITHUB_TOKEN }} 22 | labeled: accepted 23 | label-operator: OR 24 | 25 | auto-promote: 26 | name: auto-promote 27 | if: github.event.action == 'opened' 28 | runs-on: ubuntu-latest 29 | steps: 30 | - name: Check Core Team membership 31 | uses: tspascoal/get-user-teams-membership@v1 32 | id: is-core-team 33 | with: 34 | username: ${{ github.event_name != 'pull_request' && github.event.issue.user.login || github.event.sender.login }} 35 | team: "Core Team" 36 | GITHUB_TOKEN: ${{ secrets.ASSIGN_TO_PROJECT_GITHUB_TOKEN }} 37 | - name: Print result 38 | env: 39 | CREATOR: ${{ github.event_name != 'pull_request' && github.event.issue.user.login || github.event.sender.login }} 40 | IS_TEAM_MEMBER: ${{ steps.is-core-team.outputs.isTeamMember }} 41 | run: echo "$CREATOR (Core Team Member $IS_TEAM_MEMBER) created this issue/PR" 42 | - name: Label issues 43 | if: ${{ steps.is-core-team.outputs.isTeamMember == 'true' }} 44 | uses: andymckay/labeler@e6c4322d0397f3240f0e7e30a33b5c5df2d39e90 45 | with: 46 | add-labels: "accepted" 47 | repo-token: ${{ secrets.ASSIGN_TO_PROJECT_GITHUB_TOKEN }} 48 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: release 2 | 3 | on: 4 | workflow_dispatch: 5 | inputs: 6 | deploy_to_staging: 7 | description: "Deploy to staging" 8 | type: boolean 9 | required: true 10 | default: true 11 | 
deploy_to_prod: 12 | description: "Deploy to production" 13 | type: boolean 14 | required: true 15 | default: false 16 | 17 | permissions: 18 | contents: write 19 | packages: write 20 | 21 | jobs: 22 | release: 23 | runs-on: ubuntu-latest 24 | outputs: 25 | version: ${{ steps.release.outputs.version }} 26 | steps: 27 | - uses: actions/checkout@v4 28 | with: 29 | fetch-depth: 0 30 | submodules: recursive 31 | token: ${{ secrets.RELEASE_PAT }} 32 | 33 | - name: Install lld and llvm 34 | run: sudo apt-get install -y lld llvm 35 | 36 | # TODO: Remove once https://crates.io/crates/opentelemetry-otlp is updated 37 | # and no longer has a build requirement of `protoc``. 38 | - name: Install Protoc 39 | uses: arduino/setup-protoc@v1 40 | with: 41 | repo-token: ${{ secrets.GITHUB_TOKEN }} 42 | 43 | - name: Update and commit the release version 44 | id: release 45 | uses: WalletConnect/actions/github/update-rust-version/@2.1.5 46 | with: 47 | token: ${{ secrets.RELEASE_PAT }} 48 | 49 | build-container: 50 | runs-on: ubuntu-latest 51 | strategy: 52 | matrix: 53 | file: [Dockerfile, slim.Dockerfile] 54 | needs: 55 | - release 56 | steps: 57 | - uses: actions/checkout@v4 58 | with: 59 | fetch-depth: 0 60 | # Ensure that we get the new version from updated Cargo.toml 61 | - name: Move to HEAD 62 | run: | 63 | git reset --hard HEAD 64 | - name: Configure AWS Credentials 65 | uses: aws-actions/configure-aws-credentials@v1 66 | with: 67 | aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} 68 | aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} 69 | aws-region: eu-central-1 70 | 71 | # Authenticate with ECR 72 | - name: Login to Amazon ECR 73 | id: login-ecr 74 | uses: aws-actions/amazon-ecr-login@v1 75 | 76 | - name: Login to GitHub Container Registry 77 | uses: docker/login-action@v2 78 | with: 79 | registry: ghcr.io 80 | username: ${{ github.actor }} 81 | password: ${{ secrets.GITHUB_TOKEN }} 82 | logout: false 83 | 84 | - name: Set tag suffix 85 | id: suffix 86 | run: 
| 87 | if [[ "${{ matrix.file }}" == *.* ]]; then 88 | echo "::set-output name=suffix:::$(echo "${{ matrix.file }}" | cut -d'.' -f1)" 89 | else 90 | echo "::set-output name=suffix::" 91 | fi 92 | 93 | - name: Docker meta 94 | id: meta 95 | uses: docker/metadata-action@v4 96 | with: 97 | images: | 98 | ${{ steps.login-ecr.outputs.registry }}/echo-server 99 | ghcr.io/${{ github.repository }} 100 | walletconnect/echo-server,enable=false 101 | flavor: | 102 | latest=auto 103 | tags: | 104 | type=semver,pattern={{version}},suffix=${{ steps.suffix.outputs.suffix }} 105 | type=semver,pattern={{major}}.{{minor}},suffix=${{ steps.suffix.outputs.suffix }} 106 | type=raw,value=${{ needs.release.outputs.version }},suffix=${{ steps.suffix.outputs.suffix }} 107 | 108 | # Setup Buildkit 109 | - name: Set up Docker Buildx 110 | uses: docker/setup-buildx-action@v2 111 | 112 | - name: Build, tag, and push image 113 | uses: docker/build-push-action@v3 114 | with: 115 | context: . 116 | file: ${{ matrix.file }} 117 | push: true 118 | tags: ${{ steps.meta.outputs.tags }} 119 | labels: ${{ steps.meta.outputs.labels }} 120 | cache-from: type=gha 121 | cache-to: type=gha,mode=max 122 | 123 | run-cd: 124 | needs: 125 | - release 126 | - build-container 127 | # call the cd.yml file with image tag from the new release 128 | uses: ./.github/workflows/cd.yml 129 | with: 130 | image_tag: ${{ needs.release.outputs.version }} 131 | deploy_to_staging: ${{ inputs.deploy_to_staging }} 132 | deploy_to_prod: ${{ inputs.deploy_to_prod }} 133 | secrets: inherit 134 | -------------------------------------------------------------------------------- /.github/workflows/validate.yml: -------------------------------------------------------------------------------- 1 | name: validate 2 | 3 | on: 4 | workflow_dispatch: 5 | inputs: 6 | environment: 7 | description: 'the environment to validate' 8 | required: true 9 | default: 'staging' 10 | type: choice 11 | options: 12 | - prod 13 | - staging 14 | - dev 15 | 
workflow_call: 16 | inputs: 17 | environment: 18 | description: 'the environment to validate' 19 | required: true 20 | default: 'staging' 21 | type: string 22 | secrets: 23 | TEST_TENANT_ID: 24 | required: true 25 | 26 | jobs: 27 | validate: 28 | runs-on: ubuntu-latest 29 | steps: 30 | - name: checkout 31 | uses: actions/checkout@v3 32 | - name: setup-node 33 | uses: actions/setup-node@v3 34 | with: 35 | node-version: 16.x 36 | cache: "yarn" 37 | cache-dependency-path: "**/yarn.lock" 38 | - name: install 39 | run: yarn install 40 | - run: yarn integration:$ENVIRONMENT 41 | env: 42 | ENVIRONMENT: ${{ inputs.environment }} 43 | TEST_TENANT_ID: ${{ secrets.TEST_TENANT_ID }} 44 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | _legacy/ 2 | 3 | #--------------------------------------- 4 | # Migration 5 | .github/_legacy 6 | terraform/_legacy 7 | 8 | #--------------------------------------- 9 | # General 10 | 11 | .DS_Store 12 | .AppleDouble 13 | .LSOverride 14 | [Dd]esktop.ini 15 | 16 | #--------------------------------------- 17 | # Environment 18 | 19 | .direnv 20 | .envrc 21 | .actrc 22 | .env 23 | .env.terraform 24 | 25 | #--------------------------------------- 26 | # Editors 27 | 28 | # JetBrains 29 | .idea/ 30 | out/ 31 | .fleet 32 | *.iws 33 | 34 | # VSCode 35 | .vscode/ 36 | .history/ 37 | *.code-workspace 38 | 39 | #--------------------------------------- 40 | # Rust/Cargo 41 | 42 | # Generated by Cargo, will have compiled files and executables 43 | debug/ 44 | target/ 45 | 46 | # Backup files generated by rustfmt 47 | **/*.rs.bk 48 | 49 | # MSVC Windows builds of rustc generate these, which store debugging information 50 | *.pdb 51 | 52 | #--------------------------------------- 53 | # Terraform 54 | 55 | # Local .terraform directories 56 | **/.terraform/* 57 | 58 | # .tfstate files 59 | *.tfstate 60 | *.tfstate.* 61 | 62 | # 
Exclude all .tfvars files, which are likely to contain sensitive data, such as 63 | # password, private keys, and other secrets. These should not be part of version 64 | # control as they are data points which are potentially sensitive and subject 65 | # to change depending on the environment. 66 | *.tfvars 67 | *.tfvars.json 68 | 69 | # Ignore override files as they are usually used to override resources locally and so are not checked in 70 | override.tf 71 | override.tf.json 72 | *_override.tf 73 | *_override.tf.json 74 | 75 | # Include override files you do wish to add to version control using negated pattern 76 | # 77 | # !example_override.tf 78 | 79 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan 80 | *tfplan* 81 | 82 | # Ignore CLI configuration files 83 | .terraformrc 84 | terraform.rc 85 | 86 | #--------------------------------------- 87 | # Integration 88 | 89 | node_modules 90 | *.log 91 | 92 | # test script 93 | test.sh 94 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "terraform/monitoring/grafonnet-lib"] 2 | path = terraform/monitoring/grafonnet-lib 3 | url = git@github.com:WalletConnect/grafonnet-lib.git 4 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/antonbabenko/pre-commit-terraform 3 | rev: v1.77.0 4 | hooks: 5 | - id: terraform_fmt 6 | - id: terraform_tflint 7 | - id: terraform_tfsec 8 | - id: terraform_docs 9 | args: 10 | - '--args=--lockfile=false' 11 | 12 | - repo: https://github.com/pre-commit/pre-commit-hooks 13 | rev: v4.4.0 14 | hooks: 15 | - id: check-merge-conflict 16 | - id: check-yaml 17 | - id: end-of-file-fixer 18 | - id: trailing-whitespace 19 | - id: 
detect-aws-credentials 20 | - id: detect-private-key 21 | - id: forbid-new-submodules 22 | - id: no-commit-to-branch 23 | - id: mixed-line-ending 24 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "echo-server" 3 | version = "0.41.8" 4 | edition = "2021" 5 | authors = [ 6 | "Harry Bairstow " 7 | ] 8 | build = "build.rs" 9 | resolver = "2" 10 | 11 | [features] 12 | full = ["functional_tests", "multitenant", "analytics", "geoblock", "cloud", "apns_tests", "fcm_tests", "fcmv1_tests"] 13 | # Used to enable functional tests 14 | functional_tests = [] 15 | # Multi-tenancy mode 16 | multitenant = [] 17 | # Enable analytics 18 | analytics = [] 19 | # Geoblocking 20 | geoblock = [] 21 | # Enable cloud app validations 22 | cloud = [] 23 | apns_tests = [] 24 | fcm_tests = [] 25 | fcmv1_tests = [] 26 | 27 | [dependencies] 28 | wc = { git = "https://github.com/WalletConnect/utils-rs.git", tag = "v0.11.1", features = ["full"] } 29 | 30 | tokio = { version = "1", features = ["full"] } 31 | axum = { version = "0.7.5", features = ["json", "multipart", "tokio"] } 32 | axum-client-ip = "0.5.1" 33 | tower = "0.4.13" 34 | tower-http = { version = "0.5.2", features = ["trace", "cors", "request-id", "propagate-header", "catch-panic", "util"] } 35 | hyper = "1.2.0" 36 | 37 | # Database 38 | sqlx = { version = "0.6", features = ["runtime-tokio-native-tls", "postgres", "json", "chrono", "macros"] } 39 | 40 | # Seralisation 41 | serde = { version = "1.0", features = ["derive"] } 42 | serde_json = "1.0" 43 | 44 | # Env Vars 45 | dotenv = "0.15" 46 | envy = "0.4" 47 | 48 | # Build-time info 49 | build-info = "0.0" 50 | 51 | # Logging 52 | tracing = "0.1" 53 | tracing-subscriber = { version = "0.3", features = ["env-filter", "parking_lot"] } 54 | tracing-appender = "0.2" 55 | tracing-opentelemetry = "0.18" 56 | atty = "0.2" 57 | 58 | # Push 59 
| a2 = { version = "0.10.0", features = ["tracing", "openssl"] } 60 | fcm = "0.9" 61 | # fcm_v1 = { git = "https://github.com/rj76/fcm-rust.git", package = "fcm" } 62 | fcm_v1 = { git = "https://github.com/WalletConnect/fcm-rust.git", package = "fcm", branch = "feat/key-not-from-file", default-features = false, features = ["native-tls"] } # TODO use above version once released 63 | 64 | # Signature validation 65 | ed25519-dalek = "2.1.1" 66 | 67 | # JWT Authentication 68 | relay_rpc = { git = "https://github.com/WalletConnect/WalletConnectRust.git", tag = "v0.29.4" } 69 | jsonwebtoken = "8.1" 70 | data-encoding = "2.3" 71 | 72 | # Analytics 73 | aws-config = "1.1.9" 74 | aws-sdk-s3 = "1.21.0" 75 | parquet = { git = "https://github.com/WalletConnect/arrow-rs.git", rev = "99a1cc3", default-features = false, features = ["flate2"] } 76 | parquet_derive = { git = "https://github.com/WalletConnect/arrow-rs.git", rev = "99a1cc3" } 77 | 78 | # Misc 79 | reqwest = { version = "0.12.4", features = ["multipart", "json"] } 80 | async-trait = "0.1" 81 | thiserror = "1.0" 82 | hex = "0.4" 83 | base64 = "0.21" 84 | chrono = { version = "0.4", features = ["serde"] } 85 | uuid = { version = "1.2", features = ["v4"] } 86 | is-variant-derive = { path = "crates/is-variant-derive" } 87 | once_cell = "1.15" 88 | pnet_datalink = "0.31" 89 | ipnet = "2.5" 90 | tap = "1.0.1" 91 | wiremock = "0.6.0" 92 | moka = { version = "0.12", features = ["future"] } 93 | 94 | [dev-dependencies] 95 | serial_test = "1.0" 96 | test-context = "0.1" 97 | futures-util = "0.3" 98 | random-string = "1.0" 99 | rand = "0.8" 100 | ed25519-dalek = { version = "2.1.1", features = ["rand_core"] } 101 | 102 | [build-dependencies] 103 | build-info-build = "0.0" 104 | 105 | # [patch.'https://github.com/WalletConnect/fcm-rust.git'] 106 | # fcm = { path = "../fcm-rust" } 107 | 108 | # [patch.'https://github.com/WalletConnect/gauth-rs.git'] 109 | # gauth = { path = "../gauth-rs" } 110 | 
-------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # 3 | # Install cargo-chef 4 | # 5 | ################################################################################ 6 | FROM rust:buster AS chef 7 | 8 | WORKDIR /app 9 | RUN cargo install cargo-chef 10 | 11 | ################################################################################ 12 | # 13 | # Generate recipe file 14 | # 15 | ################################################################################ 16 | FROM chef AS plan 17 | 18 | WORKDIR /app 19 | COPY Cargo.lock Cargo.toml ./ 20 | COPY src ./src 21 | COPY crates ./crates 22 | RUN cargo chef prepare --recipe-path recipe.json 23 | 24 | ################################################################################ 25 | # 26 | # Build the binary 27 | # 28 | ################################################################################ 29 | FROM chef AS build 30 | 31 | ENV TINI_VERSION v0.19.0 32 | 33 | # This is a build requirement of `opentelemetry-otlp`. Once the new version 34 | # is rolled out, which no longer requires the `protoc`, we'll be able to 35 | # get rid of this. 36 | RUN apt-get update \ 37 | && apt-get install -y --no-install-recommends protobuf-compiler 38 | 39 | RUN apt-get update \ 40 | && apt-get install -y --no-install-recommends lld llvm 41 | 42 | WORKDIR /app 43 | # Cache dependencies 44 | COPY --from=plan /app/recipe.json recipe.json 45 | COPY --from=plan /app/crates ./crates 46 | 47 | # Install init to be used in runtime container 48 | ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini-static /tini 49 | RUN chmod +x /tini 50 | 51 | RUN cargo chef cook --recipe-path recipe.json --release --features multitenant,analytics,cloud 52 | # Build the local binary 53 | COPY . . 
54 | RUN cargo build --bin echo-server --release --features multitenant,analytics,cloud 55 | 56 | ################################################################################ 57 | # 58 | # Runtime image 59 | # 60 | ################################################################################ 61 | FROM debian:buster-slim AS runtime 62 | 63 | COPY --from=build /tini /tini 64 | 65 | WORKDIR /app 66 | COPY --from=build /app/target/release/echo-server /usr/local/bin/echo-server 67 | RUN apt-get update \ 68 | && apt-get install -y --no-install-recommends ca-certificates libssl-dev \ 69 | && apt-get clean \ 70 | && rm -rf /var/lib/apt/lists/* 71 | 72 | USER 1001:1001 73 | ENTRYPOINT ["/tini", "--"] 74 | CMD ["/usr/local/bin/echo-server"] 75 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Push Server 2 | Push server for the WalletConnect V2 Protocol 3 | 4 | ## Notification Providers 5 | This list contains both supported and potentially planned providers 6 | - [x] FCM (API Key) 7 | - [x] FCM V1 (Google Service Accounts) 8 | - [x] APNS (Certificate Based) 9 | - [x] APNS (Token Based) 10 | - [ ] Web Push 11 | 12 | ## Supporting Notifications 13 | > **Note** Full documentation will be available soon. This is only a brief overview. 14 | 15 | There are 3 options for receiving notifications within your wallet: 16 | 1. **Use the hosted platform.** 17 | Go to settings in the [cloud app](https://cloud.walletconnect.com) for a project and create a Push URL, see the documentation on Push 18 | prerequisites for more info. 19 | 2. Host this rust implementation. 20 | there is an included [`terraform`](https://github.com/WalletConnect/echo-server/tree/main/terraform) configuration to help with this. 21 | 3. 
Write your own implementation that follows the [spec](https://docs.walletconnect.com/2.0/specs/servers/echo/echo-server-api) 22 | 23 | When using the hosted platform or self-hosting this implementation you have to provide the instance 24 | with your FCM API Key or APNS certificates and then - following the FCM/APNS docs - add support for that within your 25 | wallet. 26 | 27 | You also have to register the device with the instance of Echo Server once when the client_id is initially 28 | generated. By sending a POST request to `/clients` as per the [spec](./spec/spec.md). 29 | 30 | ## Multi-tenancy 31 | Echo Server supports multi-tenancy. To enable multi-tenancy you need to specify a `TENANT_DATABASE_URL` which will then disable 32 | the single-tenant endpoints in favour of endpoints with a `/:tenant_id` prefix e.g. `/:tenant_id/client/:id` 33 | 34 | > **Warning** 35 | > The `TENANT_DATABASE_URL` **must** point to a different database than the `DATABASE_URL` 36 | 37 | ## Running locally 38 | 39 | ``` 40 | docker compose -f docker-compose.storage.yml up -d 41 | just test-all 42 | ``` 43 | 44 | ## Running tests locally 45 | 46 | ``` 47 | yarn install 48 | yarn integration:dev # or yarn integration:staging 49 | ``` 50 | 51 | ## Deploying infrastructure 52 | 53 | ``` 54 | terraform -chdir=terraform init 55 | terraform -chdir=terraform workspace select dev/staging/prod 56 | terraform -chdir=terraform apply -var-file="vars/$(terraform -chdir=terraform workspace show).tfvars" 57 | ``` 58 | 59 | ## Using Images 60 | There are two Dockerfiles, one `Dockerfile` is used in production by the hosted platform at WalletConnect 61 | while `slim.Dockerfile` is a stripped down version with no features enabled i.e. Single Tenant 62 | 63 | ## Contact 64 | If you wish to integrate Push functionality into your Wallet (only available on v2), please contact us.
65 | 66 | ## Contributing 67 | To get started with contributing to Echo Server, look at the [open issues](https://github.com/WalletConnect/echo-server/issues?q=is:issue+is:open+label:%22help+wanted%22). 68 | New contributors can also look at the [issues labeled with "good first issue"](https://github.com/WalletConnect/echo-server/issues?q=is:issue+is:open+label:%22good+first+issue%22) 69 | as they should be suitable to people who are looking at the project for the first time. 70 | 71 | ## License 72 | Copyright 2022 WalletConnect, Inc. 73 | 74 | Licensed under the Apache License, Version 2.0 (the "License"); 75 | you may not use this file except in compliance with the License. 76 | You may obtain a copy of the License at 77 | 78 | http://www.apache.org/licenses/LICENSE-2.0 79 | 80 | Unless required by applicable law or agreed to in writing, software 81 | distributed under the License is distributed on an "AS IS" BASIS, 82 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 83 | See the License for the specific language governing permissions and 84 | limitations under the License. 85 | -------------------------------------------------------------------------------- /RELEASE.md: -------------------------------------------------------------------------------- 1 | # Release Guide 2 | 3 | This doc explains the Echo Server pipelines and how to create a release. 4 | 5 | > **Warning** 6 | > All commits **Must** follow the [conventional commit](https://www.conventionalcommits.org/en/v1.0.0/) 7 | > specification for the pipeline to continue working; if a commit ever doesn't follow this then git 8 | > history may need to be re-written!
9 | 10 | # Creating a new Release 11 | 12 | If you decide that we need to push the latest changes from `main` to our servers and as a release for 13 | downstream dependents, follow these steps: 14 | - Go to the actions tab 15 | - Select the `release` action from the sidebar 16 | - Click run workflow 17 | 18 | This will then run the [Release](#release) workflow which in turn triggers [CD](#cd) so that changes are 19 | deployed to our servers 20 | 21 | # Workflows 22 | 23 | ## Intake 24 | 25 | Adds `S-accepted` to issues opened by the WalletConnect core team and adds them to our boards 26 | 27 | ## Validate 28 | 29 | Runs integration tests against a specific environment - typically called by the [CD](#cd) action but 30 | can be run manually using `workflow_dispatch` 31 | 32 | ## CI Terraform 33 | 34 | Checks Terraform formatting and then runs plan, sends the plan as a comment on the PR so that reviewers can 35 | more clearly see what this PR changes with the infra 36 | 37 | ## CI 38 | 39 | Runs: 40 | - `cargo clippy` 41 | - `cargo +nightly fmt` 42 | - `cargo test` 43 | 44 | To check that all formatting is correct, clippy has been respected and that unit/integration tests are still 45 | passing 46 | 47 | ## Release 48 | 49 | Generates a changelog, bumps the version in the `Cargo.toml` file, commits it and creates a new release.
50 | 51 | The new release triggers [CD](#cd), and while that starts we create docker containers and publish them to: 52 | - Internal ECR 53 | - `ghcr.io/walletconnect/echo-server` 54 | 55 | ## CD 56 | 57 | Deploys changes to Staging infrastructure, then runs the [Validations](#validate); if they succeed we then 58 | deploy the same changes to Production -------------------------------------------------------------------------------- /build.rs: -------------------------------------------------------------------------------- 1 | fn main() { 2 | build_info_build::build_script(); 3 | } 4 | -------------------------------------------------------------------------------- /cog.toml: -------------------------------------------------------------------------------- 1 | pre_bump_hooks = [ 2 | "cargo check", 3 | ] 4 | 5 | tag_prefix = "v" -------------------------------------------------------------------------------- /crates/is-variant-derive/.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | -------------------------------------------------------------------------------- /crates/is-variant-derive/Cargo.lock: -------------------------------------------------------------------------------- 1 | # This file is automatically @generated by Cargo. 2 | # It is not intended for manual editing.
3 | version = 3 4 | 5 | [[package]] 6 | name = "convert_case" 7 | version = "0.6.0" 8 | source = "registry+https://github.com/rust-lang/crates.io-index" 9 | checksum = "ec182b0ca2f35d8fc196cf3404988fd8b8c739a4d270ff118a398feb0cbec1ca" 10 | dependencies = [ 11 | "unicode-segmentation", 12 | ] 13 | 14 | [[package]] 15 | name = "is-variant-derive" 16 | version = "0.1.0" 17 | dependencies = [ 18 | "convert_case", 19 | "proc-macro2", 20 | "quote", 21 | "syn", 22 | ] 23 | 24 | [[package]] 25 | name = "proc-macro2" 26 | version = "1.0.56" 27 | source = "registry+https://github.com/rust-lang/crates.io-index" 28 | checksum = "2b63bdb0cd06f1f4dedf69b254734f9b45af66e4a031e42a7480257d9898b435" 29 | dependencies = [ 30 | "unicode-ident", 31 | ] 32 | 33 | [[package]] 34 | name = "quote" 35 | version = "1.0.26" 36 | source = "registry+https://github.com/rust-lang/crates.io-index" 37 | checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc" 38 | dependencies = [ 39 | "proc-macro2", 40 | ] 41 | 42 | [[package]] 43 | name = "syn" 44 | version = "1.0.109" 45 | source = "registry+https://github.com/rust-lang/crates.io-index" 46 | checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" 47 | dependencies = [ 48 | "proc-macro2", 49 | "quote", 50 | "unicode-ident", 51 | ] 52 | 53 | [[package]] 54 | name = "unicode-ident" 55 | version = "1.0.8" 56 | source = "registry+https://github.com/rust-lang/crates.io-index" 57 | checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" 58 | 59 | [[package]] 60 | name = "unicode-segmentation" 61 | version = "1.10.1" 62 | source = "registry+https://github.com/rust-lang/crates.io-index" 63 | checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36" 64 | -------------------------------------------------------------------------------- /crates/is-variant-derive/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | 
name = "is-variant-derive" 3 | version = "0.1.0" 4 | edition = "2021" 5 | authors = [ 6 | "Harry Bairstow ", 7 | "Mihir Luthra" 8 | ] 9 | 10 | [lib] 11 | proc-macro = true 12 | 13 | [dependencies] 14 | syn = "1.0" 15 | quote = "1.0" 16 | proc-macro2 = "1.0" 17 | convert_case = "0.6" -------------------------------------------------------------------------------- /crates/is-variant-derive/README.md: -------------------------------------------------------------------------------- 1 | # is-variant-derive 2 | 3 | Used in testing to ensure that the correct errors are returned, 4 | from: **https://stackoverflow.com/a/65182902** 5 | 6 | > **Note** 7 | > Original Source is Avaliable here: https://github.com/MihirLuthra/is_variant -------------------------------------------------------------------------------- /crates/is-variant-derive/src/lib.rs: -------------------------------------------------------------------------------- 1 | extern crate proc_macro; 2 | 3 | use proc_macro::TokenStream; 4 | use proc_macro2::{Span, TokenStream as TokenStream2}; 5 | 6 | use quote::{format_ident, quote, quote_spanned}; 7 | use syn::spanned::Spanned; 8 | use syn::{parse_macro_input, Data, DeriveInput, Error, Fields}; 9 | 10 | // https://crates.io/crates/convert_case 11 | use convert_case::{Case, Casing}; 12 | 13 | macro_rules! 
derive_error { 14 | ($string: tt) => { 15 | Error::new(Span::call_site(), $string) 16 | .to_compile_error() 17 | .into() 18 | }; 19 | } 20 | 21 | #[proc_macro_derive(IsVariant)] 22 | pub fn derive_is_variant(input: TokenStream) -> TokenStream { 23 | // See https://doc.servo.org/syn/derive/struct.DeriveInput.html 24 | let input: DeriveInput = parse_macro_input!(input as DeriveInput); 25 | 26 | // get enum name 27 | let ref name = input.ident; 28 | let ref data = input.data; 29 | 30 | let mut variant_checker_functions; 31 | 32 | // data is of type syn::Data 33 | // See https://doc.servo.org/syn/enum.Data.html 34 | match data { 35 | // Only if data is an enum, we do parsing 36 | Data::Enum(data_enum) => { 37 | 38 | // data_enum is of type syn::DataEnum 39 | // https://doc.servo.org/syn/struct.DataEnum.html 40 | 41 | variant_checker_functions = TokenStream2::new(); 42 | 43 | // Iterate over enum variants 44 | // `variants` if of type `Punctuated` which implements IntoIterator 45 | // 46 | // https://doc.servo.org/syn/punctuated/struct.Punctuated.html 47 | // https://doc.servo.org/syn/struct.Variant.html 48 | for variant in &data_enum.variants { 49 | 50 | // Variant's name 51 | let ref variant_name = variant.ident; 52 | 53 | // Variant can have unnamed fields like `Variant(i32, i64)` 54 | // Variant can have named fields like `Variant {x: i32, y: i32}` 55 | // Variant can be named Unit like `Variant` 56 | let fields_in_variant = match &variant.fields { 57 | Fields::Unnamed(_) => quote_spanned! {variant.span()=> (..) }, 58 | Fields::Unit => quote_spanned! { variant.span()=> }, 59 | Fields::Named(_) => quote_spanned! 
{variant.span()=> {..} }, 60 | }; 61 | 62 | // construct an identifier named is_ for function name 63 | // We convert it to snake case using `to_case(Case::Snake)` 64 | // For example, if variant is `HelloWorld`, it will generate `is_hello_world` 65 | let mut is_variant_func_name = 66 | format_ident!("is_{}", variant_name.to_string().to_case(Case::Snake)); 67 | is_variant_func_name.set_span(variant_name.span()); 68 | 69 | // Here we construct the function for the current variant 70 | variant_checker_functions.extend(quote_spanned! {variant.span()=> 71 | pub fn #is_variant_func_name(&self) -> bool { 72 | match self { 73 | #name::#variant_name #fields_in_variant => true, 74 | _ => false, 75 | } 76 | } 77 | }); 78 | 79 | // Above we are making a TokenStream using extend() 80 | // This is because TokenStream is an Iterator, 81 | // so we can keep extending it. 82 | // 83 | // proc_macro2::TokenStream:- https://docs.rs/proc-macro2/1.0.24/proc_macro2/struct.TokenStream.html 84 | 85 | // Read about 86 | // quote:- https://docs.rs/quote/1.0.7/quote/ 87 | // quote_spanned:- https://docs.rs/quote/1.0.7/quote/macro.quote_spanned.html 88 | // spans:- https://docs.rs/syn/1.0.54/syn/spanned/index.html 89 | } 90 | } 91 | _ => return derive_error!("IsVariant is only implemented for enums"), 92 | }; 93 | 94 | let (impl_generics, ty_generics, where_clause) = input.generics.split_for_impl(); 95 | 96 | let expanded = quote! 
{ 97 | impl #impl_generics #name #ty_generics #where_clause { 98 | // variant_checker_functions gets replaced by all the functions 99 | // that were constructed above 100 | #variant_checker_functions 101 | } 102 | }; 103 | 104 | TokenStream::from(expanded) 105 | } -------------------------------------------------------------------------------- /docker-compose.multi-tenant.yml: -------------------------------------------------------------------------------- 1 | version: '3.9' 2 | services: 3 | postgres: 4 | image: postgres 5 | environment: 6 | - POSTGRES_PASSWORD=root 7 | ports: 8 | - "5432:5432" 9 | 10 | postgres-tenant: 11 | image: postgres 12 | environment: 13 | - POSTGRES_PASSWORD=root 14 | ports: 15 | - "5433:5432" 16 | 17 | echo-server: 18 | build: . 19 | depends_on: 20 | - jaeger 21 | - postgres 22 | - postgres-tenant 23 | ports: 24 | - "3000:3000" 25 | - "3002:3002" 26 | environment: 27 | - PORT=3000 28 | - LOG_LEVEL=info,echo-server=info 29 | - DATABASE_URL=postgres://postgres:root@postgres:5432/postgres 30 | - TENANT_DATABASE_URL=postgres://postgres:root@postgres-tenant:5433/postgres 31 | - TELEMETRY_PROMETHEUS_PORT=3002 32 | -------------------------------------------------------------------------------- /docker-compose.storage.yml: -------------------------------------------------------------------------------- 1 | version: '3.9' 2 | services: 3 | 4 | postgres: 5 | image: postgres 6 | environment: 7 | - POSTGRES_PASSWORD=root 8 | ports: 9 | - "5432:5432" 10 | 11 | postgres-tenant: 12 | image: postgres 13 | environment: 14 | - POSTGRES_PASSWORD=root 15 | ports: 16 | - "5433:5432" 17 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.9' 2 | services: 3 | postgres: 4 | image: postgres 5 | environment: 6 | - POSTGRES_PASSWORD=root 7 | 8 | echo-server: 9 | build: . 
10 | depends_on: 11 | - jaeger 12 | - postgres 13 | ports: 14 | - "3000:3000" 15 | - "3002:3002" 16 | environment: 17 | - PORT=3000 18 | - LOG_LEVEL=info,echo-server=info 19 | - DATABASE_URL=postgres://postgres:root@postgres:5432/postgres 20 | - TELEMETRY_PROMETHEUS_PORT=3002 21 | -------------------------------------------------------------------------------- /integration/.prettierrc: -------------------------------------------------------------------------------- 1 | { 2 | "singleQuote": true, 3 | "semi": false, 4 | "trailingComma": "all", 5 | "tabWidth": 2, 6 | "printWidth": 80 7 | } 8 | -------------------------------------------------------------------------------- /integration/integration.test.ts: -------------------------------------------------------------------------------- 1 | import axios from 'axios' 2 | 3 | declare let process: { 4 | env: { 5 | JEST_ENV: string, 6 | TEST_TENANT_ID: string, 7 | } 8 | } 9 | 10 | const BASE_URLS = new Map([ 11 | ['prod', 'https://echo.walletconnect.com'], 12 | ['staging', 'https://staging.echo.walletconnect.com'], 13 | ['dev', 'http://localhost:3000'], 14 | ]) 15 | 16 | const TEST_TENANT = process.env.TEST_TENANT_ID 17 | 18 | const BASE_URL = BASE_URLS.get(process.env.JEST_ENV) 19 | 20 | describe('Echo Server', () => { 21 | describe('Health', () => { 22 | const url = `${BASE_URL}/health` 23 | 24 | it('is healthy', async () => { 25 | const { status } = await axios.get(`${url}`) 26 | 27 | expect(status).toBe(200) 28 | }) 29 | }) 30 | describe('APNS Client Registration', () => { 31 | const url = `${BASE_URL}/${TEST_TENANT}/clients` 32 | 33 | it('registers a client', async () => { 34 | const { status, data } = await axios.post( 35 | `${url}`, 36 | { 37 | client_id: Math.random().toString(36).substr(2, 5), 38 | type: 'apns', 39 | token: Math.random().toString(36).substr(2, 5), 40 | }, 41 | { 42 | headers: { 43 | 'content-type': 'application/json', 44 | }, 45 | }, 46 | ) 47 | 48 | expect(status).toBe(200) 49 | }) 50 | }) 51 | 
describe('FCM Client Registration', () => { 52 | const url = `${BASE_URL}/${TEST_TENANT}/clients` 53 | 54 | it('registers a client', async () => { 55 | const { status, data } = await axios.post( 56 | `${url}`, 57 | { 58 | client_id: Math.random().toString(36).substr(2, 5), 59 | type: 'fcm', 60 | token: Math.random().toString(36).substr(2, 5), 61 | }, 62 | { 63 | headers: { 64 | 'content-type': 'application/json', 65 | }, 66 | }, 67 | ) 68 | 69 | expect(status).toBe(200) 70 | }) 71 | }) 72 | describe('Middlewares', () => { 73 | const httpClient = axios.create({ 74 | validateStatus: (_status) => true, 75 | }) 76 | 77 | // Simulate flood of requests and check for rate-limited responses 78 | it('Rate limiting', async () => { 79 | const url = `${BASE_URL}/rate_limit_test` 80 | // x2.5 of the rate limit 81 | const requests_to_send = 250; 82 | const promises = []; 83 | for (let i = 0; i < requests_to_send; i++) { 84 | promises.push( 85 | httpClient.get(url) 86 | ); 87 | } 88 | const results = await Promise.allSettled(promises); 89 | 90 | let ok_statuses_counter = 0; 91 | let rate_limited_statuses_counter = 0; 92 | results.forEach((result) => { 93 | if (result.status === 'fulfilled' && result.value.status === 429) { 94 | rate_limited_statuses_counter++; 95 | }else if (result.status === 'fulfilled' && result.value.status === 200) { 96 | ok_statuses_counter++; 97 | } 98 | }); 99 | 100 | console.log(`➜ Rate limited statuses: ${rate_limited_statuses_counter} out of ${requests_to_send} total requests.`); 101 | // Check if there are any successful and rate limited statuses 102 | expect(ok_statuses_counter).toBeGreaterThan(0); 103 | expect(rate_limited_statuses_counter).toBeGreaterThan(0); 104 | }) 105 | }) 106 | }) 107 | -------------------------------------------------------------------------------- /integration/jestconfig.integration.json: -------------------------------------------------------------------------------- 1 | { 2 | "setupFilesAfterEnv": [], 3 | "transform": { 4 | 
"^.+\\.(t|j)sx?$": "ts-jest" 5 | }, 6 | "testRegex": "./.*\\.test\\.ts$", 7 | "collectCoverageFrom": ["src/**/*.{ts,js}"] 8 | } 9 | -------------------------------------------------------------------------------- /justfile: -------------------------------------------------------------------------------- 1 | set dotenv-load 2 | 3 | lint: clippy fmt 4 | 5 | unit: lint test test-all test-single-tenant lint-tf 6 | 7 | devloop: unit fmt-imports 8 | 9 | test := "" 10 | 11 | test: 12 | RUST_BACKTRACE=1 cargo test --all-targets -- {{test}} 13 | 14 | test-all: 15 | RUST_BACKTRACE=1 cargo test --all-targets --features=multitenant,analytics,geoblock,functional_tests -- {{test}} 16 | 17 | test-all-providers: 18 | RUST_BACKTRACE=1 cargo test --all-targets --features=multitenant,analytics,geoblock,functional_tests,apns_tests,fcm_tests,fcmv1_tests -- {{test}} 19 | 20 | test-single-tenant: 21 | RUST_BACKTRACE=1 cargo test --features=functional_tests -- {{test}} 22 | 23 | clippy: 24 | #!/bin/bash 25 | set -euo pipefail 26 | 27 | if command -v cargo-clippy >/dev/null; then 28 | echo '==> Running clippy' 29 | cargo clippy --all-features --tests -- -D warnings 30 | else 31 | echo '==> clippy not found in PATH, skipping' 32 | fi 33 | 34 | fmt: 35 | #!/bin/bash 36 | set -euo pipefail 37 | 38 | if command -v cargo-fmt >/dev/null; then 39 | echo '==> Running rustfmt' 40 | cargo fmt 41 | else 42 | echo '==> rustfmt not found in PATH, skipping' 43 | fi 44 | 45 | if command -v terraform -version >/dev/null; then 46 | echo '==> Running terraform fmt' 47 | terraform -chdir=terraform fmt -recursive 48 | else 49 | echo '==> terraform not found in PATH, skipping' 50 | fi 51 | 52 | fmt-imports: 53 | #!/bin/bash 54 | set -euo pipefail 55 | 56 | if command -v cargo-fmt >/dev/null; then 57 | echo '==> Running rustfmt' 58 | cargo +nightly fmt -- --config group_imports=StdExternalCrate,imports_granularity=One 59 | else 60 | echo '==> rustfmt not found in PATH, skipping' 61 | fi 62 | 63 | lint-tf: 
tf-validate tf-fmt tfsec tflint tfdocs 64 | 65 | tf-fmt: 66 | #!/bin/bash 67 | set -euo pipefail 68 | 69 | if command -v terraform >/dev/null; then 70 | echo '==> Running terraform fmt' 71 | terraform -chdir=terraform fmt -recursive 72 | else 73 | echo '==> Terraform not found in PATH, skipping' 74 | fi 75 | 76 | tf-validate: 77 | #!/bin/bash 78 | set -euo pipefail 79 | 80 | if command -v terraform >/dev/null; then 81 | echo '==> Running terraform fmt' 82 | terraform -chdir=terraform validate 83 | else 84 | echo '==> Terraform not found in PATH, skipping' 85 | fi 86 | 87 | tfsec: 88 | #!/bin/bash 89 | set -euo pipefail 90 | 91 | if command -v tfsec >/dev/null; then 92 | echo '==> Running tfsec' 93 | cd terraform 94 | tfsec 95 | else 96 | echo '==> tfsec not found in PATH, skipping' 97 | fi 98 | 99 | tflint: 100 | #!/bin/bash 101 | set -euo pipefail 102 | 103 | if command -v tflint >/dev/null; then 104 | echo '==> Running tflint' 105 | cd terraform; tflint 106 | cd ecs; tflint 107 | cd ../monitoring; tflint 108 | cd ../private_zone; tflint 109 | cd ../redis; tflint 110 | 111 | else 112 | echo '==> tflint not found in PATH, skipping' 113 | fi 114 | 115 | tfdocs: 116 | #!/bin/bash 117 | set -euo pipefail 118 | 119 | if command -v terraform-docs >/dev/null; then 120 | echo '==> Running terraform-docs' 121 | terraform-docs terraform 122 | else 123 | echo '==> terraform-docs not found in PATH, skipping' 124 | fi 125 | -------------------------------------------------------------------------------- /migrations/1664117744_init.sql: -------------------------------------------------------------------------------- 1 | CREATE SCHEMA IF NOT EXISTS public; -------------------------------------------------------------------------------- /migrations/1664117746_create-clients.sql: -------------------------------------------------------------------------------- 1 | CREATE TYPE public.provider AS ENUM ('fcm', 'apns', 'noop'); 2 | 3 | CREATE TABLE IF NOT EXISTS public.clients 4 | ( 5 
| id varchar(255) primary key default gen_random_uuid(), 6 | 7 | push_type public.provider not null, 8 | device_token text not null, 9 | 10 | created_at timestamptz not null default now() 11 | ); -------------------------------------------------------------------------------- /migrations/1664117751_create-notifications.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS public.notifications 2 | ( 3 | id varchar(255) primary key, 4 | client_id varchar(255) not null, 5 | 6 | last_payload jsonb not null default '{}'::jsonb, 7 | previous_payloads jsonb[] not null default array []::jsonb[], 8 | 9 | last_received_at timestamptz not null default now(), 10 | created_at timestamptz not null default now(), 11 | 12 | CONSTRAINT fk_notifications_client_id FOREIGN KEY (client_id) 13 | REFERENCES public.clients (id) 14 | ); -------------------------------------------------------------------------------- /migrations/1667510128_add-tenant-id.sql: -------------------------------------------------------------------------------- 1 | alter table public.clients 2 | add tenant_id varchar(255) not null default '0000-0000-0000-0000'; 3 | 4 | alter table public.notifications 5 | add tenant_id varchar(255) not null default '0000-0000-0000-0000'; 6 | -------------------------------------------------------------------------------- /migrations/1675269994_add-sandbox-to-enum.sql: -------------------------------------------------------------------------------- 1 | ALTER TYPE public.provider ADD VALUE 'apnssandbox'; 2 | -------------------------------------------------------------------------------- /migrations/1695381202_alter_notifications_constraint.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE public.notifications 2 | DROP CONSTRAINT fk_notifications_client_id, 3 | ADD CONSTRAINT fk_notifications_client_id 4 | FOREIGN KEY (client_id) 5 | REFERENCES public.clients (id) 6 | 
ON DELETE CASCADE; 7 | -------------------------------------------------------------------------------- /migrations/1695631804_add-unique-device-tokens.sql: -------------------------------------------------------------------------------- 1 | DELETE FROM public.clients 2 | WHERE (device_token, created_at) NOT IN 3 | ( 4 | SELECT device_token, MAX(created_at) 5 | FROM public.clients 6 | GROUP BY device_token 7 | ); 8 | ALTER TABLE public.clients 9 | ADD CONSTRAINT device_token_unique UNIQUE(device_token); 10 | -------------------------------------------------------------------------------- /migrations/1699887339_notifications-primary-key-includes-client-id.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE public.notifications 2 | DROP CONSTRAINT notifications_pkey; 3 | 4 | ALTER TABLE public.notifications 5 | ALTER COLUMN id SET NOT NULL; 6 | 7 | ALTER TABLE public.notifications 8 | ADD PRIMARY KEY (id, client_id); 9 | -------------------------------------------------------------------------------- /migrations/1699982551_add_always_raw_to_clients.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE public.clients 2 | ADD COLUMN always_raw BOOLEAN DEFAULT FALSE; 3 | -------------------------------------------------------------------------------- /migrations/README.md: -------------------------------------------------------------------------------- 1 | # Migrations 2 | 3 | This folder contains migrations for Echo Server and they are automatically called on start-up. 
4 | 5 | ## Format 6 | ``` 7 | {unix timestamp}_{description}.sql 8 | ``` 9 | 10 | ## Contributors 11 | To create a new migration run `./new.sh [description]` to make a new migration 12 | -------------------------------------------------------------------------------- /migrations/new.sh: -------------------------------------------------------------------------------- 1 | DESCRIPTION=$1 2 | touch "./$(date +%s)_$DESCRIPTION.sql" -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "echo-server-integration-tests", 3 | "version": "1.0.0", 4 | "description": "Integration tests for echo server", 5 | "scripts": { 6 | "integration:prod": "JEST_ENV=prod jest --config integration/jestconfig.integration.json --verbose", 7 | "integration:staging": "JEST_ENV=staging jest --config integration/jestconfig.integration.json --verbose", 8 | "integration:dev": "JEST_ENV=dev jest --config integration/jestconfig.integration.json --verbose", 9 | "format": "prettier --config integration/.prettierrc --write '*.{json,js}' 'integration/**/*.{js,ts}' '.github/**/*.{yml,yaml}'", 10 | "lint": "eslint --max-warnings=0 integration && prettier --config integration/.prettierrc --check '*.{json,js}' 'integration/**/*.{js,ts}'" 11 | }, 12 | "author": "WalletConnect, Inc. 
", 13 | "license": "Apache-2.0", 14 | "eslintConfig": { 15 | "root": true, 16 | "extends": [ 17 | "typescript", 18 | "prettier" 19 | ] 20 | }, 21 | "devDependencies": { 22 | "@types/jest": "^26.0.23", 23 | "@typescript-eslint/eslint-plugin": "^4.16.1", 24 | "@typescript-eslint/parser": "^4.16.1", 25 | "axios": "^0.27.2", 26 | "eslint": "^7.21.0", 27 | "eslint-config-prettier": "^8.1.0", 28 | "eslint-config-typescript": "^3.0.0", 29 | "jest": "^27.0.1", 30 | "prettier": "^2.3.0", 31 | "ts-jest": "^27.0.1", 32 | "ts-loader": "^9.2.2", 33 | "typescript": "^4.3.2" 34 | }, 35 | "dependencies": {} 36 | } 37 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | edition = "2021" 2 | 3 | reorder_imports = true 4 | use_try_shorthand = true 5 | remove_nested_parens = true 6 | reorder_modules = true 7 | use_field_init_shorthand = true 8 | 9 | ## We only use settings available in the stable channel 10 | 11 | #fn_single_line = false 12 | #format_code_in_doc_comments = true 13 | #format_strings = true 14 | #imports_layout = "HorizontalVertical" 15 | #imports_granularity = "One" 16 | #normalize_comments = true 17 | #normalize_doc_attributes = true 18 | #reorder_impl_items = true 19 | #group_imports = "StdExternalCrate" 20 | #wrap_comments = true 21 | #overflow_delimited_expr = true 22 | #unstable_features = true 23 | -------------------------------------------------------------------------------- /shell.nix: -------------------------------------------------------------------------------- 1 | let 2 | nixpkgs = import {}; 3 | in 4 | with nixpkgs; 5 | stdenv.mkDerivation { 6 | name = "echo-server"; 7 | buildInputs = [ 8 | cargo 9 | rustc 10 | pkgconfig 11 | openssl.dev 12 | nix 13 | protobuf 14 | ]; 15 | OPENSSL_DEV=openssl.dev; 16 | } -------------------------------------------------------------------------------- /slim.Dockerfile: 
-------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # 3 | # Install cargo-chef 4 | # 5 | ################################################################################ 6 | FROM rust:buster AS chef 7 | 8 | WORKDIR /app 9 | RUN cargo install cargo-chef 10 | 11 | ################################################################################ 12 | # 13 | # Generate recipe file 14 | # 15 | ################################################################################ 16 | FROM chef AS plan 17 | 18 | WORKDIR /app 19 | COPY Cargo.lock Cargo.toml ./ 20 | COPY src ./src 21 | COPY crates ./crates 22 | RUN cargo chef prepare --recipe-path recipe.json 23 | 24 | ################################################################################ 25 | # 26 | # Build the binary 27 | # 28 | ################################################################################ 29 | FROM chef AS build 30 | 31 | ENV TINI_VERSION v0.19.0 32 | 33 | # This is a build requirement of `opentelemetry-otlp`. Once the new version 34 | # is rolled out, which no longer requires the `protoc`, we'll be able to 35 | # get rid of this. 36 | RUN apt-get update \ 37 | && apt-get install -y --no-install-recommends protobuf-compiler 38 | 39 | RUN apt-get update \ 40 | && apt-get install -y --no-install-recommends lld llvm 41 | 42 | WORKDIR /app 43 | # Cache dependencies 44 | COPY --from=plan /app/recipe.json recipe.json 45 | COPY --from=plan /app/crates ./crates 46 | 47 | # Install init to be used in runtime container 48 | ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini-static /tini 49 | RUN chmod +x /tini 50 | 51 | RUN cargo chef cook --recipe-path recipe.json --release 52 | # Build the local binary 53 | COPY . . 
54 | RUN cargo build --bin echo-server --release 55 | 56 | ################################################################################ 57 | # 58 | # Runtime image 59 | # 60 | ################################################################################ 61 | FROM debian:buster-slim AS runtime 62 | 63 | COPY --from=build /tini /tini 64 | 65 | WORKDIR /app 66 | COPY --from=build /app/target/release/echo-server /usr/local/bin/echo-server 67 | RUN apt-get update \ 68 | && apt-get install -y --no-install-recommends ca-certificates libssl-dev \ 69 | && apt-get clean \ 70 | && rm -rf /var/lib/apt/lists/* 71 | 72 | USER 1001:1001 73 | ENTRYPOINT ["/tini", "--"] 74 | CMD ["/usr/local/bin/echo-server"] 75 | -------------------------------------------------------------------------------- /src/analytics/client_info.rs: -------------------------------------------------------------------------------- 1 | use {parquet_derive::ParquetRecordWriter, serde::Serialize, std::sync::Arc}; 2 | 3 | #[derive(Debug, Clone, Serialize, ParquetRecordWriter)] 4 | #[serde(rename_all = "camelCase")] 5 | pub struct ClientInfo { 6 | pub region: Option>, 7 | pub country: Option>, 8 | pub continent: Option>, 9 | pub project_id: Arc, 10 | pub client_id: Arc, 11 | pub push_provider: Arc, 12 | pub always_raw: bool, 13 | pub registered_at: chrono::NaiveDateTime, 14 | } 15 | -------------------------------------------------------------------------------- /src/analytics/message_info.rs: -------------------------------------------------------------------------------- 1 | use {parquet_derive::ParquetRecordWriter, serde::Serialize, std::sync::Arc}; 2 | 3 | #[derive(Debug, Clone, Serialize, ParquetRecordWriter)] 4 | #[serde(rename_all = "camelCase")] 5 | pub struct MessageInfo { 6 | pub msg_id: Arc, 7 | pub region: Option>, 8 | pub country: Option>, 9 | pub continent: Option>, 10 | pub project_id: Arc, 11 | pub client_id: Arc, 12 | pub topic: Arc, 13 | pub push_provider: Arc, 14 | pub always_raw: Option, 
15 | pub tag: Option, 16 | pub encrypted: Option, 17 | pub flags: Option, 18 | pub status: u16, 19 | pub response_message: Option>, 20 | pub received_at: chrono::NaiveDateTime, 21 | } 22 | -------------------------------------------------------------------------------- /src/blob.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::error::{Error, Result}, 3 | base64::Engine as _, 4 | serde::{Deserialize, Serialize}, 5 | }; 6 | 7 | pub type Flag = u32; 8 | pub const ENCRYPTED_FLAG: Flag = 1 << 0; 9 | 10 | #[derive(Serialize, Deserialize, Debug, Clone, Eq, PartialEq)] 11 | pub struct DecryptedPayloadBlob { 12 | pub title: String, 13 | pub body: String, 14 | pub image: Option, 15 | pub url: Option, 16 | } 17 | 18 | impl DecryptedPayloadBlob { 19 | pub fn from_base64_encoded(blob_string: &str) -> Result { 20 | let blob_decoded = base64::engine::general_purpose::STANDARD 21 | .decode(blob_string) 22 | .map_err(Error::DecryptedNotificationDecode)?; 23 | serde_json::from_slice(&blob_decoded).map_err(Error::DecryptedNotificationParse) 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /src/handlers/create_tenant.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::{ 3 | error::Error, handlers::validate_tenant_request, increment_counter, log::prelude::*, 4 | state::AppState, stores::tenant::TenantUpdateParams, 5 | }, 6 | axum::{extract::State, http::HeaderMap, Json}, 7 | serde::{Deserialize, Serialize}, 8 | std::sync::Arc, 9 | tracing::instrument, 10 | }; 11 | 12 | #[derive(Serialize, Deserialize)] 13 | pub struct TenantRegisterBody { 14 | /// The project ID 15 | pub id: String, 16 | } 17 | 18 | #[derive(Serialize)] 19 | pub struct TenantRegisterResponse { 20 | /// The generated tenant url for the specified project id 21 | pub url: String, 22 | } 23 | 24 | #[instrument(skip_all, name = "create_tenant_handler")] 25 | pub 
async fn handler( 26 | State(state): State>, 27 | headers: HeaderMap, 28 | Json(body): Json, 29 | ) -> Result, Error> { 30 | #[cfg(feature = "cloud")] 31 | if let Err(e) = validate_tenant_request(&state.jwt_validation_client, &headers, &body.id).await 32 | { 33 | error!( 34 | tenant_id = %body.id, 35 | err = ?e, 36 | "JWT verification failed" 37 | ); 38 | return Err(e); 39 | } 40 | 41 | #[cfg(not(feature = "cloud"))] 42 | if let Err(e) = validate_tenant_request(&state.jwt_validation_client, &headers) { 43 | error!( 44 | tenant_id = %body.id, 45 | err = ?e, 46 | "JWT verification failed" 47 | ); 48 | return Err(e); 49 | } 50 | 51 | let params = TenantUpdateParams { id: body.id }; 52 | 53 | let tenant = state.tenant_store.create_tenant(params).await?; 54 | 55 | increment_counter!(state.metrics, registered_tenants); 56 | 57 | debug!( 58 | tenant_id = %tenant.id, 59 | "new tenant" 60 | ); 61 | 62 | Ok(Json(TenantRegisterResponse { 63 | url: format!("{}/{}", state.config.public_url, tenant.id), 64 | })) 65 | } 66 | -------------------------------------------------------------------------------- /src/handlers/delete_apns.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::{ 3 | error::Error::{self}, 4 | handlers::validate_tenant_request, 5 | increment_counter, 6 | state::AppState, 7 | }, 8 | axum::{ 9 | extract::{Path, State}, 10 | http::HeaderMap, 11 | }, 12 | hyper::StatusCode, 13 | std::sync::Arc, 14 | tracing::{error, instrument}, 15 | }; 16 | 17 | #[instrument(skip_all, name = "delete_apns_handler")] 18 | pub async fn handler( 19 | State(state): State>, 20 | Path(id): Path, 21 | headers: HeaderMap, 22 | ) -> Result { 23 | // JWT verification 24 | #[cfg(feature = "cloud")] 25 | let jwt_verification_result = 26 | validate_tenant_request(&state.jwt_validation_client, &headers, &id).await; 27 | 28 | #[cfg(not(feature = "cloud"))] 29 | let jwt_verification_result = validate_tenant_request(&state.jwt_validation_client, 
&headers); 30 | 31 | if let Err(e) = jwt_verification_result { 32 | error!( 33 | tenant_id = %id, 34 | err = ?e, 35 | "JWT verification failed" 36 | ); 37 | return Err(e); 38 | } 39 | 40 | // Ensure tenant real 41 | let _existing_tenant = state.tenant_store.get_tenant(&id).await?; 42 | 43 | let new_tenant = state.tenant_store.update_tenant_delete_apns(&id).await?; 44 | 45 | if new_tenant.suspended { 46 | // If suspended, it can be restored now because valid credentials have been 47 | // provided 48 | state.tenant_store.unsuspend_tenant(&new_tenant.id).await?; 49 | } 50 | 51 | increment_counter!(state.metrics, tenant_apns_updates); 52 | 53 | Ok(StatusCode::NO_CONTENT) 54 | } 55 | -------------------------------------------------------------------------------- /src/handlers/delete_client.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::{ 3 | error::{Error::InvalidAuthentication, Result}, 4 | handlers::{authenticate_client, Response, DECENTRALIZED_IDENTIFIER_PREFIX}, 5 | log::prelude::*, 6 | state::AppState, 7 | }, 8 | axum::{ 9 | extract::{Path, State as StateExtractor}, 10 | http::HeaderMap, 11 | }, 12 | relay_rpc::domain::ClientId, 13 | std::sync::Arc, 14 | tracing::instrument, 15 | }; 16 | 17 | #[instrument(skip_all, name = "delete_client_handler")] 18 | pub async fn handler( 19 | Path((tenant_id, id)): Path<(String, String)>, 20 | StateExtractor(state): StateExtractor>, 21 | headers: HeaderMap, 22 | ) -> Result { 23 | let id = id 24 | .trim_start_matches(DECENTRALIZED_IDENTIFIER_PREFIX) 25 | .to_string(); 26 | 27 | let client_to_be_deleted = ClientId::new(id.clone().into()); 28 | if !authenticate_client(headers, &state.config.public_url, |client_id| { 29 | if let Some(client_id) = client_id { 30 | debug!( 31 | %tenant_id, 32 | requested_client_id = %client_to_be_deleted, 33 | token_client_id = %client_id, 34 | "client_id authentication checking" 35 | ); 36 | client_id == client_to_be_deleted 37 | } else { 
38 | debug!( 39 | %tenant_id, 40 | requested_client_id = %client_to_be_deleted, 41 | token_client_id = "unknown", 42 | "client_id verification failed: missing client_id" 43 | ); 44 | false 45 | } 46 | })? { 47 | debug!( 48 | %tenant_id, 49 | requested_client_id = %client_to_be_deleted, 50 | token_client_id = "unknown", 51 | "client_id verification failed: invalid client_id" 52 | ); 53 | return Err(InvalidAuthentication); 54 | } 55 | 56 | state.client_store.delete_client(&tenant_id, &id).await?; 57 | debug!("client ({}) deleted for tenant ({})", id, tenant_id); 58 | 59 | debug!( 60 | %tenant_id, 61 | client_id = %client_to_be_deleted, 62 | "deleted client" 63 | ); 64 | 65 | Ok(Response::default()) 66 | } 67 | -------------------------------------------------------------------------------- /src/handlers/delete_fcm.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::{ 3 | error::Error::{self}, 4 | handlers::validate_tenant_request, 5 | increment_counter, 6 | state::AppState, 7 | }, 8 | axum::{ 9 | extract::{Path, State}, 10 | http::HeaderMap, 11 | }, 12 | hyper::StatusCode, 13 | std::sync::Arc, 14 | tracing::{error, instrument}, 15 | }; 16 | 17 | #[instrument(skip_all, name = "delete_fcm_handler")] 18 | pub async fn handler( 19 | State(state): State>, 20 | Path(id): Path, 21 | headers: HeaderMap, 22 | ) -> Result { 23 | // JWT token verification 24 | #[cfg(feature = "cloud")] 25 | let jwt_verification_result = 26 | validate_tenant_request(&state.jwt_validation_client, &headers, &id).await; 27 | 28 | #[cfg(not(feature = "cloud"))] 29 | let jwt_verification_result = validate_tenant_request(&state.jwt_validation_client, &headers); 30 | 31 | if let Err(e) = jwt_verification_result { 32 | error!( 33 | tenant_id = %id, 34 | err = ?e, 35 | "JWT verification failed" 36 | ); 37 | return Err(e); 38 | } 39 | 40 | // -- check if tenant is real 41 | let _existing_tenant = state.tenant_store.get_tenant(&id).await?; 42 | 43 | let 
new_tenant = state.tenant_store.update_tenant_delete_fcm(&id).await?; 44 | 45 | if new_tenant.suspended { 46 | // If suspended, it can be restored now because valid credentials have been 47 | // provided 48 | state.tenant_store.unsuspend_tenant(&new_tenant.id).await?; 49 | } 50 | 51 | increment_counter!(state.metrics, tenant_fcm_updates); 52 | 53 | Ok(StatusCode::NO_CONTENT) 54 | } 55 | -------------------------------------------------------------------------------- /src/handlers/delete_fcm_v1.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::{ 3 | error::Error::{self}, 4 | handlers::validate_tenant_request, 5 | increment_counter, 6 | state::AppState, 7 | }, 8 | axum::{ 9 | extract::{Path, State}, 10 | http::HeaderMap, 11 | }, 12 | hyper::StatusCode, 13 | std::sync::Arc, 14 | tracing::{debug, error, instrument}, 15 | }; 16 | 17 | #[instrument(skip_all, name = "delete_fcm_v1_handler")] 18 | pub async fn handler( 19 | State(state): State>, 20 | Path(id): Path, 21 | headers: HeaderMap, 22 | ) -> Result { 23 | // JWT token verification 24 | #[cfg(feature = "cloud")] 25 | let jwt_verification_result = 26 | validate_tenant_request(&state.jwt_validation_client, &headers, &id).await; 27 | 28 | // -- check if tenant is real 29 | let _existing_tenant = state.tenant_store.get_tenant(&id).await?; 30 | 31 | #[cfg(not(feature = "cloud"))] 32 | let jwt_verification_result = validate_tenant_request(&state.jwt_validation_client, &headers); 33 | 34 | if let Err(e) = jwt_verification_result { 35 | error!( 36 | tenant_id = %id, 37 | err = ?e, 38 | "JWT verification failed" 39 | ); 40 | return Err(e); 41 | } 42 | 43 | let new_tenant = state.tenant_store.update_tenant_delete_fcm_v1(&id).await?; 44 | 45 | if new_tenant.suspended { 46 | // If suspended, it can be restored now because valid credentials have been 47 | // provided 48 | state.tenant_store.unsuspend_tenant(&new_tenant.id).await?; 49 | } 50 | 51 | 
increment_counter!(state.metrics, tenant_fcm_v1_updates); 52 | 53 | Ok(StatusCode::NO_CONTENT) 54 | } 55 | -------------------------------------------------------------------------------- /src/handlers/delete_tenant.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::{error::Error, handlers::validate_tenant_request, log::prelude::*, state::AppState}, 3 | axum::{ 4 | extract::{Path, State}, 5 | http::HeaderMap, 6 | Json, 7 | }, 8 | serde::Serialize, 9 | std::sync::Arc, 10 | tracing::instrument, 11 | }; 12 | 13 | #[derive(Serialize)] 14 | pub struct DeleteTenantResponse { 15 | success: bool, 16 | } 17 | 18 | #[instrument(skip_all, name = "delete_tenant_handler")] 19 | pub async fn handler( 20 | State(state): State>, 21 | Path(id): Path, 22 | headers: HeaderMap, 23 | ) -> Result, Error> { 24 | #[cfg(feature = "cloud")] 25 | let verification_res = 26 | validate_tenant_request(&state.jwt_validation_client, &headers, &id).await; 27 | 28 | #[cfg(not(feature = "cloud"))] 29 | let verification_res = validate_tenant_request(&state.jwt_validation_client, &headers); 30 | 31 | if let Err(e) = verification_res { 32 | error!( 33 | tenant_id = %id, 34 | err = ?e, 35 | "JWT verification failed" 36 | ); 37 | return Err(e); 38 | } 39 | 40 | state.tenant_store.delete_tenant(&id).await?; 41 | 42 | debug!( 43 | tenant_id = %id, 44 | "deleted tenant" 45 | ); 46 | 47 | Ok(Json(DeleteTenantResponse { success: true })) 48 | } 49 | -------------------------------------------------------------------------------- /src/handlers/get_tenant.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::{ 3 | error::Error, 4 | handlers::validate_tenant_request, 5 | log::prelude::*, 6 | providers::{ProviderKind, PROVIDER_FCM_V1}, 7 | state::AppState, 8 | stores::tenant::ApnsType, 9 | }, 10 | axum::{ 11 | extract::{Path, State}, 12 | http::HeaderMap, 13 | Json, 14 | }, 15 | serde::{Deserialize, 
Serialize}, 16 | std::sync::Arc, 17 | tracing::instrument, 18 | }; 19 | 20 | #[derive(Serialize, Deserialize, Debug)] 21 | pub struct GetTenantResponse { 22 | pub url: String, 23 | pub enabled_providers: Vec, 24 | pub apns_topic: Option, 25 | pub apns_type: Option, 26 | pub suspended: bool, 27 | pub suspended_reason: Option, 28 | } 29 | 30 | #[instrument(skip_all, name = "get_tenant_handler")] 31 | pub async fn handler( 32 | State(state): State>, 33 | Path(id): Path, 34 | headers: HeaderMap, 35 | ) -> Result, Error> { 36 | #[cfg(feature = "cloud")] 37 | let verification_res = 38 | validate_tenant_request(&state.jwt_validation_client, &headers, &id).await; 39 | 40 | #[cfg(not(feature = "cloud"))] 41 | let verification_res = validate_tenant_request(&state.jwt_validation_client, &headers); 42 | 43 | if let Err(e) = verification_res { 44 | error!( 45 | tenant_id = %id, 46 | err = ?e, 47 | "JWT verification failed" 48 | ); 49 | return Err(e); 50 | } 51 | 52 | let tenant = state.tenant_store.get_tenant(&id).await?; 53 | 54 | let providers = tenant.providers(); 55 | 56 | let mut res = GetTenantResponse { 57 | url: format!("{}/{}", state.config.public_url, tenant.id), 58 | enabled_providers: tenant 59 | .providers() 60 | .iter() 61 | .map(Into::into) 62 | // Special case on fcm_v1 for credentials because providers() is also used for token management (of which FCM and FCM V1 tokens are the same) 63 | .chain(if tenant.fcm_v1_credentials.is_some() { 64 | vec![PROVIDER_FCM_V1.to_string()] 65 | } else { 66 | vec![] 67 | }) 68 | .collect(), 69 | apns_topic: None, 70 | apns_type: None, 71 | suspended: tenant.suspended, 72 | suspended_reason: tenant.suspended_reason, 73 | }; 74 | 75 | if providers.contains(&ProviderKind::Apns) { 76 | res.apns_topic = tenant.apns_topic; 77 | res.apns_type = tenant.apns_type; 78 | } 79 | 80 | debug!( 81 | tenant_id = %id, 82 | "requested tenant" 83 | ); 84 | 85 | Ok(Json(res)) 86 | } 87 | 
-------------------------------------------------------------------------------- /src/handlers/health.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::state::AppState, 3 | axum::{extract::State as ExtractState, http::StatusCode, response::IntoResponse}, 4 | std::sync::Arc, 5 | }; 6 | 7 | pub async fn handler(ExtractState(state): ExtractState>) -> impl IntoResponse { 8 | let build_commit = match state.build_info.version_control.clone() { 9 | Some(v) => v.git().unwrap().commit_short_id.clone(), 10 | None => String::new(), 11 | }; 12 | ( 13 | StatusCode::OK, 14 | format!( 15 | "OK v{}, commit hash: {}, features: {:?}, instance id: {}, uptime: {} seconds", 16 | state.build_info.crate_info.version, 17 | build_commit, 18 | state.build_info.crate_info.enabled_features, 19 | state.instance_id, 20 | state.uptime.elapsed().as_secs(), 21 | ), 22 | ) 23 | } 24 | -------------------------------------------------------------------------------- /src/handlers/metrics.rs: -------------------------------------------------------------------------------- 1 | use { 2 | axum::response::IntoResponse, hyper::StatusCode, tracing::error, wc::metrics::ServiceMetrics, 3 | }; 4 | 5 | pub async fn handler() -> impl IntoResponse { 6 | let result = ServiceMetrics::export(); 7 | 8 | match result { 9 | Ok(content) => (StatusCode::OK, content), 10 | Err(e) => { 11 | error!(?e, "Failed to parse metrics"); 12 | 13 | ( 14 | StatusCode::INTERNAL_SERVER_ERROR, 15 | "Failed to get metrics".to_string(), 16 | ) 17 | } 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /src/handlers/mod.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::{ 3 | error::{Error::InvalidAuthentication, Result}, 4 | jwt_validation::{Claims, JwtValidationClient}, 5 | }, 6 | axum::{ 7 | http::{header::AUTHORIZATION, HeaderMap}, 8 | response::IntoResponse, 9 | Json, 10 | }, 11 
| hyper::StatusCode, 12 | jsonwebtoken::TokenData, 13 | relay_rpc::{ 14 | domain::ClientId, 15 | jwt::{JwtBasicClaims, VerifyableClaims}, 16 | }, 17 | serde_json::{json, Value}, 18 | std::{collections::HashSet, string::ToString}, 19 | tracing::{debug, instrument}, 20 | }; 21 | 22 | // Push 23 | pub mod delete_client; 24 | pub mod metrics; 25 | pub mod push_message; 26 | pub mod register_client; 27 | #[cfg(not(feature = "multitenant"))] 28 | pub mod single_tenant_wrappers; 29 | // Tenant Management 30 | #[cfg(feature = "multitenant")] 31 | pub mod create_tenant; 32 | #[cfg(feature = "multitenant")] 33 | pub mod delete_apns; 34 | #[cfg(feature = "multitenant")] 35 | pub mod delete_fcm; 36 | #[cfg(feature = "multitenant")] 37 | pub mod delete_fcm_v1; 38 | #[cfg(feature = "multitenant")] 39 | pub mod delete_tenant; 40 | #[cfg(feature = "multitenant")] 41 | pub mod get_tenant; 42 | pub mod health; 43 | pub mod rate_limit_test; 44 | #[cfg(feature = "multitenant")] 45 | pub mod update_apns; 46 | #[cfg(feature = "multitenant")] 47 | pub mod update_fcm; 48 | #[cfg(feature = "multitenant")] 49 | pub mod update_fcm_v1; 50 | 51 | pub const DECENTRALIZED_IDENTIFIER_PREFIX: &str = "did:key:"; 52 | 53 | #[instrument(skip_all)] 54 | pub fn authenticate_client(headers: HeaderMap, aud: &str, check: F) -> Result 55 | where 56 | F: FnOnce(Option) -> bool, 57 | { 58 | return if let Some(auth_header) = headers.get(axum::http::header::AUTHORIZATION) { 59 | let header_str = auth_header.to_str()?; 60 | 61 | let claims = JwtBasicClaims::try_from_str(header_str).map_err(|e| { 62 | debug!("Invalid claims: {:?}", e); 63 | e 64 | })?; 65 | claims 66 | .verify_basic(&HashSet::from([aud.to_string()]), None) 67 | .map_err(|e| { 68 | debug!("Failed to verify_basic: {:?}", e); 69 | e 70 | })?; 71 | let client_id: ClientId = claims.iss.into(); 72 | Ok(check(Some(client_id))) 73 | } else { 74 | // Note: Authentication is not required right now to ensure that this is a 75 | // non-breaking change, 
eventually it will be required and this should default 76 | // to returning `Err(MissingAuthentication)` or `Err(InvalidAuthentication)` 77 | Ok(true) 78 | }; 79 | } 80 | 81 | #[derive(serde::Serialize)] 82 | #[serde(rename_all = "lowercase")] 83 | pub enum ErrorLocation { 84 | Body, 85 | // Note (Harry): Spec supports this but it currently isn't used 86 | // Query, 87 | Header, 88 | Path, 89 | Unknown, 90 | } 91 | 92 | #[derive(serde::Serialize)] 93 | #[serde(rename_all = "UPPERCASE")] 94 | pub enum ResponseStatus { 95 | Success, 96 | Failure, 97 | } 98 | 99 | #[derive(serde::Serialize)] 100 | pub struct ErrorField { 101 | pub field: String, 102 | pub description: String, 103 | pub location: ErrorLocation, 104 | } 105 | 106 | #[derive(serde::Serialize)] 107 | pub struct ResponseError { 108 | pub name: String, 109 | pub message: String, 110 | } 111 | 112 | #[derive(serde::Serialize)] 113 | pub struct Response { 114 | pub status: ResponseStatus, 115 | #[serde(skip_serializing)] 116 | pub status_code: StatusCode, 117 | pub errors: Option>, 118 | pub fields: Option>, 119 | } 120 | 121 | impl Response { 122 | pub fn new_success(status: StatusCode) -> Self { 123 | Response { 124 | status: ResponseStatus::Success, 125 | status_code: status, 126 | errors: None, 127 | fields: None, 128 | } 129 | } 130 | 131 | pub fn new_failure( 132 | status: StatusCode, 133 | errors: Vec, 134 | fields: Vec, 135 | ) -> Self { 136 | Response { 137 | status: ResponseStatus::Failure, 138 | status_code: status, 139 | errors: Some(errors), 140 | fields: Some(fields), 141 | } 142 | } 143 | } 144 | 145 | impl IntoResponse for Response { 146 | fn into_response(self) -> axum::response::Response { 147 | let status = self.status_code; 148 | let json: Json = self.into(); 149 | 150 | (status, json).into_response() 151 | } 152 | } 153 | 154 | impl From for Json { 155 | fn from(value: Response) -> Self { 156 | Json(json!(value)) 157 | } 158 | } 159 | 160 | impl Default for Response { 161 | fn default() 
-> Self { 162 | Response::new_success(StatusCode::OK) 163 | } 164 | } 165 | 166 | fn validate_jwt( 167 | jwt_validation_client: &JwtValidationClient, 168 | headers: &HeaderMap, 169 | ) -> Result> { 170 | if let Some(token_data) = headers.get(AUTHORIZATION) { 171 | // TODO Clients should always use `Bearer`, migrate them (if not already) and remove this optionality 172 | // TODO Specific not-bearer token error 173 | let jwt = token_data.to_str()?.to_string().replace("Bearer ", ""); 174 | jwt_validation_client 175 | .is_valid_token(jwt) 176 | .map_err(|_| InvalidAuthentication) 177 | } else { 178 | // TODO specific missing Authorization header error 179 | Err(InvalidAuthentication) 180 | } 181 | } 182 | 183 | #[cfg(feature = "cloud")] 184 | #[instrument(skip_all, fields(project_id = %project_id))] 185 | pub async fn validate_tenant_request( 186 | jwt_validation_client: &JwtValidationClient, 187 | headers: &HeaderMap, 188 | project_id: &str, 189 | ) -> Result<()> { 190 | let token_data = validate_jwt(jwt_validation_client, headers)?; 191 | if token_data.claims.sub == project_id { 192 | Ok(()) 193 | } else { 194 | // TODO specific wrong `sub` error 195 | Err(InvalidAuthentication) 196 | } 197 | } 198 | 199 | #[cfg(not(feature = "cloud"))] 200 | #[instrument(skip_all)] 201 | pub fn validate_tenant_request( 202 | jwt_validation_client: &JwtValidationClient, 203 | headers: &HeaderMap, 204 | ) -> Result<()> { 205 | validate_jwt(jwt_validation_client, headers).map(|_| ()) 206 | } 207 | -------------------------------------------------------------------------------- /src/handlers/rate_limit_test.rs: -------------------------------------------------------------------------------- 1 | use axum::{http::StatusCode, response::IntoResponse}; 2 | 3 | pub async fn handler() -> impl IntoResponse { 4 | StatusCode::OK.into_response() 5 | } 6 | -------------------------------------------------------------------------------- /src/handlers/register_client.rs: 
-------------------------------------------------------------------------------- 1 | #[cfg(feature = "analytics")] 2 | use {crate::analytics::client_info::ClientInfo, axum_client_ip::SecureClientIp}; 3 | use { 4 | crate::{ 5 | error::{ 6 | Error::{EmptyField, InvalidAuthentication, ProviderNotAvailable}, 7 | Result, 8 | }, 9 | handlers::{authenticate_client, Response, DECENTRALIZED_IDENTIFIER_PREFIX}, 10 | increment_counter, 11 | log::prelude::*, 12 | state::AppState, 13 | stores::client::Client, 14 | }, 15 | axum::{ 16 | extract::{Json, Path, State as StateExtractor}, 17 | http::HeaderMap, 18 | }, 19 | relay_rpc::domain::ClientId, 20 | serde::{Deserialize, Serialize}, 21 | std::sync::Arc, 22 | tracing::instrument, 23 | }; 24 | 25 | #[derive(Serialize, Deserialize)] 26 | pub struct RegisterBody { 27 | pub client_id: ClientId, 28 | #[serde(rename = "type")] 29 | pub push_type: String, 30 | pub token: String, 31 | pub always_raw: Option, 32 | } 33 | 34 | #[instrument(skip_all, name = "register_client_handler")] 35 | pub async fn handler( 36 | #[cfg(feature = "analytics")] SecureClientIp(client_ip): SecureClientIp, 37 | Path(tenant_id): Path, 38 | StateExtractor(state): StateExtractor>, 39 | headers: HeaderMap, 40 | Json(body): Json, 41 | ) -> Result { 42 | if !authenticate_client(headers, &state.config.public_url, |client_id| { 43 | if let Some(client_id) = client_id { 44 | debug!( 45 | %tenant_id, 46 | requested_client_id = %body.client_id, 47 | token_client_id = %client_id, 48 | "client_id authentication checking" 49 | ); 50 | client_id == body.client_id 51 | } else { 52 | debug!( 53 | %tenant_id, 54 | requested_client_id = %body.client_id, 55 | token_client_id = "unknown", 56 | "client_id verification failed: missing client_id" 57 | ); 58 | false 59 | } 60 | })? 
{ 61 | debug!( 62 | %tenant_id, 63 | requested_client_id = %body.client_id, 64 | token_client_id = "unknown", 65 | "client_id verification failed: invalid client_id" 66 | ); 67 | return Err(InvalidAuthentication); 68 | } 69 | 70 | let push_type = body.push_type.as_str().try_into()?; 71 | let tenant = state.tenant_store.get_tenant(&tenant_id).await?; 72 | let supported_providers = tenant.providers(); 73 | if !supported_providers.contains(&push_type) { 74 | return Err(ProviderNotAvailable(push_type.into())); 75 | } 76 | 77 | if body.token.is_empty() { 78 | return Err(EmptyField("token".to_string())); 79 | } 80 | 81 | let client_id = body 82 | .client_id 83 | .as_ref() 84 | .trim_start_matches(DECENTRALIZED_IDENTIFIER_PREFIX) 85 | .to_owned(); 86 | 87 | let always_raw = body.always_raw.unwrap_or(false); 88 | state 89 | .client_store 90 | .create_client( 91 | &tenant_id, 92 | &client_id, 93 | Client { 94 | tenant_id: tenant_id.clone(), 95 | push_type, 96 | token: body.token, 97 | always_raw, 98 | }, 99 | state.metrics.as_ref(), 100 | ) 101 | .await?; 102 | 103 | debug!( 104 | %tenant_id, %client_id, %push_type, "registered client" 105 | ); 106 | 107 | increment_counter!(state.metrics, registered_clients); 108 | 109 | // Analytics 110 | #[cfg(feature = "analytics")] 111 | tokio::spawn(async move { 112 | if let Some(analytics) = &state.analytics { 113 | let (country, continent, region) = analytics 114 | .lookup_geo_data(client_ip) 115 | .map_or((None, None, None), |geo| { 116 | (geo.country, geo.continent, geo.region) 117 | }); 118 | 119 | debug!( 120 | %tenant_id, 121 | %client_id, 122 | ip = %client_ip, 123 | "loaded geo data" 124 | ); 125 | 126 | let msg = ClientInfo { 127 | region: region.map(|r| Arc::from(r.join(", "))), 128 | country, 129 | continent, 130 | project_id: tenant_id.into(), 131 | client_id: client_id.into(), 132 | push_provider: body.push_type.as_str().into(), 133 | always_raw, 134 | registered_at: wc::analytics::time::now(), 135 | }; 136 | 137 | 
analytics.client(msg); 138 | } 139 | }); 140 | 141 | Ok(Response::default()) 142 | } 143 | -------------------------------------------------------------------------------- /src/handlers/single_tenant_wrappers.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "analytics")] 2 | use axum_client_ip::SecureClientIp; 3 | use { 4 | crate::{ 5 | error::Result, 6 | handlers::{push_message::PushMessageBody, register_client::RegisterBody, Response}, 7 | middleware::validate_signature::RequireValidSignature, 8 | state::AppState, 9 | stores::tenant::DEFAULT_TENANT_ID, 10 | }, 11 | axum::{ 12 | extract::{Path, State as StateExtractor}, 13 | Json, 14 | }, 15 | hyper::HeaderMap, 16 | std::sync::Arc, 17 | }; 18 | 19 | #[cfg(feature = "multitenant")] 20 | use crate::error::Error::MissingTenantId; 21 | 22 | pub async fn delete_handler( 23 | Path(id): Path, 24 | state: StateExtractor>, 25 | headers: HeaderMap, 26 | ) -> Result { 27 | #[cfg(feature = "multitenant")] 28 | return Err(MissingTenantId); 29 | 30 | #[cfg(not(feature = "multitenant"))] 31 | crate::handlers::delete_client::handler( 32 | Path((DEFAULT_TENANT_ID.to_string(), id)), 33 | state, 34 | headers, 35 | ) 36 | .await 37 | } 38 | 39 | pub async fn push_handler( 40 | #[cfg(feature = "analytics")] SecureClientIp(client_ip): SecureClientIp, 41 | Path(id): Path, 42 | state: StateExtractor>, 43 | valid_sig: RequireValidSignature>, 44 | ) -> Result { 45 | #[cfg(feature = "multitenant")] 46 | return Err(MissingTenantId); 47 | 48 | #[cfg(all(not(feature = "multitenant"), feature = "analytics"))] 49 | return crate::handlers::push_message::handler( 50 | SecureClientIp(client_ip), 51 | Path((DEFAULT_TENANT_ID.to_string(), id)), 52 | state, 53 | headers, 54 | valid_sig, 55 | ) 56 | .await; 57 | 58 | #[cfg(all(not(feature = "multitenant"), not(feature = "analytics")))] 59 | return crate::handlers::push_message::handler( 60 | Path((DEFAULT_TENANT_ID.to_string(), id)), 61 | state, 
62 | valid_sig, 63 | ) 64 | .await; 65 | } 66 | 67 | pub async fn register_handler( 68 | #[cfg(feature = "analytics")] SecureClientIp(client_ip): SecureClientIp, 69 | state: StateExtractor>, 70 | headers: HeaderMap, 71 | body: Json, 72 | ) -> Result { 73 | #[cfg(feature = "multitenant")] 74 | return Err(MissingTenantId); 75 | 76 | #[cfg(all(not(feature = "multitenant"), feature = "analytics"))] 77 | return crate::handlers::register_client::handler( 78 | SecureClientIp(client_ip), 79 | Path(DEFAULT_TENANT_ID.to_string()), 80 | state, 81 | headers, 82 | body, 83 | ) 84 | .await; 85 | 86 | #[cfg(all(not(feature = "multitenant"), not(feature = "analytics")))] 87 | return crate::handlers::register_client::handler( 88 | Path(DEFAULT_TENANT_ID.to_string()), 89 | state, 90 | headers, 91 | body, 92 | ) 93 | .await; 94 | } 95 | -------------------------------------------------------------------------------- /src/handlers/update_fcm.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::{ 3 | error::{ 4 | Error, 5 | Error::{BadFcmApiKey, InvalidMultipartBody}, 6 | }, 7 | handlers::validate_tenant_request, 8 | increment_counter, 9 | state::AppState, 10 | stores::tenant::TenantFcmUpdateParams, 11 | }, 12 | axum::{ 13 | extract::{Multipart, Path, State}, 14 | http::HeaderMap, 15 | Json, 16 | }, 17 | fcm::FcmError, 18 | serde::Serialize, 19 | std::sync::Arc, 20 | tracing::{error, instrument}, 21 | }; 22 | 23 | pub struct FcmUpdateBody { 24 | api_key: String, 25 | /// Used to ensure that at least one value has changed 26 | value_changed_: bool, 27 | } 28 | 29 | #[derive(Serialize)] 30 | pub struct UpdateTenantFcmResponse { 31 | success: bool, 32 | } 33 | 34 | #[instrument(skip_all, name = "update_fcm_handler")] 35 | pub async fn handler( 36 | State(state): State>, 37 | Path(id): Path, 38 | headers: HeaderMap, 39 | mut form_body: Multipart, 40 | ) -> Result, Error> { 41 | // JWT token verification 42 | #[cfg(feature = "cloud")] 43 
| let jwt_verification_result = 44 | validate_tenant_request(&state.jwt_validation_client, &headers, &id).await; 45 | 46 | #[cfg(not(feature = "cloud"))] 47 | let jwt_verification_result = validate_tenant_request(&state.jwt_validation_client, &headers); 48 | 49 | if let Err(e) = jwt_verification_result { 50 | error!( 51 | tenant_id = %id, 52 | err = ?e, 53 | "JWT verification failed" 54 | ); 55 | return Err(e); 56 | } 57 | 58 | // -- check if tenant is real 59 | let _existing_tenant = state.tenant_store.get_tenant(&id).await?; 60 | 61 | // ---- retrieve body from form 62 | let mut body = FcmUpdateBody { 63 | api_key: Default::default(), 64 | value_changed_: false, 65 | }; 66 | while let Some(field) = form_body.next_field().await? { 67 | let name = field.name().unwrap_or("unknown").to_string(); 68 | let data = field.text().await?; 69 | 70 | if name.to_lowercase().as_str() == "api_key" { 71 | body.api_key = data; 72 | body.value_changed_ = true; 73 | }; 74 | } 75 | if !body.value_changed_ { 76 | return Err(InvalidMultipartBody); 77 | } 78 | 79 | // ---- checks 80 | let fcm_api_key = body.api_key.clone(); 81 | let mut test_message_builder = fcm::MessageBuilder::new(&fcm_api_key, "wc-notification-test"); 82 | test_message_builder.dry_run(true); 83 | let test_message = test_message_builder.finalize(); 84 | let test_notification = fcm::Client::new().send(test_message).await; 85 | match test_notification { 86 | Err(e) => match e { 87 | FcmError::Unauthorized => Err(BadFcmApiKey), 88 | _ => Ok(()), 89 | }, 90 | Ok(_) => Ok(()), 91 | }?; 92 | 93 | // ---- handler 94 | let update_body = TenantFcmUpdateParams { 95 | fcm_api_key: body.api_key, 96 | }; 97 | 98 | let new_tenant = state 99 | .tenant_store 100 | .update_tenant_fcm(&id, update_body) 101 | .await?; 102 | 103 | if new_tenant.suspended { 104 | // If suspended, it can be restored now because valid credentials have been 105 | // provided 106 | state.tenant_store.unsuspend_tenant(&new_tenant.id).await?; 107 | } 108 | 109 
| increment_counter!(state.metrics, tenant_fcm_updates); 110 | 111 | Ok(Json(UpdateTenantFcmResponse { success: true })) 112 | } 113 | -------------------------------------------------------------------------------- /src/handlers/update_fcm_v1.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::{ 3 | error::{Error, Error::InvalidMultipartBody}, 4 | handlers::validate_tenant_request, 5 | increment_counter, 6 | state::AppState, 7 | stores::tenant::TenantFcmV1UpdateParams, 8 | }, 9 | axum::{ 10 | extract::{Multipart, Path, State}, 11 | http::HeaderMap, 12 | Json, 13 | }, 14 | serde::Serialize, 15 | std::sync::Arc, 16 | tracing::{debug, error, instrument}, 17 | }; 18 | 19 | pub struct FcmV1UpdateBody { 20 | credentials: String, 21 | /// Used to ensure that at least one value has changed 22 | value_changed_: bool, 23 | } 24 | 25 | #[derive(Serialize)] 26 | pub struct UpdateTenantFcmV1Response { 27 | success: bool, 28 | } 29 | 30 | #[instrument(skip_all, name = "update_fcm_v1_handler")] 31 | pub async fn handler( 32 | State(state): State>, 33 | Path(id): Path, 34 | headers: HeaderMap, 35 | mut form_body: Multipart, 36 | ) -> Result, Error> { 37 | // JWT token verification 38 | #[cfg(feature = "cloud")] 39 | let jwt_verification_result = 40 | validate_tenant_request(&state.jwt_validation_client, &headers, &id).await; 41 | 42 | // -- check if tenant is real 43 | let _existing_tenant = state.tenant_store.get_tenant(&id).await?; 44 | 45 | #[cfg(not(feature = "cloud"))] 46 | let jwt_verification_result = validate_tenant_request(&state.jwt_validation_client, &headers); 47 | 48 | if let Err(e) = jwt_verification_result { 49 | error!( 50 | tenant_id = %id, 51 | err = ?e, 52 | "JWT verification failed" 53 | ); 54 | return Err(e); 55 | } 56 | 57 | // ---- retrieve body from form 58 | let mut body = FcmV1UpdateBody { 59 | credentials: Default::default(), 60 | value_changed_: false, 61 | }; 62 | while let Some(field) = 
form_body.next_field().await? { 63 | let name = field.name().unwrap_or("unknown").to_string(); 64 | let data = field.text().await?; 65 | 66 | if name.to_lowercase().as_str() == "credentials" { 67 | body.credentials = data; 68 | body.value_changed_ = true; 69 | }; 70 | } 71 | if !body.value_changed_ { 72 | return Err(InvalidMultipartBody); 73 | } 74 | 75 | // Client will validate the key on startup 76 | fcm_v1::Client::from_key( 77 | serde_json::from_str(&body.credentials).map_err(Error::FcmV1InvalidServiceAccountKey)?, 78 | ) 79 | .await 80 | .map_err(|e| { 81 | debug!("Failed credential validation: {e}"); 82 | Error::BadFcmV1Credentials 83 | })?; 84 | 85 | // ---- handler 86 | let update_body = TenantFcmV1UpdateParams { 87 | fcm_v1_credentials: body.credentials, 88 | }; 89 | 90 | let new_tenant = state 91 | .tenant_store 92 | .update_tenant_fcm_v1(&id, update_body) 93 | .await?; 94 | 95 | if new_tenant.suspended { 96 | // If suspended, it can be restored now because valid credentials have been 97 | // provided 98 | state.tenant_store.unsuspend_tenant(&new_tenant.id).await?; 99 | } 100 | 101 | increment_counter!(state.metrics, tenant_fcm_v1_updates); 102 | 103 | Ok(Json(UpdateTenantFcmV1Response { success: true })) 104 | } 105 | -------------------------------------------------------------------------------- /src/jwt_validation/mod.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::error::Result, 3 | jsonwebtoken::{Algorithm, DecodingKey, TokenData, Validation}, 4 | serde::{Deserialize, Serialize}, 5 | }; 6 | 7 | #[derive(Serialize, Deserialize)] 8 | pub struct Claims { 9 | pub sub: String, 10 | } 11 | 12 | #[derive(Clone)] 13 | pub struct JwtValidationClient { 14 | decoding_key: DecodingKey, 15 | validation: Validation, 16 | } 17 | 18 | impl JwtValidationClient { 19 | pub fn new(jwt_secret: String) -> JwtValidationClient { 20 | JwtValidationClient { 21 | decoding_key: 
DecodingKey::from_secret(jwt_secret.as_bytes()), 22 | validation: Validation::new(Algorithm::HS256), 23 | } 24 | } 25 | 26 | pub fn is_valid_token(&self, jwt: String) -> Result> { 27 | Ok(jsonwebtoken::decode::( 28 | &jwt, 29 | &self.decoding_key, 30 | &self.validation, 31 | )?) 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /src/log/mod.rs: -------------------------------------------------------------------------------- 1 | //! This library serves as a thin, opinionated wrapper over the underlying 2 | //! logger apparatus. By default, this crate only exports the various macros, 3 | //! traits and types used in library logging. 4 | //! 5 | //! However, the top level binary may enable the "logger" feature to gain access 6 | //! to the machinery for initializing the global logger. 7 | //! 8 | //! There also some other utility functions that may be accessed by their 9 | //! feature gate. See the [features] section of Cargo.toml for more. 10 | pub use tracing::{debug, error, info, trace, warn}; 11 | use { 12 | tracing_appender::non_blocking::WorkerGuard, 13 | tracing_subscriber::{prelude::*, EnvFilter}, 14 | }; 15 | 16 | pub mod prelude { 17 | //! Reexport of the most common macros and traits used for logging. 18 | //! 19 | //! Typically you may simply add `use log::prelude::*` and get access to all 20 | //! of the usual macros (info!, error!, debug!, etc). 21 | 22 | pub use tracing::{debug, error, info, trace, warn}; 23 | } 24 | 25 | /// The default log level for the stderr logger, which is used as a fallback if 26 | /// no other can be found. 27 | const DEFAULT_LOG_LEVEL_STDERR: tracing::Level = tracing::Level::WARN; 28 | 29 | /// The environment variable used to control the stderr logger. 
30 | const ENV_LOG_LEVEL_STDERR: &str = "LOG_LEVEL"; 31 | 32 | pub struct Logger { 33 | _guard: WorkerGuard, 34 | } 35 | 36 | impl Logger { 37 | pub fn init() -> crate::error::Result { 38 | let stderr_filter = EnvFilter::try_from_env(ENV_LOG_LEVEL_STDERR) 39 | .unwrap_or_else(|_| EnvFilter::new(DEFAULT_LOG_LEVEL_STDERR.to_string())); 40 | 41 | let (writer, guard) = tracing_appender::non_blocking(std::io::stderr()); 42 | 43 | let logger = tracing_subscriber::fmt::layer() 44 | .with_target(false) 45 | .with_ansi(atty::is(atty::Stream::Stderr)) 46 | .with_writer(writer) 47 | .with_filter(stderr_filter) 48 | .boxed(); 49 | 50 | tracing_subscriber::registry().with(logger).init(); 51 | 52 | Ok(Self { _guard: guard }) 53 | } 54 | 55 | pub fn stop(self) { 56 | // Consume self to trigger drop. 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /src/macros.rs: -------------------------------------------------------------------------------- 1 | #[macro_export] 2 | macro_rules! increment_counter { 3 | ($state:ident$(.$property:ident)*, $metric:ident) => {{ 4 | use tracing::debug; 5 | 6 | if let Some(metrics) = &$state$(.$property)* { 7 | metrics.$metric.add(1, &[]); 8 | debug!("incremented `{}` counter", stringify!($metric)); 9 | } 10 | }}; 11 | } 12 | 13 | #[macro_export] 14 | macro_rules! 
decrement_counter { 15 | ($state:ident$(.$property:ident)*, $metric:ident) => {{ 16 | use tracing::debug; 17 | 18 | if let Some(metrics) = &$state$(.$property)* { 19 | metrics.$metric.add(-1, &[]); 20 | debug!("decremented `{}` counter", stringify!($metric)); 21 | } 22 | }}; 23 | } 24 | -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | use { 2 | dotenv::dotenv, 3 | echo_server::{config, log}, 4 | tokio::sync::broadcast, 5 | }; 6 | 7 | #[tokio::main] 8 | async fn main() -> echo_server::error::Result<()> { 9 | let logger = log::Logger::init().expect("Failed to start logging"); 10 | 11 | let (_signal, shutdown) = broadcast::channel(1); 12 | dotenv().ok(); 13 | let config = config::get_config() 14 | .expect("Failed to load config, please ensure all env vars are defined."); 15 | 16 | let result = echo_server::bootstap(shutdown, config).await; 17 | 18 | logger.stop(); 19 | 20 | result 21 | } 22 | -------------------------------------------------------------------------------- /src/metrics/mod.rs: -------------------------------------------------------------------------------- 1 | use { 2 | std::time::Instant, 3 | wc::metrics::{ 4 | otel::{ 5 | metrics::{Counter, Histogram}, 6 | KeyValue, 7 | }, 8 | ServiceMetrics, 9 | }, 10 | }; 11 | 12 | #[derive(Clone)] 13 | pub struct Metrics { 14 | pub received_notifications: Counter, 15 | pub sent_fcm_notifications: Counter, 16 | pub sent_fcm_v1_notifications: Counter, 17 | pub sent_apns_notifications: Counter, 18 | 19 | pub registered_clients: Counter, 20 | pub registered_tenants: Counter, 21 | 22 | pub tenant_apns_updates: Counter, 23 | pub tenant_fcm_updates: Counter, 24 | pub tenant_fcm_v1_updates: Counter, 25 | 26 | pub tenant_suspensions: Counter, 27 | pub client_suspensions: Counter, 28 | 29 | postgres_queries: Counter, 30 | postgres_query_latency: Histogram, 31 | } 32 | 33 | impl Default for 
Metrics { 34 | fn default() -> Self { 35 | Self::new() 36 | } 37 | } 38 | 39 | impl Metrics { 40 | pub fn new() -> Self { 41 | ServiceMetrics::init_with_name("echo-server"); 42 | let meter = ServiceMetrics::meter(); 43 | 44 | let clients_counter = meter 45 | .u64_counter("registered_clients") 46 | .with_description("The number of currently registered clients") 47 | .init(); 48 | 49 | let tenants_counter = meter 50 | .u64_counter("registered_tenants") 51 | .with_description("The number of currently registered tenants") 52 | .init(); 53 | 54 | let received_notification_counter = meter 55 | .u64_counter("received_notifications") 56 | .with_description("The number of notification received") 57 | .init(); 58 | 59 | let sent_fcm_notification_counter = meter 60 | .u64_counter("sent_fcm_notifications") 61 | .with_description("The number of notifications sent to FCM") 62 | .init(); 63 | 64 | let sent_fcm_v1_notification_counter = meter 65 | .u64_counter("sent_fcm_v1_notifications") 66 | .with_description("The number of notifications sent to FCM") 67 | .init(); 68 | 69 | let sent_apns_notification_counter = meter 70 | .u64_counter("sent_apns_notifications") 71 | .with_description("The number of notifications sent to APNS") 72 | .init(); 73 | 74 | let tenant_apns_updates_counter = meter 75 | .u64_counter("tenant_apns_updates") 76 | .with_description("The number of times tenants have updated their APNS") 77 | .init(); 78 | 79 | let tenant_fcm_updates_counter = meter 80 | .u64_counter("tenant_fcm_updates") 81 | .with_description("The number of times tenants have updated their FCM") 82 | .init(); 83 | 84 | let tenant_fcm_v1_updates_counter = meter 85 | .u64_counter("tenant_fcm_v1_updates") 86 | .with_description("The number of times tenants have updated their FCM") 87 | .init(); 88 | 89 | let tenant_suspensions_counter = meter 90 | .u64_counter("tenant_suspensions") 91 | .with_description("The number of tenants that have been suspended") 92 | .init(); 93 | 94 | let 
client_suspensions_counter = meter 95 | .u64_counter("client_suspensions") 96 | .with_description("The number of clients that have been suspended") 97 | .init(); 98 | 99 | let postgres_queries: Counter = meter 100 | .u64_counter("postgres_queries") 101 | .with_description("The number of Postgres queries executed") 102 | .init(); 103 | 104 | let postgres_query_latency: Histogram = meter 105 | .u64_histogram("postgres_query_latency") 106 | .with_description("The latency Postgres queries") 107 | .init(); 108 | 109 | Metrics { 110 | registered_clients: clients_counter, 111 | received_notifications: received_notification_counter, 112 | sent_fcm_notifications: sent_fcm_notification_counter, 113 | sent_fcm_v1_notifications: sent_fcm_v1_notification_counter, 114 | sent_apns_notifications: sent_apns_notification_counter, 115 | registered_tenants: tenants_counter, 116 | tenant_apns_updates: tenant_apns_updates_counter, 117 | tenant_fcm_updates: tenant_fcm_updates_counter, 118 | tenant_fcm_v1_updates: tenant_fcm_v1_updates_counter, 119 | tenant_suspensions: tenant_suspensions_counter, 120 | client_suspensions: client_suspensions_counter, 121 | postgres_queries, 122 | postgres_query_latency, 123 | } 124 | } 125 | 126 | pub fn postgres_query(&self, query_name: &'static str, start: Instant) { 127 | let elapsed = start.elapsed(); 128 | 129 | let attributes = [KeyValue::new("name", query_name)]; 130 | self.postgres_queries.add(1, &attributes); 131 | self.postgres_query_latency 132 | .record(elapsed.as_millis() as u64, &attributes); 133 | } 134 | } 135 | -------------------------------------------------------------------------------- /src/middleware/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod rate_limit; 2 | pub mod validate_signature; 3 | -------------------------------------------------------------------------------- /src/middleware/rate_limit.rs: 
-------------------------------------------------------------------------------- 1 | use crate::{networking, state::AppState}; 2 | use axum::{ 3 | extract::Request, 4 | extract::State, 5 | http::StatusCode, 6 | middleware::Next, 7 | response::{IntoResponse, Response}, 8 | }; 9 | use moka::future::Cache; 10 | use std::{net::IpAddr, sync::Arc}; 11 | use tokio::time::Duration; 12 | use tracing::error; 13 | 14 | #[derive(Clone)] 15 | pub struct RateLimiter { 16 | cache: Arc>, 17 | max_requests: u32, 18 | } 19 | 20 | impl RateLimiter { 21 | pub fn new(max_requests: u32, window: Duration) -> Self { 22 | Self { 23 | cache: Arc::new(Cache::builder().time_to_live(window).build()), 24 | max_requests, 25 | } 26 | } 27 | } 28 | 29 | /// Rate limit middleware that limits the number of requests per second from a single IP address and 30 | /// uses in-memory caching to store the number of requests. 31 | pub async fn rate_limit_middleware( 32 | State(state): State>, 33 | req: Request, 34 | next: Next, 35 | ) -> Response { 36 | let headers = req.headers().clone(); 37 | let client_ip = match networking::get_forwarded_ip(headers.clone()) { 38 | Some(ip) => ip, 39 | None => { 40 | error!( 41 | "Failed to get forwarded IP from request in rate limiting middleware. Skipping the \ 42 | rate-limiting." 43 | ); 44 | // We are skipping the drop to the connect info IP address here, because we are 45 | // using the Load Balancer and if any issues with the X-Forwarded-IP header, we 46 | // will rate-limit the LB IP address. 
47 | return next.run(req).await; 48 | } 49 | }; 50 | 51 | let rate_limiter = &state.rate_limit; 52 | let mut rate_limit = rate_limiter.cache.get_with(client_ip, async { 0 }).await; 53 | 54 | if rate_limit < rate_limiter.max_requests { 55 | rate_limit += 1; 56 | rate_limiter.cache.insert(client_ip, rate_limit).await; 57 | next.run(req).await 58 | } else { 59 | (StatusCode::TOO_MANY_REQUESTS, "Too many requests").into_response() 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /src/middleware/validate_signature.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::{ 3 | error::Error::{ 4 | FromRequestError, MissingAllSignatureHeader, MissingSignatureHeader, 5 | MissingTimestampHeader, ToBytesError, 6 | }, 7 | state::State, 8 | }, 9 | async_trait::async_trait, 10 | axum::{ 11 | body::to_bytes, 12 | extract::{FromRequest, Request}, 13 | }, 14 | ed25519_dalek::{Signature, VerifyingKey}, 15 | tracing::instrument, 16 | }; 17 | 18 | pub const SIGNATURE_HEADER_NAME: &str = "X-Ed25519-Signature"; 19 | pub const TIMESTAMP_HEADER_NAME: &str = "X-Ed25519-Timestamp"; 20 | 21 | pub struct RequireValidSignature(pub T); 22 | 23 | #[async_trait] 24 | impl FromRequest for RequireValidSignature 25 | where 26 | S: Send + Sync + State, 27 | T: FromRequest, 28 | { 29 | type Rejection = crate::error::Error; 30 | 31 | #[instrument(skip_all)] 32 | async fn from_request(req: Request, state: &S) -> Result { 33 | if !state.validate_signatures() { 34 | // Skip signature validation 35 | return T::from_request(req, state) 36 | .await 37 | .map(Self) 38 | .map_err(|_| FromRequestError); 39 | } 40 | 41 | let state_binding = state.relay_client(); 42 | let public_key = state_binding.get_verifying_key(); 43 | 44 | let (parts, body_raw) = req.into_parts(); 45 | const MAX_BODY: usize = 1024 * 1024 * 100; // prolly too big but better than usize::MAX 46 | let bytes = to_bytes(body_raw, MAX_BODY) 47 | 
.await 48 | .map_err(|_| ToBytesError)?; 49 | let body = String::from_utf8_lossy(&bytes); 50 | 51 | let signature_header = parts 52 | .headers 53 | .get(SIGNATURE_HEADER_NAME) 54 | .and_then(|header| header.to_str().ok()); 55 | 56 | let timestamp_header = parts 57 | .headers 58 | .get(TIMESTAMP_HEADER_NAME) 59 | .and_then(|header| header.to_str().ok()); 60 | 61 | match (signature_header, timestamp_header) { 62 | (Some(signature), Some(timestamp)) 63 | if signature_is_valid(signature, timestamp, &body, public_key).await? => 64 | { 65 | let req = Request::from_parts(parts, bytes.into()); 66 | Ok(T::from_request(req, state) 67 | .await 68 | .map(Self) 69 | .map_err(|_| FromRequestError)?) 70 | } 71 | (Some(_), None) => Err(MissingTimestampHeader), 72 | (None, Some(_)) => Err(MissingSignatureHeader), 73 | (None, None) => Err(MissingAllSignatureHeader), 74 | _ => Err(MissingAllSignatureHeader), 75 | } 76 | } 77 | } 78 | 79 | pub async fn signature_is_valid( 80 | signature: &str, 81 | timestamp: &str, 82 | body: &str, 83 | public_key: &VerifyingKey, 84 | ) -> Result { 85 | let sig_body = format!("{}.{}.{}", timestamp, body.len(), body); 86 | 87 | let sig_bytes = hex::decode(signature).map_err(crate::error::Error::Hex)?; 88 | let sig = Signature::try_from(sig_bytes.as_slice())?; 89 | 90 | Ok(public_key.verify_strict(sig_body.as_bytes(), &sig).is_ok()) 91 | } 92 | -------------------------------------------------------------------------------- /src/networking.rs: -------------------------------------------------------------------------------- 1 | use {axum::http::HeaderMap, ipnet::IpNet, std::net::IpAddr}; 2 | 3 | #[derive(thiserror::Error, Debug)] 4 | pub enum NetworkInterfaceError { 5 | #[error("machine has no public IP address")] 6 | PublicAddressNotFound, 7 | #[error("machine has multiple public IP addresses")] 8 | MultiplePublicAddresses, 9 | } 10 | 11 | /// Attempts to find the public IP address of this machine. 
12 | pub fn find_public_ip_addr() -> Result { 13 | let addrs: Vec<_> = pnet_datalink::interfaces() 14 | .into_iter() 15 | .flat_map(|iface| { 16 | iface 17 | .ips 18 | .into_iter() 19 | .filter(|ip| ip.is_ipv4() && is_public_ip_addr(ip.ip())) 20 | .map(|ip| ip.ip()) 21 | }) 22 | .collect(); 23 | 24 | if addrs.is_empty() { 25 | Err(NetworkInterfaceError::PublicAddressNotFound) 26 | } else if addrs.len() > 1 { 27 | Err(NetworkInterfaceError::MultiplePublicAddresses) 28 | } else { 29 | Ok(addrs[0]) 30 | } 31 | } 32 | 33 | fn is_public_ip_addr(addr: IpAddr) -> bool { 34 | use once_cell::sync::Lazy; 35 | 36 | static RESERVED_NETWORKS: Lazy<[IpNet; 24]> = Lazy::new(|| { 37 | [ 38 | "0.0.0.0/8", 39 | "0.0.0.0/32", 40 | "100.64.0.0/10", 41 | "127.0.0.0/8", 42 | "169.254.0.0/16", 43 | "172.16.0.0/12", 44 | "192.0.0.0/24", 45 | "192.0.0.0/29", 46 | "192.0.0.8/32", 47 | "192.0.0.9/32", 48 | "192.0.0.10/32", 49 | "192.0.0.170/32", 50 | "192.0.0.171/32", 51 | "192.0.2.0/24", 52 | "192.31.196.0/24", 53 | "192.52.193.0/24", 54 | "192.88.99.0/24", 55 | "192.168.0.0/16", 56 | "192.175.48.0/24", 57 | "198.18.0.0/15", 58 | "198.51.100.0/24", 59 | "203.0.113.0/24", 60 | "240.0.0.0/4", 61 | "255.255.255.255/32", 62 | ] 63 | .map(|net| net.parse().unwrap()) 64 | }); 65 | 66 | RESERVED_NETWORKS.iter().all(|range| !range.contains(&addr)) 67 | } 68 | 69 | pub fn get_forwarded_ip(headers: HeaderMap) -> Option { 70 | headers 71 | .get("X-Forwarded-For") 72 | .and_then(|header| header.to_str().ok()) 73 | .and_then(|header| header.split(',').next()) 74 | .and_then(|client_ip| client_ip.trim().parse::().ok()) 75 | } 76 | -------------------------------------------------------------------------------- /src/providers/fcm.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::{LegacyPushMessage, PushMessage}, 3 | crate::{blob::DecryptedPayloadBlob, error::Error, providers::PushProvider}, 4 | async_trait::async_trait, 5 | fcm::{ErrorReason, 
FcmError, FcmResponse, MessageBuilder, NotificationBuilder, Priority}, 6 | std::fmt::{Debug, Formatter}, 7 | tracing::{debug, instrument}, 8 | }; 9 | 10 | pub struct FcmProvider { 11 | api_key: String, 12 | client: fcm::Client, 13 | } 14 | 15 | impl FcmProvider { 16 | pub fn new(api_key: String) -> Self { 17 | FcmProvider { 18 | api_key, 19 | client: fcm::Client::new(), 20 | } 21 | } 22 | } 23 | 24 | #[async_trait] 25 | impl PushProvider for FcmProvider { 26 | #[instrument(name = "send_fcm_notification")] 27 | async fn send_notification( 28 | &self, 29 | token: String, 30 | body: PushMessage, 31 | ) -> crate::error::Result<()> { 32 | let mut message_builder = MessageBuilder::new(self.api_key.as_str(), token.as_str()); 33 | 34 | let result = match body { 35 | PushMessage::RawPushMessage(message) => { 36 | // Sending `always_raw` encrypted message 37 | debug!("Sending raw encrypted message"); 38 | message_builder 39 | .data(&message) 40 | .map_err(Error::InternalSerializationError)?; 41 | set_message_priority_high(&mut message_builder); 42 | let fcm_message = message_builder.finalize(); 43 | self.client.send(fcm_message).await 44 | } 45 | PushMessage::LegacyPushMessage(LegacyPushMessage { id: _, payload }) => { 46 | if payload.is_encrypted() { 47 | debug!("Sending legacy `is_encrypted` message"); 48 | message_builder 49 | .data(&payload) 50 | .map_err(Error::InternalSerializationError)?; 51 | set_message_priority_high(&mut message_builder); 52 | let fcm_message = message_builder.finalize(); 53 | self.client.send(fcm_message).await 54 | } else { 55 | debug!("Sending plain message"); 56 | let blob = DecryptedPayloadBlob::from_base64_encoded(&payload.blob)?; 57 | 58 | let mut notification_builder = NotificationBuilder::new(); 59 | notification_builder.title(blob.title.as_str()); 60 | notification_builder.body(blob.body.as_str()); 61 | let notification = notification_builder.finalize(); 62 | 63 | message_builder.notification(notification); 64 | message_builder 65 | 
.data(&payload) 66 | .map_err(Error::InternalSerializationError)?; 67 | let fcm_message = message_builder.finalize(); 68 | self.client.send(fcm_message).await 69 | } 70 | } 71 | }; 72 | 73 | match result { 74 | Ok(val) => { 75 | let FcmResponse { error, .. } = val; 76 | if let Some(error) = error { 77 | match error { 78 | ErrorReason::MissingRegistration => Err(Error::BadDeviceToken( 79 | "Missing registration for token".into(), 80 | )), 81 | ErrorReason::InvalidRegistration => { 82 | Err(Error::BadDeviceToken("Invalid token registration".into())) 83 | } 84 | ErrorReason::NotRegistered => { 85 | Err(Error::BadDeviceToken("Token is not registered".into())) 86 | } 87 | ErrorReason::InvalidApnsCredential => Err(Error::BadApnsCredentials), 88 | e => Err(Error::FcmResponse(e)), 89 | } 90 | } else { 91 | Ok(()) 92 | } 93 | } 94 | Err(e) => match e { 95 | FcmError::Unauthorized => Err(Error::BadFcmApiKey), 96 | e => Err(Error::Fcm(e)), 97 | }, 98 | } 99 | } 100 | } 101 | 102 | // Manual Impl Because `fcm::Client` does not derive anything and doesn't need 103 | // to be accounted for 104 | 105 | impl Clone for FcmProvider { 106 | fn clone(&self) -> Self { 107 | FcmProvider { 108 | api_key: self.api_key.clone(), 109 | client: fcm::Client::new(), 110 | } 111 | } 112 | 113 | fn clone_from(&mut self, source: &Self) { 114 | self.api_key.clone_from(&source.api_key); 115 | self.client = fcm::Client::new(); 116 | } 117 | } 118 | 119 | impl PartialEq for FcmProvider { 120 | fn eq(&self, other: &Self) -> bool { 121 | self.api_key == other.api_key 122 | } 123 | } 124 | 125 | impl Debug for FcmProvider { 126 | fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { 127 | // SECURITY: never print the raw API key. `#[instrument]` on
 // `send_notification` records `self` via this `Debug` impl, so the
 // previous `{}` of `self.api_key` leaked the FCM secret into tracing
 // spans and logs. Redact it instead.
 write!(f, "[FcmProvider] api_key = <redacted>") 128 | } 129 | } 130 | 131 | /// Setting message priority to high and content-available to true 132 | /// on data-only messages or they don't show unless app is active 133 | /// https://rnfirebase.io/messaging/usage#data-only-messages 134 | fn
set_message_priority_high(builder: &mut MessageBuilder) { 135 | builder.priority(Priority::High); 136 | builder.content_available(true); 137 | } 138 | -------------------------------------------------------------------------------- /src/providers/fcm_v1.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::{LegacyPushMessage, PushMessage}, 3 | crate::{blob::DecryptedPayloadBlob, error::Error, providers::PushProvider}, 4 | async_trait::async_trait, 5 | fcm_v1::{ 6 | gauth::serv_account::ServiceAccountKey, AndroidConfig, AndroidMessagePriority, ApnsConfig, 7 | Client, ClientBuildError, Message, Notification, SendError, Target, 8 | }, 9 | serde::Serialize, 10 | serde_json::json, 11 | std::sync::Arc, 12 | tracing::{debug, instrument}, 13 | }; 14 | 15 | #[derive(Debug, Clone)] 16 | pub struct FcmV1Provider { 17 | client: Client, 18 | } 19 | 20 | impl FcmV1Provider { 21 | pub async fn new( 22 | credentials: ServiceAccountKey, 23 | http_client: reqwest::Client, 24 | ) -> Result { 25 | let client = Client::builder() 26 | .http_client(http_client) 27 | .build(credentials) 28 | .await?; 29 | Ok(Self { client }) 30 | } 31 | } 32 | 33 | #[async_trait] 34 | impl PushProvider for FcmV1Provider { 35 | #[instrument(name = "send_fcm_v1_notification", skip_all)] 36 | async fn send_notification( 37 | &self, 38 | token: String, 39 | body: PushMessage, 40 | ) -> crate::error::Result<()> { 41 | fn make_message( 42 | token: String, 43 | notification: Option, 44 | data: serde_json::Value, 45 | ) -> Message { 46 | Message { 47 | data: Some(data), 48 | notification, 49 | target: Target::Token(token), 50 | android: Some(AndroidConfig { 51 | priority: Some(AndroidMessagePriority::High), 52 | ..Default::default() 53 | }), 54 | webpush: None, 55 | apns: Some(ApnsConfig { 56 | payload: Some(json!({ 57 | "aps": { 58 | "content-available": 1, 59 | } 60 | })), 61 | ..Default::default() 62 | }), 63 | fcm_options: None, 64 | } 65 | } 66 | 67 | let 
result = match body { 68 | PushMessage::RawPushMessage(message) => { 69 | // Sending `always_raw` encrypted message 70 | debug!("Sending raw encrypted message"); 71 | #[derive(Serialize)] 72 | pub struct FcmV1RawPushMessage { 73 | pub topic: Arc, 74 | pub tag: String, 75 | pub message: Arc, 76 | } 77 | let data = serde_json::to_value(FcmV1RawPushMessage { 78 | // All keys must be strings 79 | topic: message.topic, 80 | tag: message.tag.to_string(), 81 | message: message.message, 82 | }) 83 | .map_err(Error::InternalSerializationError)?; 84 | let message = make_message(token, None, data); 85 | self.client.send(message).await 86 | } 87 | PushMessage::LegacyPushMessage(LegacyPushMessage { id: _, payload }) => { 88 | #[derive(Serialize)] 89 | pub struct FcmV1MessagePayload { 90 | pub topic: Arc, 91 | pub flags: String, 92 | pub blob: Arc, 93 | } 94 | let data = serde_json::to_value(FcmV1MessagePayload { 95 | // All keys must be strings 96 | topic: payload.topic.clone(), 97 | flags: payload.flags.to_string(), 98 | blob: payload.blob.clone(), 99 | }) 100 | .map_err(Error::InternalSerializationError)?; 101 | 102 | if payload.is_encrypted() { 103 | debug!("Sending legacy `is_encrypted` message"); 104 | let message = make_message(token, None, data); 105 | self.client.send(message).await 106 | } else { 107 | debug!("Sending plain message"); 108 | let blob = DecryptedPayloadBlob::from_base64_encoded(&payload.blob)?; 109 | 110 | let notification = Notification { 111 | title: Some(blob.title), 112 | body: Some(blob.body), 113 | ..Default::default() 114 | }; 115 | let message = make_message(token, Some(notification), data); 116 | self.client.send(message).await 117 | } 118 | } 119 | }; 120 | 121 | result.map(|_| ()).map_err(|e| match e { 122 | SendError::Unregistered => Error::BadDeviceToken("Token was unregistered".into()), 123 | SendError::Forbidden => Error::BadFcmV1Credentials, 124 | e => Error::FcmV1(e), 125 | }) 126 | } 127 | } 128 | 
-------------------------------------------------------------------------------- /src/providers/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod apns; 2 | pub mod fcm; 3 | pub mod fcm_v1; 4 | #[cfg(any(debug_assertions, test))] 5 | pub mod noop; 6 | 7 | use { 8 | self::fcm_v1::FcmV1Provider, 9 | crate::{ 10 | blob::ENCRYPTED_FLAG, 11 | error, 12 | providers::{apns::ApnsProvider, fcm::FcmProvider}, 13 | }, 14 | async_trait::async_trait, 15 | relay_rpc::rpc::msg_id::get_message_id, 16 | serde::{Deserialize, Serialize}, 17 | std::{ 18 | fmt::{Display, Formatter}, 19 | sync::Arc, 20 | }, 21 | tracing::instrument, 22 | }; 23 | 24 | #[cfg(any(debug_assertions, test))] 25 | use crate::providers::noop::NoopProvider; 26 | 27 | #[derive(Clone, Debug, Eq, PartialEq)] 28 | pub enum PushMessage { 29 | LegacyPushMessage(LegacyPushMessage), 30 | RawPushMessage(RawPushMessage), 31 | } 32 | 33 | impl PushMessage { 34 | pub fn message_id(&self) -> Arc { 35 | match self { 36 | Self::RawPushMessage(msg) => get_message_id(&msg.message).into(), 37 | Self::LegacyPushMessage(msg) => msg.id.clone(), 38 | } 39 | } 40 | 41 | pub fn topic(&self) -> Arc { 42 | match self { 43 | Self::RawPushMessage(msg) => msg.topic.clone(), 44 | Self::LegacyPushMessage(msg) => msg.payload.topic.clone(), 45 | } 46 | } 47 | } 48 | 49 | #[derive(Serialize, Deserialize, Clone, Debug, Eq, PartialEq)] 50 | pub struct LegacyPushMessage { 51 | pub id: Arc, 52 | pub payload: MessagePayload, 53 | } 54 | 55 | #[derive(Serialize, Deserialize, Debug, Clone, Eq, PartialEq)] 56 | pub struct MessagePayload { 57 | pub topic: Arc, 58 | pub flags: u32, 59 | pub blob: Arc, 60 | } 61 | 62 | impl MessagePayload { 63 | pub fn is_encrypted(&self) -> bool { 64 | (self.flags & ENCRYPTED_FLAG) == ENCRYPTED_FLAG 65 | } 66 | } 67 | 68 | #[derive(Serialize, Deserialize, Clone, Debug, Eq, PartialEq)] 69 | pub struct RawPushMessage { 70 | /// Topic is used by the SDKs to decrypt 71 | /// 
encrypted payloads on the client side 72 | pub topic: Arc, 73 | /// Filtering tag 74 | pub tag: u32, 75 | /// The payload message 76 | pub message: Arc, 77 | } 78 | 79 | #[async_trait] 80 | pub trait PushProvider { 81 | async fn send_notification(&self, token: String, body: PushMessage) -> error::Result<()>; 82 | } 83 | 84 | pub const PROVIDER_APNS: &str = "apns"; 85 | pub const PROVIDER_APNS_SANDBOX: &str = "apns-sandbox"; 86 | pub const PROVIDER_FCM: &str = "fcm"; 87 | pub const PROVIDER_FCM_V1: &str = "fcm_v1"; 88 | #[cfg(any(debug_assertions, test))] 89 | pub const PROVIDER_NOOP: &str = "noop"; 90 | 91 | #[derive(Debug, Copy, Clone, PartialEq, Eq, sqlx::Type)] 92 | #[sqlx(type_name = "provider")] 93 | #[sqlx(rename_all = "lowercase")] 94 | pub enum ProviderKind { 95 | Apns, 96 | ApnsSandbox, 97 | Fcm, 98 | // Intentionally no FcmV1 variant because ProviderKind is also used to determine token type (of which FCM and FCM V1 are the same) 99 | #[cfg(any(debug_assertions, test))] 100 | Noop, 101 | } 102 | 103 | impl ProviderKind { 104 | pub fn as_str(&self) -> &'static str { 105 | match self { 106 | Self::Apns => PROVIDER_APNS, 107 | Self::ApnsSandbox => PROVIDER_APNS_SANDBOX, 108 | Self::Fcm => PROVIDER_FCM, 109 | #[cfg(any(debug_assertions, test))] 110 | Self::Noop => PROVIDER_NOOP, 111 | } 112 | } 113 | } 114 | 115 | impl Display for ProviderKind { 116 | fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { 117 | f.write_str(self.as_str()) 118 | } 119 | } 120 | 121 | impl From<&ProviderKind> for String { 122 | fn from(val: &ProviderKind) -> Self { 123 | val.as_str().to_string() 124 | } 125 | } 126 | 127 | impl From for String { 128 | fn from(val: ProviderKind) -> Self { 129 | val.as_str().to_string() 130 | } 131 | } 132 | 133 | impl From for &str { 134 | fn from(val: ProviderKind) -> Self { 135 | val.as_str() 136 | } 137 | } 138 | 139 | impl TryFrom<&str> for ProviderKind { 140 | type Error = error::Error; 141 | 142 | fn try_from(value: &str) -> Result { 143 
| match value.to_lowercase().as_str() { 144 | PROVIDER_APNS => Ok(Self::Apns), 145 | PROVIDER_APNS_SANDBOX => Ok(Self::ApnsSandbox), 146 | PROVIDER_FCM => Ok(Self::Fcm), 147 | #[cfg(any(debug_assertions, test))] 148 | PROVIDER_NOOP => Ok(Self::Noop), 149 | _ => Err(error::Error::ProviderNotFound(value.to_owned())), 150 | } 151 | } 152 | } 153 | 154 | #[allow(clippy::large_enum_variant)] 155 | #[derive(Debug, Clone)] 156 | pub enum Provider { 157 | Fcm(FcmProvider), 158 | FcmV1(FcmV1Provider), 159 | Apns(ApnsProvider), 160 | #[cfg(any(debug_assertions, test))] 161 | Noop(NoopProvider), 162 | } 163 | 164 | #[async_trait] 165 | impl PushProvider for Provider { 166 | #[instrument(name = "send_notification")] 167 | async fn send_notification(&self, token: String, body: PushMessage) -> error::Result<()> { 168 | match self { 169 | Provider::Fcm(p) => p.send_notification(token, body).await, 170 | Provider::FcmV1(p) => p.send_notification(token, body).await, 171 | Provider::Apns(p) => p.send_notification(token, body).await, 172 | #[cfg(any(debug_assertions, test))] 173 | Provider::Noop(p) => p.send_notification(token, body).await, 174 | } 175 | } 176 | } 177 | -------------------------------------------------------------------------------- /src/providers/noop.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::PushMessage, 3 | crate::providers::PushProvider, 4 | async_trait::async_trait, 5 | reqwest::Url, 6 | std::{collections::HashMap, sync::Arc}, 7 | tokio::sync::RwLock, 8 | tracing::instrument, 9 | }; 10 | 11 | #[derive(Debug, Default, Clone)] 12 | pub struct NoopProvider { 13 | notifications: Arc>>>, 14 | } 15 | 16 | impl NoopProvider { 17 | pub fn new() -> Self { 18 | Default::default() 19 | } 20 | } 21 | 22 | #[async_trait] 23 | impl PushProvider for NoopProvider { 24 | #[instrument(name = "send_noop_notification")] 25 | async fn send_notification( 26 | &self, 27 | token: String, 28 | body: PushMessage, 29 | ) -> 
crate::error::Result<()> { 30 | self.bootstrap(token.clone()).await; 31 | 32 | let mut lock = self.notifications.write().await; 33 | let notifications = lock.get_mut(&token).unwrap(); 34 | notifications.append(&mut vec![body]); 35 | 36 | if let Ok(url) = token.parse::() { 37 | assert!(reqwest::get(url).await?.status().is_success()); 38 | } 39 | 40 | Ok(()) 41 | } 42 | } 43 | 44 | // Utils 45 | impl NoopProvider { 46 | /// Insert empty notifications for a new token 47 | async fn bootstrap(&self, token: String) { 48 | self.notifications.write().await.entry(token).or_default(); 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /src/relay/mod.rs: -------------------------------------------------------------------------------- 1 | use ed25519_dalek::VerifyingKey; 2 | 3 | #[derive(Clone)] 4 | pub struct RelayClient { 5 | public_key: VerifyingKey, 6 | } 7 | 8 | impl RelayClient { 9 | pub fn new(string_public_key: String) -> crate::error::Result { 10 | let verifying_key = Self::string_to_verifying_key(&string_public_key)?; 11 | Ok(RelayClient { 12 | public_key: verifying_key, 13 | }) 14 | } 15 | 16 | pub fn get_verifying_key(&self) -> &VerifyingKey { 17 | &self.public_key 18 | } 19 | 20 | fn string_to_verifying_key(string_key: &str) -> crate::error::Result { 21 | let key_bytes = hex::decode(string_key).map_err(crate::error::Error::Hex)?; 22 | Ok(VerifyingKey::from_bytes( 23 | <&[u8; 32]>::try_from(key_bytes.as_slice()).unwrap(), 24 | )?) 
25 | } 26 | } 27 | -------------------------------------------------------------------------------- /src/state.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::{ 3 | config::Config, 4 | metrics::Metrics, 5 | middleware::rate_limit, 6 | networking, 7 | providers::Provider, 8 | relay::RelayClient, 9 | stores::{client::ClientStore, notification::NotificationStore, tenant::TenantStore}, 10 | }, 11 | build_info::BuildInfo, 12 | moka::future::Cache, 13 | std::{net::IpAddr, sync::Arc}, 14 | tokio::time::Duration, 15 | wc::geoip::{block::middleware::GeoBlockLayer, MaxMindResolver}, 16 | }; 17 | 18 | #[cfg(feature = "analytics")] 19 | use crate::analytics::PushAnalytics; 20 | #[cfg(feature = "multitenant")] 21 | use crate::jwt_validation::JwtValidationClient; 22 | 23 | pub type ClientStoreArc = Arc; 24 | pub type NotificationStoreArc = Arc; 25 | pub type TenantStoreArc = Arc; 26 | 27 | pub trait State { 28 | fn config(&self) -> Config; 29 | fn build_info(&self) -> BuildInfo; 30 | fn client_store(&self) -> ClientStoreArc; 31 | fn notification_store(&self) -> NotificationStoreArc; 32 | fn tenant_store(&self) -> TenantStoreArc; 33 | fn relay_client(&self) -> RelayClient; 34 | fn is_multitenant(&self) -> bool; 35 | fn validate_signatures(&self) -> bool; 36 | } 37 | 38 | #[derive(Clone)] 39 | pub struct AppState { 40 | pub config: Config, 41 | pub build_info: BuildInfo, 42 | pub metrics: Option, 43 | #[cfg(feature = "analytics")] 44 | pub analytics: Option, 45 | pub client_store: ClientStoreArc, 46 | pub notification_store: NotificationStoreArc, 47 | pub tenant_store: TenantStoreArc, 48 | pub relay_client: RelayClient, 49 | #[cfg(feature = "multitenant")] 50 | pub jwt_validation_client: JwtValidationClient, 51 | pub public_ip: Option, 52 | is_multitenant: bool, 53 | pub geoblock: Option>>, 54 | /// Service instance identifier 55 | pub instance_id: uuid::Uuid, 56 | /// Service instance uptime measurement 57 | pub uptime: 
std::time::Instant, 58 | pub http_client: reqwest::Client, 59 | pub provider_cache: Cache, 60 | pub rate_limit: rate_limit::RateLimiter, 61 | } 62 | 63 | build_info::build_info!(fn build_info); 64 | 65 | pub fn new_state( 66 | config: Config, 67 | client_store: ClientStoreArc, 68 | notification_store: NotificationStoreArc, 69 | tenant_store: TenantStoreArc, 70 | ) -> crate::error::Result { 71 | let build_info: &BuildInfo = build_info(); 72 | 73 | #[cfg(feature = "multitenant")] 74 | let is_multitenant = true; 75 | 76 | #[cfg(not(feature = "multitenant"))] 77 | let is_multitenant = false; 78 | 79 | #[cfg(feature = "multitenant")] 80 | let jwt_secret = config.jwt_secret.clone(); 81 | 82 | let public_ip = networking::find_public_ip_addr().ok(); 83 | 84 | Ok(AppState { 85 | config: config.clone(), 86 | build_info: build_info.clone(), 87 | metrics: None, 88 | #[cfg(feature = "analytics")] 89 | analytics: None, 90 | client_store, 91 | notification_store, 92 | tenant_store, 93 | relay_client: RelayClient::new(config.relay_public_key)?, 94 | #[cfg(feature = "multitenant")] 95 | jwt_validation_client: JwtValidationClient::new(jwt_secret), 96 | public_ip, 97 | is_multitenant, 98 | geoblock: None, 99 | instance_id: uuid::Uuid::new_v4(), 100 | uptime: std::time::Instant::now(), 101 | http_client: reqwest::Client::new(), 102 | provider_cache: Cache::new(100), 103 | rate_limit: rate_limit::RateLimiter::new(100, Duration::from_secs(60)), 104 | }) 105 | } 106 | 107 | impl AppState { 108 | pub fn set_metrics(&mut self, metrics: Metrics) { 109 | self.metrics = Some(metrics); 110 | } 111 | } 112 | 113 | impl State for Arc { 114 | fn config(&self) -> Config { 115 | self.config.clone() 116 | } 117 | 118 | fn build_info(&self) -> BuildInfo { 119 | self.build_info.clone() 120 | } 121 | 122 | fn client_store(&self) -> ClientStoreArc { 123 | self.client_store.clone() 124 | } 125 | 126 | fn notification_store(&self) -> NotificationStoreArc { 127 | self.notification_store.clone() 128 | } 129 
| 130 | fn tenant_store(&self) -> TenantStoreArc { 131 | self.tenant_store.clone() 132 | } 133 | 134 | fn relay_client(&self) -> RelayClient { 135 | self.relay_client.clone() 136 | } 137 | 138 | fn is_multitenant(&self) -> bool { 139 | self.is_multitenant 140 | } 141 | 142 | fn validate_signatures(&self) -> bool { 143 | self.config.validate_signatures 144 | } 145 | } 146 | -------------------------------------------------------------------------------- /src/stores/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod client; 2 | pub mod notification; 3 | pub mod tenant; 4 | 5 | type Result = std::result::Result; 6 | 7 | #[derive(Debug, thiserror::Error)] 8 | pub enum StoreError { 9 | #[error(transparent)] 10 | Database(#[from] sqlx::Error), 11 | 12 | /// Not found error, params are entity name and identifier 13 | #[error("Cannot find {0} with specified identifier {1}")] 14 | NotFound(String, String), 15 | } 16 | -------------------------------------------------------------------------------- /src/stores/notification.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::{ 3 | handlers::push_message::PushMessageBody, 4 | stores::{self, StoreError::NotFound}, 5 | }, 6 | async_trait::async_trait, 7 | chrono::{DateTime, Utc}, 8 | serde_json::Value, 9 | sqlx::{types::Json, Executor}, 10 | tracing::instrument, 11 | }; 12 | 13 | #[derive(Debug, Clone, PartialEq, Eq, sqlx::FromRow)] 14 | pub struct Notification { 15 | pub id: String, 16 | pub client_id: String, 17 | 18 | pub last_payload: Json, 19 | pub previous_payloads: Vec>, 20 | 21 | pub last_received_at: DateTime, 22 | pub created_at: DateTime, 23 | } 24 | 25 | #[async_trait] 26 | pub trait NotificationStore { 27 | async fn create_or_update_notification( 28 | &self, 29 | id: &str, 30 | tenant_id: &str, 31 | client_id: &str, 32 | payload: &PushMessageBody, 33 | ) -> stores::Result; 34 | async fn get_notification( 35 | 
&self, 36 | id: &str, 37 | client_id: &str, 38 | tenant_id: &str, 39 | ) -> stores::Result; 40 | async fn delete_notification(&self, id: &str, tenant_id: &str) -> stores::Result<()>; 41 | } 42 | 43 | #[async_trait] 44 | impl NotificationStore for sqlx::PgPool { 45 | #[instrument(skip(self, payload))] 46 | async fn create_or_update_notification( 47 | &self, 48 | id: &str, 49 | tenant_id: &str, 50 | client_id: &str, 51 | payload: &PushMessageBody, 52 | ) -> stores::Result { 53 | let mut transaction = self.begin().await?; 54 | 55 | sqlx::query("SELECT pg_advisory_xact_lock(abs(hashtext($1::text)))") 56 | .bind(client_id) 57 | .execute(&mut transaction) 58 | .await?; 59 | 60 | let res = sqlx::query_as::( 61 | " 62 | INSERT INTO public.notifications (id, tenant_id, client_id, last_payload) 63 | VALUES ($1, $2, $3, $4) 64 | ON CONFLICT (id, client_id) 65 | DO UPDATE SET last_received_at = now() 66 | RETURNING *;", 67 | ) 68 | .bind(id) 69 | .bind(tenant_id) 70 | .bind(client_id) 71 | .bind(Json(payload)) 72 | .fetch_one(&mut transaction) 73 | .await; 74 | 75 | transaction.commit().await?; 76 | 77 | match res { 78 | Err(e) => Err(e.into()), 79 | Ok(row) => Ok(row), 80 | } 81 | } 82 | 83 | #[instrument(skip(self))] 84 | async fn get_notification( 85 | &self, 86 | id: &str, 87 | client_id: &str, 88 | tenant_id: &str, 89 | ) -> stores::Result { 90 | let res = sqlx::query_as::( 91 | " 92 | SELECT * 93 | FROM public.notifications 94 | WHERE id = $1 AND client_id = $2 AND tenant_id = $3", 95 | ) 96 | .bind(id) 97 | .bind(client_id) 98 | .bind(tenant_id) 99 | .fetch_one(self) 100 | .await; 101 | 102 | match res { 103 | Err(sqlx::Error::RowNotFound) => { 104 | Err(NotFound("notification".to_string(), id.to_string())) 105 | } 106 | Err(e) => Err(e.into()), 107 | Ok(row) => Ok(row), 108 | } 109 | } 110 | 111 | #[instrument(skip(self))] 112 | async fn delete_notification(&self, id: &str, tenant_id: &str) -> stores::Result<()> { 113 | let mut query_builder = 114 | 
sqlx::QueryBuilder::new("DELETE FROM public.notifications WHERE id = "); 115 | query_builder.push_bind(id); 116 | // BUGFIX: leading space is required — without it the generated SQL is
 // `...WHERE id = $1and tenant_id = $2`, fusing the bind placeholder and
 // the `and` keyword into malformed SQL.
 query_builder.push(" and tenant_id = "); 117 | query_builder.push_bind(tenant_id); 118 | let query = query_builder.build(); 119 | 120 | self.execute(query).await?; 121 | 122 | Ok(()) 123 | } 124 | } 125 | -------------------------------------------------------------------------------- /tenant_migrations/1667510326_initial.sql: -------------------------------------------------------------------------------- 1 | CREATE SCHEMA IF NOT EXISTS public; -------------------------------------------------------------------------------- /tenant_migrations/1667510351_create-tenants.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE public.tenants 2 | ( 3 | id varchar(255) primary key default gen_random_uuid(), 4 | 5 | fcm_api_key text, 6 | 7 | apns_sandbox boolean, 8 | apns_topic text, 9 | apns_certificate text, 10 | apns_certificate_password text, 11 | 12 | created_at timestamptz not null default now(), 13 | updated_at timestamptz not null default now() 14 | ); -------------------------------------------------------------------------------- /tenant_migrations/1674744346_deprecate-sandbox-flag.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE public.tenants 2 | DROP COLUMN apns_sandbox; -------------------------------------------------------------------------------- /tenant_migrations/1676394119_apns-team-tokens.sql: -------------------------------------------------------------------------------- 1 | CREATE TYPE public.apns_type AS ENUM ('certificate', 'token'); 2 | 3 | ALTER TABLE public.tenants 4 | ADD COLUMN apns_type public.apns_type; 5 | 6 | ALTER TABLE public.tenants 7 | ADD COLUMN apns_key_id public.apns_type; 8 | 9 | ALTER TABLE public.tenants 10 | ADD COLUMN apns_team_id public.apns_type; 11 | 12 | ALTER TABLE public.tenants 13 | ADD COLUMN apns_pkcs8_pem 
public.apns_type; -------------------------------------------------------------------------------- /tenant_migrations/1676813285_fix-apns-team-tokens.sql: -------------------------------------------------------------------------------- 1 | -- NOTE: Previous file is broken but migrations already run -> this is patched in this migration! 2 | 3 | -- Delete old columns with incorrect types 4 | ALTER TABLE public.tenants 5 | DROP COLUMN apns_key_id; 6 | 7 | ALTER TABLE public.tenants 8 | DROP COLUMN apns_team_id; 9 | 10 | ALTER TABLE public.tenants 11 | DROP COLUMN apns_pkcs8_pem; 12 | 13 | -- Recreate with correct types 14 | ALTER TABLE public.tenants 15 | ADD COLUMN apns_key_id text; 16 | 17 | ALTER TABLE public.tenants 18 | ADD COLUMN apns_team_id text; 19 | 20 | ALTER TABLE public.tenants 21 | ADD COLUMN apns_pkcs8_pem text; -------------------------------------------------------------------------------- /tenant_migrations/1691518766_add-suspension.sql: -------------------------------------------------------------------------------- 1 | alter table public.tenants 2 | add suspended bool not null default false; 3 | 4 | alter table public.tenants 5 | add suspended_reason text; -------------------------------------------------------------------------------- /tenant_migrations/1713226293_fcm-v1.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE public.tenants 2 | ADD COLUMN fcm_v1_credentials TEXT NULL DEFAULT NULL; 3 | -------------------------------------------------------------------------------- /tenant_migrations/README.md: -------------------------------------------------------------------------------- 1 | # Migrations 2 | 3 | This folder contains migrations for Echo Server and they are automatically called on start-up. 
4 | 5 | ## Format 6 | ``` 7 | {unix timestamp}_{description}.sql 8 | ``` 9 | 10 | ## Contributors 11 | To create a new migration run `./new.sh [description]` to make a new migration 12 | -------------------------------------------------------------------------------- /tenant_migrations/new.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | DESCRIPTION=$1 3 | touch "./$(date +%s)_$DESCRIPTION.sql" -------------------------------------------------------------------------------- /terraform/.terraform-docs.yml: -------------------------------------------------------------------------------- 1 | formatter: 'markdown table' 2 | 3 | recursive: 4 | enabled: true 5 | path: . 6 | 7 | output: 8 | file: README.md 9 | mode: inject 10 | template: |- 11 | 12 | {{ .Content }} 13 | 14 | 15 | content: | 16 | {{ .Header }} 17 | {{ .Requirements }} 18 | {{ .Providers }} 19 | {{ .Modules }} 20 | 21 | ## Inputs 22 | {{- $hideInputs := list "namespace" "region" "stage" "name" "delimiter" "attributes" "tags" "regex_replace_chars" "id_length_limit" "label_key_case" "label_value_case" "label_order" }} 23 | {{- $filteredInputs := list -}} 24 | {{- range .Module.Inputs -}} 25 | {{- if not (has .Name $hideInputs) -}} 26 | {{- $filteredInputs = append $filteredInputs . -}} 27 | {{- end -}} 28 | {{- end -}} 29 | {{ if not $filteredInputs }} 30 | 31 | No inputs. 32 | {{ else }} 33 | | Name | Description | Type | Default | Required | 34 | |------|-------------|------|---------|:--------:| 35 | {{- range $filteredInputs }} 36 | | {{ anchorNameMarkdown "input" .Name }} | {{ tostring .Description | sanitizeMarkdownTbl }} | {{ printf " " }}
{{ tostring .Type | sanitizeMarkdownTbl }}
| {{ printf " " }}
{{ .GetValue | sanitizeMarkdownTbl }}
| {{ printf " " }}{{ ternary .Required "yes" "no" }} | 37 | {{- end }} 38 | {{- end }} 39 | {{ .Outputs }} 40 | {{/** End of file fixer */}} 41 | -------------------------------------------------------------------------------- /terraform/backend.tf: -------------------------------------------------------------------------------- 1 | # Terraform Configuration 2 | terraform { 3 | required_version = "~> 1.0" 4 | 5 | backend "remote" { 6 | hostname = "app.terraform.io" 7 | organization = "wallet-connect" 8 | workspaces { 9 | prefix = "echo-server-" 10 | } 11 | } 12 | 13 | required_providers { 14 | assert = { 15 | source = "bwoznicki/assert" 16 | } 17 | aws = { 18 | source = "hashicorp/aws" 19 | version = "~> 4.31" 20 | } 21 | grafana = { 22 | source = "grafana/grafana" 23 | version = ">= 2.1" 24 | } 25 | random = { 26 | source = "hashicorp/random" 27 | version = "3.4.3" 28 | } 29 | github = { 30 | source = "integrations/github" 31 | version = "5.7.0" 32 | } 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /terraform/ecs/README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ## Requirements 4 | 5 | | Name | Version | 6 | |------|---------| 7 | | [terraform](#requirement\_terraform) | ~> 1.0 | 8 | | [aws](#requirement\_aws) | ~> 4.31 | 9 | ## Providers 10 | 11 | | Name | Version | 12 | |------|---------| 13 | | [aws](#provider\_aws) | ~> 4.31 | 14 | ## Modules 15 | 16 | No modules. 17 | 18 | ## Inputs 19 | | Name | Description | Type | Default | Required | 20 | |------|-------------|------|---------|:--------:| 21 | | [acm\_certificate\_arn](#input\_acm\_certificate\_arn) | n/a |
string
|
n/a
| yes | 22 | | [allowed\_origins](#input\_allowed\_origins) | n/a |
string
|
n/a
| yes | 23 | | [analytics\_datalake\_bucket\_name](#input\_analytics\_datalake\_bucket\_name) | The name of the bucket where the analytics data will be stored |
string
|
n/a
| yes | 24 | | [analytics\_geoip\_db\_bucket\_name](#input\_analytics\_geoip\_db\_bucket\_name) | The name of the bucket where the geoip database is stored |
string
|
n/a
| yes | 25 | | [analytics\_geoip\_db\_key](#input\_analytics\_geoip\_db\_key) | The key of the geoip database in the bucket |
string
|
n/a
| yes | 26 | | [analytics\_key\_arn](#input\_analytics\_key\_arn) | The ARN of the KMS key used to encrypt the analytics data |
string
|
n/a
| yes | 27 | | [app\_name](#input\_app\_name) | n/a |
string
|
n/a
| yes | 28 | | [autoscaling\_max\_capacity](#input\_autoscaling\_max\_capacity) | n/a |
number
|
n/a
| yes | 29 | | [autoscaling\_min\_capacity](#input\_autoscaling\_min\_capacity) | n/a |
number
|
n/a
| yes | 30 | | [aws\_otel\_collector\_ecr\_repository\_url](#input\_aws\_otel\_collector\_ecr\_repository\_url) | n/a |
string
|
n/a
| yes | 31 | | [backup\_acm\_certificate\_arn](#input\_backup\_acm\_certificate\_arn) | n/a |
string
|
n/a
| yes | 32 | | [backup\_fqdn](#input\_backup\_fqdn) | n/a |
string
|
n/a
| yes | 33 | | [backup\_route53\_zone\_id](#input\_backup\_route53\_zone\_id) | n/a |
string
|
n/a
| yes | 34 | | [cpu](#input\_cpu) | n/a |
number
|
n/a
| yes | 35 | | [database\_url](#input\_database\_url) | n/a |
string
|
n/a
| yes | 36 | | [desired\_count](#input\_desired\_count) | n/a |
number
|
n/a
| yes | 37 | | [environment](#input\_environment) | n/a |
string
|
n/a
| yes | 38 | | [fqdn](#input\_fqdn) | n/a |
string
|
n/a
| yes | 39 | | [image](#input\_image) | n/a |
string
|
n/a
| yes | 40 | | [image\_version](#input\_image\_version) | n/a |
string
|
n/a
| yes | 41 | | [jwt\_secret](#input\_jwt\_secret) | n/a |
string
|
n/a
| yes | 42 | | [memory](#input\_memory) | n/a |
number
|
n/a
| yes | 43 | | [private\_subnets](#input\_private\_subnets) | n/a |
set(string)
|
n/a
| yes | 44 | | [prometheus\_endpoint](#input\_prometheus\_endpoint) | n/a |
string
|
n/a
| yes | 45 | | [public\_subnets](#input\_public\_subnets) | n/a |
set(string)
|
n/a
| yes | 46 | | [relay\_public\_key](#input\_relay\_public\_key) | n/a |
string
|
n/a
| yes | 47 | | [route53\_zone\_id](#input\_route53\_zone\_id) | n/a |
string
|
n/a
| yes | 48 | | [telemetry\_sample\_ratio](#input\_telemetry\_sample\_ratio) | n/a |
number
|
n/a
| yes | 49 | | [tenant\_database\_url](#input\_tenant\_database\_url) | n/a |
string
|
n/a
| yes | 50 | | [vpc\_cidr](#input\_vpc\_cidr) | n/a |
string
|
n/a
| yes | 51 | | [vpc\_id](#input\_vpc\_id) | n/a |
string
|
n/a
| yes | 52 | ## Outputs 53 | 54 | | Name | Description | 55 | |------|-------------| 56 | | [load\_balancer\_arn](#output\_load\_balancer\_arn) | n/a | 57 | 58 | 59 | -------------------------------------------------------------------------------- /terraform/ecs/iam.tf: -------------------------------------------------------------------------------- 1 | resource "aws_iam_role" "ecs_task_execution_role" { 2 | name = "${var.app_name}-ecs-task-execution-role" 3 | assume_role_policy = data.aws_iam_policy_document.assume_role_policy.json 4 | } 5 | 6 | data "aws_iam_policy_document" "assume_role_policy" { 7 | statement { 8 | actions = ["sts:AssumeRole"] 9 | 10 | principals { 11 | type = "Service" 12 | identifiers = ["ecs-tasks.amazonaws.com"] 13 | } 14 | } 15 | } 16 | 17 | resource "aws_iam_role_policy_attachment" "ecs_task_execution_role_policy" { 18 | role = aws_iam_role.ecs_task_execution_role.name 19 | policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy" 20 | } 21 | 22 | # Prometheus Access 23 | resource "aws_iam_role_policy_attachment" "prometheus_write_policy" { 24 | role = aws_iam_role.ecs_task_execution_role.name 25 | policy_arn = "arn:aws:iam::aws:policy/AmazonPrometheusRemoteWriteAccess" 26 | } 27 | 28 | # CloudWatch Access 29 | resource "aws_iam_role_policy_attachment" "cloudwatch_write_policy" { 30 | role = aws_iam_role.ecs_task_execution_role.name 31 | policy_arn = "arn:aws:iam::aws:policy/CloudWatchLogsFullAccess" 32 | } 33 | 34 | resource "aws_iam_role_policy_attachment" "ssm_read_only_policy" { 35 | role = aws_iam_role.ecs_task_execution_role.name 36 | policy_arn = "arn:aws:iam::aws:policy/AmazonSSMReadOnlyAccess" 37 | } 38 | 39 | resource "aws_iam_role_policy_attachment" "ecs_task_execution_role_xray_policy" { 40 | role = aws_iam_role.ecs_task_execution_role.name 41 | policy_arn = "arn:aws:iam::aws:policy/AWSXRayDaemonWriteAccess" 42 | } 43 | 44 | # OpenTelemetry 45 | data "aws_iam_policy_document" "otel" { 46 | statement 
{ 47 | actions = [ 48 | "logs:PutLogEvents", 49 | "logs:CreateLogGroup", 50 | "logs:CreateLogStream", 51 | "logs:DescribeLogStreams", 52 | "logs:DescribeLogGroups", 53 | "xray:PutTraceSegments", 54 | "xray:PutTelemetryRecords", 55 | "xray:GetSamplingRules", 56 | "xray:GetSamplingTargets", 57 | "xray:GetSamplingStatisticSummaries", 58 | "ssm:GetParameters", 59 | ] 60 | resources = [ 61 | "*" 62 | ] 63 | } 64 | } 65 | 66 | resource "aws_iam_policy" "otel" { 67 | name = "${var.app_name}-otel" 68 | path = "/" 69 | policy = data.aws_iam_policy_document.otel.json 70 | } 71 | 72 | resource "aws_iam_role_policy_attachment" "ecs_task_execution_fetch_ghcr_secret_policy" { 73 | role = aws_iam_role.ecs_task_execution_role.name 74 | policy_arn = aws_iam_policy.otel.arn 75 | } 76 | 77 | # Analytics Bucket Access 78 | #tfsec:ignore:aws-iam-no-policy-wildcards 79 | resource "aws_iam_policy" "analytics-data-lake_bucket_access" { 80 | name = "${var.app_name}_analytics-data-lake_bucket_access" 81 | path = "/" 82 | description = "Allows ${var.app_name} to read/write from ${var.analytics_datalake_bucket_name}" 83 | 84 | policy = jsonencode({ 85 | "Version" : "2012-10-17", 86 | "Statement" : [ 87 | { 88 | "Sid" : "ListObjectsInAnalyticsBucket", 89 | "Effect" : "Allow", 90 | "Action" : ["s3:ListBucket"], 91 | "Resource" : ["arn:aws:s3:::${var.analytics_datalake_bucket_name}"] 92 | }, 93 | { 94 | "Sid" : "AllObjectActionsInAnalyticsBucket", 95 | "Effect" : "Allow", 96 | "Action" : ["s3:*Object"], 97 | "Resource" : ["arn:aws:s3:::${var.analytics_datalake_bucket_name}/*"] 98 | }, 99 | { 100 | "Sid" : "AllGenerateDataKeyForAnalyticsBucket", 101 | "Effect" : "Allow", 102 | "Action" : ["kms:GenerateDataKey"], 103 | "Resource" : [var.analytics_key_arn] 104 | } 105 | ] 106 | }) 107 | } 108 | 109 | resource "aws_iam_role_policy_attachment" "analytics-data-lake-bucket-policy-attach" { 110 | role = aws_iam_role.ecs_task_execution_role.name 111 | policy_arn = 
aws_iam_policy.analytics-data-lake_bucket_access.arn 112 | } 113 | 114 | # GeoIP Bucket Access 115 | resource "aws_iam_policy" "geoip_bucket_access" { 116 | name = "${var.app_name}_geoip_bucket_access" 117 | path = "/" 118 | description = "Allows ${var.app_name} to read from ${var.analytics_geoip_db_bucket_name}" 119 | 120 | policy = jsonencode({ 121 | "Version" : "2012-10-17", 122 | "Statement" : [ 123 | { 124 | "Sid" : "ListObjectsInGeoipBucket", 125 | "Effect" : "Allow", 126 | "Action" : ["s3:ListBucket"], 127 | "Resource" : ["arn:aws:s3:::${var.analytics_geoip_db_bucket_name}"] 128 | }, 129 | { 130 | "Sid" : "AllObjectActionsInGeoipBucket", 131 | "Effect" : "Allow", 132 | "Action" : ["s3:CopyObject", "s3:GetObject", "s3:HeadObject"], 133 | "Resource" : ["arn:aws:s3:::${var.analytics_geoip_db_bucket_name}/*"] 134 | } 135 | ] 136 | }) 137 | } 138 | 139 | resource "aws_iam_role_policy_attachment" "geoip-bucket-policy-attach" { 140 | role = aws_iam_role.ecs_task_execution_role.name 141 | policy_arn = aws_iam_policy.geoip_bucket_access.arn 142 | } 143 | -------------------------------------------------------------------------------- /terraform/ecs/outputs.tf: -------------------------------------------------------------------------------- 1 | output "load_balancer_arn" { 2 | value = aws_lb.application_load_balancer.arn 3 | } 4 | -------------------------------------------------------------------------------- /terraform/ecs/terraform.tf: -------------------------------------------------------------------------------- 1 | # Terraform Configuration 2 | terraform { 3 | required_version = "~> 1.0" 4 | 5 | required_providers { 6 | aws = { 7 | source = "hashicorp/aws" 8 | version = "~> 4.31" 9 | } 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /terraform/ecs/variables.tf: -------------------------------------------------------------------------------- 1 | variable "region" { 2 | type = string 3 | } 4 | 5 | variable 
"app_name" { 6 | type = string 7 | } 8 | 9 | variable "environment" { 10 | type = string 11 | } 12 | 13 | variable "image" { 14 | type = string 15 | } 16 | 17 | variable "image_version" { 18 | type = string 19 | } 20 | 21 | variable "database_url" { 22 | type = string 23 | sensitive = true 24 | } 25 | 26 | variable "tenant_database_url" { 27 | type = string 28 | sensitive = true 29 | } 30 | 31 | variable "prometheus_endpoint" { 32 | type = string 33 | } 34 | 35 | variable "vpc_id" { 36 | type = string 37 | } 38 | 39 | variable "vpc_cidr" { 40 | type = string 41 | } 42 | 43 | variable "route53_zone_id" { 44 | type = string 45 | } 46 | 47 | variable "fqdn" { 48 | type = string 49 | } 50 | 51 | variable "acm_certificate_arn" { 52 | type = string 53 | } 54 | 55 | variable "backup_acm_certificate_arn" { 56 | type = string 57 | } 58 | 59 | variable "backup_fqdn" { 60 | type = string 61 | } 62 | 63 | variable "backup_route53_zone_id" { 64 | type = string 65 | } 66 | 67 | variable "public_subnets" { 68 | type = set(string) 69 | } 70 | 71 | variable "private_subnets" { 72 | type = set(string) 73 | } 74 | 75 | variable "cpu" { 76 | type = number 77 | } 78 | 79 | variable "memory" { 80 | type = number 81 | } 82 | 83 | variable "telemetry_sample_ratio" { 84 | type = number 85 | } 86 | 87 | variable "aws_otel_collector_ecr_repository_url" { 88 | type = string 89 | } 90 | 91 | variable "allowed_origins" { 92 | type = string 93 | } 94 | 95 | variable "analytics_datalake_bucket_name" { 96 | description = "The name of the bucket where the analytics data will be stored" 97 | type = string 98 | } 99 | 100 | variable "analytics_geoip_db_bucket_name" { 101 | description = "The name of the bucket where the geoip database is stored" 102 | type = string 103 | } 104 | 105 | variable "analytics_geoip_db_key" { 106 | description = "The key of the geoip database in the bucket" 107 | type = string 108 | } 109 | 110 | variable "analytics_key_arn" { 111 | description = "The ARN of the KMS key 
used to encrypt the analytics data" 112 | type = string 113 | } 114 | 115 | variable "desired_count" { 116 | type = number 117 | } 118 | 119 | variable "autoscaling_max_capacity" { 120 | type = number 121 | } 122 | 123 | variable "autoscaling_min_capacity" { 124 | type = number 125 | } 126 | 127 | variable "jwt_secret" { 128 | type = string 129 | sensitive = true 130 | } 131 | 132 | variable "relay_public_key" { 133 | type = string 134 | sensitive = true 135 | } 136 | -------------------------------------------------------------------------------- /terraform/inputs.tf: -------------------------------------------------------------------------------- 1 | 2 | data "terraform_remote_state" "monitoring" { 3 | backend = "remote" 4 | config = { 5 | organization = "wallet-connect" 6 | workspaces = { 7 | name = "monitoring" 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /terraform/monitoring/README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ## Requirements 4 | 5 | | Name | Version | 6 | |------|---------| 7 | | [terraform](#requirement\_terraform) | ~> 1.0 | 8 | | [grafana](#requirement\_grafana) | ~> 1.28 | 9 | ## Providers 10 | 11 | | Name | Version | 12 | |------|---------| 13 | | [grafana](#provider\_grafana) | ~> 1.28 | 14 | ## Modules 15 | 16 | No modules. 17 | 18 | ## Inputs 19 | | Name | Description | Type | Default | Required | 20 | |------|-------------|------|---------|:--------:| 21 | | [app\_name](#input\_app\_name) | n/a |
string
|
n/a
| yes | 22 | | [environment](#input\_environment) | n/a |
string
|
n/a
| yes | 23 | | [load\_balancer\_arn](#input\_load\_balancer\_arn) | n/a |
string
|
n/a
| yes | 24 | | [prometheus\_workspace\_id](#input\_prometheus\_workspace\_id) | n/a |
string
|
n/a
| yes | 25 | ## Outputs 26 | 27 | No outputs. 28 | 29 | -------------------------------------------------------------------------------- /terraform/monitoring/context.tf: -------------------------------------------------------------------------------- 1 | module "this" { 2 | source = "app.terraform.io/wallet-connect/label/null" 3 | version = "0.3.2" 4 | 5 | region = var.region 6 | name = var.app_name 7 | } 8 | -------------------------------------------------------------------------------- /terraform/monitoring/dashboard.jsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import 'grafonnet-lib/grafana.libsonnet'; 2 | local panels = import 'panels/panels.libsonnet'; 3 | 4 | local dashboard = grafana.dashboard; 5 | local row = grafana.row; 6 | local annotation = grafana.annotation; 7 | local layout = grafana.layout; 8 | 9 | local ds = { 10 | prometheus: { 11 | type: 'prometheus', 12 | uid: std.extVar('prometheus_uid'), 13 | }, 14 | cloudwatch: { 15 | type: 'cloudwatch', 16 | uid: std.extVar('cloudwatch_uid'), 17 | }, 18 | }; 19 | local vars = { 20 | namespace: 'Push', 21 | environment: std.extVar('environment'), 22 | notifications: std.parseJson(std.extVar('notifications')), 23 | }; 24 | 25 | //////////////////////////////////////////////////////////////////////////////// 26 | 27 | local height = 8; 28 | local pos = grafana.layout.pos(height); 29 | 30 | //////////////////////////////////////////////////////////////////////////////// 31 | 32 | dashboard.new( 33 | title=std.extVar('dashboard_title'), 34 | uid=std.extVar('dashboard_uid'), 35 | editable=true, 36 | graphTooltip=dashboard.graphTooltips.sharedCrosshair, 37 | timezone=dashboard.timezones.utc, 38 | ) 39 | .addAnnotation( 40 | annotation.new( 41 | target={ 42 | limit: 100, 43 | matchAny: false, 44 | tags: [], 45 | type: 'dashboard', 46 | }, 47 | ) 48 | ) 49 | .addPanels(layout.generate_grid([ 50 | 
////////////////////////////////////////////////////////////////////////////// 51 | row.new('Application'), 52 | panels.app.postgres_query_rate(ds, vars) { gridPos: pos._6 }, 53 | panels.app.postgres_query_latency(ds, vars) { gridPos: pos._6 }, 54 | ])) 55 | -------------------------------------------------------------------------------- /terraform/monitoring/main.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | # Turns the arn into the format expected by 3 | # the Grafana provider e.g. 4 | # net/prod-relay-load-balancer/e9a51c46020a0f85 5 | load_balancer = join("/", slice(split("/", var.load_balancer_arn), 1, 4)) 6 | } 7 | 8 | module "monitoring-role" { 9 | source = "app.terraform.io/wallet-connect/monitoring-role/aws" 10 | version = "1.1.0" 11 | context = module.this 12 | remote_role_arn = var.monitoring_role_arn 13 | } 14 | 15 | resource "grafana_data_source" "prometheus" { 16 | type = "prometheus" 17 | name = "${var.app_name}-amp" 18 | url = "https://aps-workspaces.eu-central-1.amazonaws.com/workspaces/${var.prometheus_workspace_id}/" 19 | 20 | json_data_encoded = jsonencode({ 21 | httpMethod = "GET" 22 | manageAlerts = false 23 | sigV4Auth = true 24 | sigV4AuthType = "ec2_iam_role" 25 | sigV4Region = "eu-central-1" 26 | sigV4AssumeRoleArn = module.monitoring-role.iam_role_arn 27 | }) 28 | } 29 | 30 | resource "grafana_data_source" "cloudwatch" { 31 | type = "cloudwatch" 32 | name = "${var.app_name}-cloudwatch" 33 | 34 | json_data_encoded = jsonencode({ 35 | defaultRegion = "eu-central-1" 36 | assumeRoleArn = module.monitoring-role.iam_role_arn 37 | }) 38 | } 39 | 40 | data "jsonnet_file" "dashboard" { 41 | source = "${path.module}/dashboard.jsonnet" 42 | 43 | ext_str = { 44 | dashboard_title = "Push Server - ${title(var.environment)}" 45 | dashboard_uid = "push-${var.environment}" 46 | 47 | prometheus_uid = grafana_data_source.prometheus.uid 48 | cloudwatch_uid = grafana_data_source.cloudwatch.uid 49 | 50 | 
environment = var.environment 51 | notifications = jsonencode(var.notification_channels) 52 | } 53 | } 54 | 55 | resource "grafana_dashboard" "push_server" { 56 | overwrite = true 57 | message = "Updated by Terraform" 58 | config_json = data.jsonnet_file.dashboard.rendered 59 | } 60 | 61 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/app/postgres_query_latency.libsonnet: -------------------------------------------------------------------------------- 1 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 2 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title='Postgres Query Latency', 11 | datasource=ds.prometheus, 12 | ) 13 | .configure( 14 | defaults.configuration.timeseries 15 | .withUnit('ms') 16 | ) 17 | 18 | .addTarget(targets.prometheus( 19 | datasource=ds.prometheus, 20 | expr='sum by (aws_ecs_task_revision, name) (rate(postgres_query_latency_sum[$__rate_interval])) / sum by (aws_ecs_task_revision, name) (rate(postgres_query_latency_count[$__rate_interval]))', 21 | legendFormat='{{name}} r{{aws_ecs_task_revision}}', 22 | exemplar=false, 23 | refId='PostgresQueryLatency', 24 | )), 25 | } 26 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/app/postgres_query_rate.libsonnet: -------------------------------------------------------------------------------- 1 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 2 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title='Postgres Query Rate', 11 | datasource=ds.prometheus, 12 | ) 13 | .configure( 14 | defaults.configuration.timeseries 15 | .withUnit('cps') 16 | ) 
17 | 18 | .addTarget(targets.prometheus( 19 | datasource=ds.prometheus, 20 | expr='sum by (aws_ecs_task_revision, name) (rate(postgres_queries_total[$__rate_interval]))', 21 | legendFormat='{{name}} r{{aws_ecs_task_revision}}', 22 | exemplar=true, 23 | refId='PostgresQueryRate', 24 | )) 25 | 26 | .addTarget(targets.prometheus( 27 | datasource=ds.prometheus, 28 | expr='sum(rate(postgres_queries_total[$__rate_interval]))', 29 | legendFormat='r{{aws_ecs_task_revision}}', 30 | exemplar=true, 31 | refId='PostgresQueryRateTotal', 32 | )), 33 | } 34 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/panels.libsonnet: -------------------------------------------------------------------------------- 1 | { 2 | app: { 3 | postgres_query_rate: (import 'app/postgres_query_rate.libsonnet').new, 4 | postgres_query_latency: (import 'app/postgres_query_latency.libsonnet').new, 5 | }, 6 | } 7 | -------------------------------------------------------------------------------- /terraform/monitoring/terraform.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0" 3 | 4 | required_providers { 5 | grafana = { 6 | source = "grafana/grafana" 7 | version = "~> 2.0" 8 | } 9 | jsonnet = { 10 | source = "alxrem/jsonnet" 11 | version = "~> 2.3.0" 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /terraform/monitoring/variables.tf: -------------------------------------------------------------------------------- 1 | variable "region" { 2 | type = string 3 | default = "eu-central-1" 4 | } 5 | 6 | variable "app_name" { 7 | type = string 8 | } 9 | 10 | variable "environment" { 11 | type = string 12 | } 13 | 14 | variable "prometheus_workspace_id" { 15 | type = string 16 | } 17 | 18 | variable "load_balancer_arn" { 19 | type = string 20 | } 21 | 22 | variable "notification_channels" { 23 | description = "The 
notification channels to send alerts to" 24 | type = list(any) 25 | } 26 | 27 | variable "monitoring_role_arn" { 28 | description = "The ARN of the monitoring role." 29 | type = string 30 | } 31 | -------------------------------------------------------------------------------- /terraform/provider.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | region = var.region 3 | 4 | # Make it faster by skipping something 5 | skip_metadata_api_check = true 6 | skip_region_validation = true 7 | skip_credentials_validation = true 8 | skip_requesting_account_id = true 9 | 10 | default_tags { 11 | tags = module.tags.tags 12 | } 13 | } 14 | 15 | provider "grafana" { 16 | url = "https://${data.terraform_remote_state.monitoring.outputs.grafana_workspaces.central.grafana_endpoint}" 17 | auth = var.grafana_auth 18 | } 19 | 20 | provider "random" {} 21 | 22 | provider "github" {} 23 | -------------------------------------------------------------------------------- /terraform/variables.tf: -------------------------------------------------------------------------------- 1 | variable "region" { 2 | type = string 3 | default = "eu-central-1" 4 | } 5 | 6 | variable "environment" { 7 | type = string 8 | default = "staging" 9 | } 10 | 11 | variable "azs" { 12 | type = list(string) 13 | default = ["eu-central-1a", "eu-central-1b", "eu-central-1c"] 14 | } 15 | 16 | variable "public_url" { 17 | type = string 18 | default = "echo.walletconnect.com" 19 | } 20 | 21 | variable "grafana_auth" { 22 | type = string 23 | sensitive = true 24 | } 25 | 26 | variable "image_version" { 27 | type = string 28 | default = "" 29 | } 30 | 31 | variable "geoip_db_key" { 32 | description = "The key to the GeoIP database" 33 | type = string 34 | default = "GeoLite2-City.mmdb" 35 | } 36 | 37 | variable "jwt_secret" { 38 | type = string 39 | sensitive = true 40 | } 41 | 42 | variable "relay_public_key" { 43 | type = string 44 | sensitive = true 45 | } 46 | 47 
| #------------------------------------------------------------------------------- 48 | # Alerting / Monitoring 49 | 50 | variable "notification_channels" { 51 | description = "The notification channels to send alerts to" 52 | type = list(any) 53 | default = [] 54 | } 55 | -------------------------------------------------------------------------------- /terraform/vars/README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ## Requirements 4 | 5 | No requirements. 6 | ## Providers 7 | 8 | No providers. 9 | ## Modules 10 | 11 | No modules. 12 | 13 | ## Inputs 14 | 15 | No inputs. 16 | 17 | ## Outputs 18 | 19 | No outputs. 20 | 21 | -------------------------------------------------------------------------------- /terraform/vars/dev.tfvars: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reown-com/push-server/cf976f3db14c94c3085798ee6c9a3196d5742fe2/terraform/vars/dev.tfvars -------------------------------------------------------------------------------- /terraform/vars/prod.tfvars: -------------------------------------------------------------------------------- 1 | notification_channels = [{ uid : "NNOynGwVz" }] 2 | -------------------------------------------------------------------------------- /terraform/vars/staging.tfvars: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reown-com/push-server/cf976f3db14c94c3085798ee6c9a3196d5742fe2/terraform/vars/staging.tfvars -------------------------------------------------------------------------------- /tests/context/mod.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "functional_tests")] 2 | use echo_server::state::{ClientStoreArc, NotificationStoreArc, TenantStoreArc}; 3 | use { 4 | self::server::EchoServer, 5 | async_trait::async_trait, 6 | echo_server::config::Config, 7 | sqlx::{Pool, 
Postgres}, 8 | std::{env, sync::Arc}, 9 | test_context::{AsyncTestContext, TestContext}, 10 | }; 11 | 12 | mod server; 13 | mod stores; 14 | 15 | pub struct ConfigContext { 16 | pub config: Config, 17 | } 18 | 19 | pub struct EchoServerContext { 20 | pub server: EchoServer, 21 | #[cfg(all( 22 | feature = "multitenant", 23 | feature = "apns_tests", 24 | feature = "fcm_tests", 25 | feature = "fcmv1_tests" 26 | ))] 27 | pub config: Config, 28 | } 29 | 30 | pub struct StoreContext { 31 | pub pool: Arc>, 32 | pub tenant_pool: Arc>, 33 | 34 | #[cfg(feature = "functional_tests")] 35 | pub clients: ClientStoreArc, 36 | #[cfg(feature = "functional_tests")] 37 | pub notifications: NotificationStoreArc, 38 | #[cfg(feature = "functional_tests")] 39 | pub tenants: TenantStoreArc, 40 | } 41 | 42 | impl TestContext for ConfigContext { 43 | fn setup() -> Self { 44 | let public_port = self::server::get_random_port(); 45 | let config = Config { 46 | port: public_port, 47 | public_url: format!("http://127.0.0.1:{public_port}"), 48 | log_level: "info,echo-server=info".into(), 49 | log_level_otel: "info,echo-server=trace".into(), 50 | disable_header: true, 51 | validate_signatures: false, 52 | relay_public_key: env::var("RELAY_PUBLIC_KEY").unwrap_or( 53 | // Default relay public key if env not set 54 | // TODO I don't think this is used in the tests, so this should be refactored/removed 55 | "ff469faa970df23c23a6542765ce8dba2a907538522833b2327a153e365d138e".to_string(), 56 | ), 57 | database_url: env::var("DATABASE_URL") 58 | .expect("DATABASE_URL environment variable is not set"), 59 | tenant_database_url: env::var("TENANT_DATABASE_URL") 60 | .expect("TENANT_DATABASE_URL environment variable is not set"), 61 | #[cfg(feature = "multitenant")] 62 | jwt_secret: "n/a".to_string(), 63 | otel_exporter_otlp_endpoint: None, 64 | telemetry_prometheus_port: Some(self::server::get_random_port()), 65 | #[cfg(not(feature = "multitenant"))] 66 | apns_type: None, 67 | #[cfg(not(feature = 
"multitenant"))] 68 | apns_certificate: None, 69 | #[cfg(not(feature = "multitenant"))] 70 | apns_certificate_password: None, 71 | #[cfg(not(feature = "multitenant"))] 72 | apns_pkcs8_pem: None, 73 | #[cfg(not(feature = "multitenant"))] 74 | apns_team_id: None, 75 | #[cfg(not(feature = "multitenant"))] 76 | apns_key_id: None, 77 | #[cfg(not(feature = "multitenant"))] 78 | apns_topic: None, 79 | #[cfg(not(feature = "multitenant"))] 80 | fcm_api_key: None, 81 | #[cfg(not(feature = "multitenant"))] 82 | fcm_v1_credentials: None, 83 | #[cfg(any(feature = "analytics", feature = "geoblock"))] 84 | s3_endpoint: None, 85 | #[cfg(any(feature = "analytics", feature = "geoblock"))] 86 | geoip_db_bucket: None, 87 | #[cfg(any(feature = "analytics", feature = "geoblock"))] 88 | geoip_db_key: None, 89 | #[cfg(feature = "analytics")] 90 | analytics_export_bucket: "example-bucket".to_string(), 91 | is_test: true, 92 | cors_allowed_origins: vec!["*".to_string()], 93 | #[cfg(feature = "geoblock")] 94 | blocked_countries: vec![], 95 | }; 96 | Self { config } 97 | } 98 | } 99 | 100 | #[async_trait] 101 | impl AsyncTestContext for EchoServerContext { 102 | async fn setup() -> Self { 103 | Self { 104 | server: EchoServer::start(ConfigContext::setup().config).await, 105 | #[cfg(all( 106 | feature = "multitenant", 107 | feature = "apns_tests", 108 | feature = "fcm_tests", 109 | feature = "fcmv1_tests" 110 | ))] 111 | config: ConfigContext::setup().config, 112 | } 113 | } 114 | 115 | async fn teardown(mut self) { 116 | self.server.shutdown().await; 117 | } 118 | } 119 | 120 | #[async_trait] 121 | impl AsyncTestContext for StoreContext { 122 | async fn setup() -> Self { 123 | let config = ConfigContext::setup().config; 124 | let (db, tenant_db) = 125 | stores::open_pg_connections(&config.database_url, &config.tenant_database_url).await; 126 | 127 | let db_arc = Arc::new(db); 128 | let tenant_db_arc = Arc::new(tenant_db); 129 | 130 | Self { 131 | pool: db_arc.clone(), 132 | tenant_pool: 
tenant_db_arc.clone(), 133 | #[cfg(feature = "functional_tests")] 134 | clients: db_arc.clone(), 135 | #[cfg(feature = "functional_tests")] 136 | notifications: db_arc.clone(), 137 | #[cfg(feature = "functional_tests")] 138 | tenants: tenant_db_arc.clone(), 139 | } 140 | } 141 | 142 | async fn teardown(self) { 143 | self.pool.close().await; 144 | self.tenant_pool.close().await; 145 | } 146 | } 147 | -------------------------------------------------------------------------------- /tests/context/server.rs: -------------------------------------------------------------------------------- 1 | use { 2 | echo_server::config::Config, 3 | std::net::{IpAddr, Ipv4Addr, SocketAddr, SocketAddrV4, TcpListener}, 4 | tokio::{ 5 | runtime::Handle, 6 | sync::broadcast, 7 | time::{sleep, Duration}, 8 | }, 9 | }; 10 | 11 | pub struct EchoServer { 12 | pub public_addr: SocketAddr, 13 | shutdown_signal: tokio::sync::broadcast::Sender<()>, 14 | is_shutdown: bool, 15 | } 16 | 17 | #[derive(Debug, thiserror::Error)] 18 | pub enum Error {} 19 | 20 | impl EchoServer { 21 | pub async fn start(config: Config) -> Self { 22 | let (public_addr, signal, is_shutdown) = start_server(config).await; 23 | Self { 24 | public_addr, 25 | shutdown_signal: signal, 26 | is_shutdown, 27 | } 28 | } 29 | 30 | pub async fn shutdown(&mut self) { 31 | if self.is_shutdown { 32 | return; 33 | } 34 | self.is_shutdown = true; 35 | let _ = self.shutdown_signal.send(()); 36 | wait_for_server_to_shutdown(self.public_addr.port()) 37 | .await 38 | .unwrap(); 39 | } 40 | } 41 | 42 | async fn start_server( 43 | config: Config, 44 | ) -> ( 45 | std::net::SocketAddr, 46 | tokio::sync::broadcast::Sender<()>, 47 | bool, 48 | ) { 49 | let rt = Handle::current(); 50 | let port = config.port; 51 | let public_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), port); 52 | 53 | let (signal, shutdown) = broadcast::channel(1); 54 | 55 | std::thread::spawn(move || { 56 | rt.block_on(async move { echo_server::bootstap(shutdown, 
config).await }) 57 | .unwrap(); 58 | }); 59 | 60 | if let Err(e) = wait_for_server_to_start(port).await { 61 | panic!("Failed to start server with error: {e:?}") 62 | } 63 | 64 | (public_addr, signal, false) 65 | } 66 | 67 | // Finds a free port. 68 | pub fn get_random_port() -> u16 { 69 | use std::sync::atomic::{AtomicU16, Ordering}; 70 | 71 | static NEXT_PORT: AtomicU16 = AtomicU16::new(9000); 72 | 73 | loop { 74 | let port = NEXT_PORT.fetch_add(1, Ordering::SeqCst); 75 | 76 | if is_port_available(port) { 77 | return port; 78 | } 79 | } 80 | } 81 | 82 | fn is_port_available(port: u16) -> bool { 83 | TcpListener::bind(SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, port)).is_ok() 84 | } 85 | 86 | async fn wait_for_server_to_shutdown(port: u16) -> crate::ErrorResult<()> { 87 | let poll_fut = async { 88 | while !is_port_available(port) { 89 | sleep(Duration::from_millis(10)).await; 90 | } 91 | }; 92 | 93 | Ok(tokio::time::timeout(Duration::from_secs(3), poll_fut).await?) 94 | } 95 | 96 | async fn wait_for_server_to_start(port: u16) -> crate::ErrorResult<()> { 97 | let poll_fut = async { 98 | while is_port_available(port) { 99 | sleep(Duration::from_millis(10)).await; 100 | } 101 | }; 102 | 103 | Ok(tokio::time::timeout(Duration::from_secs(5), poll_fut).await?) 
104 | } 105 | -------------------------------------------------------------------------------- /tests/context/stores.rs: -------------------------------------------------------------------------------- 1 | use { 2 | sqlx::{ 3 | postgres::{PgConnectOptions, PgPoolOptions}, 4 | ConnectOptions, Pool, Postgres, 5 | }, 6 | std::{str::FromStr, time::Duration}, 7 | tracing::log::LevelFilter, 8 | }; 9 | 10 | const PG_CONNECTION_POOL_SIZE: u32 = 30; 11 | 12 | pub async fn open_pg_connections( 13 | database_uri: &str, 14 | tenant_database_uri: &str, 15 | ) -> (Pool, Pool) { 16 | let pg_options = PgConnectOptions::from_str(database_uri) 17 | .expect("failed to parse postgres url") 18 | .log_statements(LevelFilter::Debug) 19 | .log_slow_statements(LevelFilter::Info, Duration::from_millis(250)) 20 | .clone(); 21 | 22 | let pool = PgPoolOptions::new() 23 | .max_connections(PG_CONNECTION_POOL_SIZE) 24 | .connect_with(pg_options) 25 | .await 26 | .expect("failed to connect to postgres"); 27 | 28 | sqlx::migrate!("./migrations") 29 | .run(&pool) 30 | .await 31 | .expect("failed to run migrations"); 32 | 33 | let tenant_pg_options = PgConnectOptions::from_str(tenant_database_uri) 34 | .expect("failed to parse postgres url") 35 | .log_statements(LevelFilter::Debug) 36 | .log_slow_statements(LevelFilter::Info, Duration::from_millis(250)) 37 | .clone(); 38 | 39 | let tenant_pool = PgPoolOptions::new() 40 | .max_connections(PG_CONNECTION_POOL_SIZE) 41 | .connect_with(tenant_pg_options) 42 | .await 43 | .expect("failed to connect to postgres"); 44 | 45 | sqlx::migrate!("./tenant_migrations") 46 | .run(&tenant_pool) 47 | .await 48 | .expect("failed to run migrations"); 49 | 50 | (pool, tenant_pool) 51 | } 52 | -------------------------------------------------------------------------------- /tests/functional/mod.rs: -------------------------------------------------------------------------------- 1 | /// Functional tests that cover Echo Server when running with a database and all 2 | /// 
other expectations as a complete system 3 | #[cfg(feature = "multitenant")] 4 | mod multitenant; 5 | #[cfg(not(feature = "multitenant"))] 6 | mod singletenant; 7 | mod stores; 8 | -------------------------------------------------------------------------------- /tests/functional/multitenant/mod.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::context::EchoServerContext, 3 | jsonwebtoken::{encode, EncodingKey, Header}, 4 | random_string::generate, 5 | serde::Serialize, 6 | std::time::SystemTime, 7 | test_context::test_context, 8 | }; 9 | 10 | #[cfg(feature = "apns_tests")] 11 | mod apns; 12 | #[cfg(feature = "fcm_tests")] 13 | mod fcm; 14 | #[cfg(feature = "fcmv1_tests")] 15 | mod fcm_v1; 16 | #[cfg(feature = "multitenant")] 17 | mod tenancy; 18 | 19 | /// Struct to hold claims for JWT validation 20 | #[derive(Serialize)] 21 | pub struct ClaimsForValidation { 22 | sub: String, 23 | exp: usize, 24 | } 25 | 26 | #[test_context(EchoServerContext)] 27 | #[tokio::test] 28 | async fn test_health(ctx: &mut EchoServerContext) { 29 | let body = reqwest::get(format!("http://{}/health", ctx.server.public_addr)) 30 | .await 31 | .expect("Failed to call /health") 32 | .status(); 33 | assert!(body.is_success()); 34 | } 35 | 36 | pub fn generate_random_tenant_id(jwt_secret: &str) -> (String, String) { 37 | let charset = "1234567890"; 38 | let tenant_id = generate(12, charset); 39 | let unix_timestamp = SystemTime::now() 40 | .duration_since(SystemTime::UNIX_EPOCH) 41 | .unwrap() 42 | .as_secs() as usize; 43 | let token_claims = ClaimsForValidation { 44 | sub: tenant_id.clone(), 45 | exp: unix_timestamp + 60 * 60, // Add an hour for expiration 46 | }; 47 | let jwt_token = encode( 48 | &Header::default(), 49 | &token_claims, 50 | &EncodingKey::from_secret(jwt_secret.as_bytes()), 51 | ) 52 | .expect("Failed to encode jwt token"); 53 | (tenant_id, jwt_token) 54 | } 55 | 
-------------------------------------------------------------------------------- /tests/functional/multitenant/tenancy.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::{context::EchoServerContext, functional::multitenant::generate_random_tenant_id}, 3 | echo_server::handlers::create_tenant::TenantRegisterBody, 4 | test_context::test_context, 5 | }; 6 | 7 | #[test_context(EchoServerContext)] 8 | #[tokio::test] 9 | async fn tenant_register_get_delete(ctx: &mut EchoServerContext) { 10 | let (tenant_id, jwt_token) = generate_random_tenant_id(&ctx.config.jwt_secret); 11 | 12 | // Register tenant 13 | let client = reqwest::Client::new(); 14 | let response = client 15 | .post(format!("http://{}/tenants", ctx.server.public_addr)) 16 | .bearer_auth(&jwt_token) 17 | .json(&TenantRegisterBody { 18 | id: tenant_id.clone(), 19 | }) 20 | .send() 21 | .await 22 | .expect("Call failed"); 23 | assert_eq!(response.status(), reqwest::StatusCode::OK); 24 | 25 | // Get tenant 26 | let response = client 27 | .get(format!( 28 | "http://{}/tenants/{}", 29 | ctx.server.public_addr, tenant_id 30 | )) 31 | .bearer_auth(&jwt_token) 32 | .send() 33 | .await 34 | .expect("Call failed"); 35 | assert_eq!(response.status(), reqwest::StatusCode::OK); 36 | 37 | // Check for CORS 38 | assert!(response 39 | .headers() 40 | .contains_key("Access-Control-Allow-Origin")); 41 | let allowed_origins = response 42 | .headers() 43 | .get("Access-Control-Allow-Origin") 44 | .unwrap(); 45 | assert_eq!(allowed_origins.to_str().unwrap(), "*"); 46 | 47 | // Delete tenant 48 | let response = client 49 | .delete(format!( 50 | "http://{}/tenants/{}", 51 | ctx.server.public_addr, tenant_id 52 | )) 53 | .bearer_auth(&jwt_token) 54 | .send() 55 | .await 56 | .expect("Call failed"); 57 | assert_eq!(response.status(), reqwest::StatusCode::OK); 58 | 59 | // Get tenant again 60 | let response = client 61 | .get(format!( 62 | "http://{}/tenants/{}", 63 | 
ctx.server.public_addr, tenant_id 64 | )) 65 | .bearer_auth(&jwt_token) 66 | .send() 67 | .await 68 | .expect("Call failed"); 69 | // TODO: this should be changed to 404 70 | assert_eq!(response.status(), reqwest::StatusCode::BAD_REQUEST); 71 | } 72 | -------------------------------------------------------------------------------- /tests/functional/singletenant/mod.rs: -------------------------------------------------------------------------------- 1 | /// Tests against the handlers 2 | use {crate::context::EchoServerContext, test_context::test_context}; 3 | 4 | mod push; 5 | mod registration; 6 | 7 | #[test_context(EchoServerContext)] 8 | #[tokio::test] 9 | async fn test_health(ctx: &mut EchoServerContext) { 10 | let body = reqwest::get(format!("http://{}/health", ctx.server.public_addr)) 11 | .await 12 | .expect("Failed to call /health") 13 | .status(); 14 | assert!(body.is_success()); 15 | } 16 | -------------------------------------------------------------------------------- /tests/functional/singletenant/registration.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::context::EchoServerContext, 3 | echo_server::handlers::register_client::RegisterBody, 4 | ed25519_dalek::SigningKey, 5 | relay_rpc::domain::{ClientId, DecodedClientId}, 6 | test_context::test_context, 7 | }; 8 | 9 | #[test_context(EchoServerContext)] 10 | #[tokio::test] 11 | async fn test_registration(ctx: &mut EchoServerContext) { 12 | let keypair = SigningKey::generate(&mut rand::thread_rng()); 13 | 14 | let random_client_id = DecodedClientId::from_key(&keypair.verifying_key()); 15 | let client_id = ClientId::from(random_client_id); 16 | let payload = RegisterBody { 17 | client_id: client_id.clone(), 18 | push_type: "noop".to_string(), 19 | token: "test".to_string(), 20 | always_raw: Some(false), 21 | }; 22 | 23 | let jwt = relay_rpc::auth::AuthToken::new(client_id.value().to_string()) 24 | .aud(format!( 25 | "http://127.0.0.1:{}", 26 | 
ctx.server.public_addr.port() 27 | )) 28 | .as_jwt(&keypair) 29 | .unwrap() 30 | .to_string(); 31 | 32 | // Register client 33 | let client = reqwest::Client::new(); 34 | let response = client 35 | .post(format!("http://{}/clients", ctx.server.public_addr)) 36 | .header("Authorization", jwt.clone()) 37 | .json(&payload) 38 | .send() 39 | .await 40 | .expect("Call failed"); 41 | 42 | assert!( 43 | response.status().is_success(), 44 | "Response was not successful" 45 | ); 46 | 47 | // Update token 48 | let payload = RegisterBody { 49 | client_id, 50 | push_type: "noop".to_string(), 51 | token: "new_token".to_string(), 52 | always_raw: Some(false), 53 | }; 54 | let response = client 55 | .post(format!("http://{}/clients", ctx.server.public_addr)) 56 | .header("Authorization", jwt) 57 | .json(&payload) 58 | .send() 59 | .await 60 | .expect("Call failed"); 61 | 62 | assert!( 63 | response.status().is_success(), 64 | "Response was not successful" 65 | ); 66 | } 67 | 68 | #[test_context(EchoServerContext)] 69 | #[tokio::test] 70 | async fn test_deregistration(ctx: &mut EchoServerContext) { 71 | let keypair = SigningKey::generate(&mut rand::thread_rng()); 72 | 73 | let random_client_id = DecodedClientId::from_key(&keypair.verifying_key()); 74 | let client_id = ClientId::from(random_client_id); 75 | 76 | let jwt = relay_rpc::auth::AuthToken::new(client_id.value().to_string()) 77 | .aud(format!( 78 | "http://127.0.0.1:{}", 79 | ctx.server.public_addr.port() 80 | )) 81 | .as_jwt(&keypair) 82 | .unwrap() 83 | .to_string(); 84 | 85 | let payload = RegisterBody { 86 | client_id: client_id.clone(), 87 | push_type: "noop".to_string(), 88 | token: "test".to_string(), 89 | always_raw: Some(false), 90 | }; 91 | 92 | let client = reqwest::Client::new(); 93 | let register_response = client 94 | .post(format!("http://{}/clients", ctx.server.public_addr)) 95 | .json(&payload) 96 | .header("Authorization", jwt.clone()) 97 | .send() 98 | .await 99 | .expect("Call failed"); 100 | 101 | 
assert!( 102 | register_response.status().is_success(), 103 | "Failed to register client" 104 | ); 105 | 106 | let client = reqwest::Client::new(); 107 | let delete_response = client 108 | .delete(format!( 109 | "http://{}/clients/{}", 110 | ctx.server.public_addr, client_id 111 | )) 112 | .header("Authorization", jwt) 113 | .send() 114 | .await 115 | .expect("Call failed") 116 | .status(); 117 | 118 | assert!(delete_response.is_success(), "Failed to unregister client"); 119 | } 120 | -------------------------------------------------------------------------------- /tests/functional/stores/mod.rs: -------------------------------------------------------------------------------- 1 | use uuid::Uuid; 2 | 3 | mod client; 4 | mod notification; 5 | /// Tests against the stores 6 | mod tenant; 7 | 8 | pub const TENANT_ID: &str = "000-000-000-000"; 9 | 10 | pub fn gen_id() -> String { 11 | Uuid::new_v4().to_string() 12 | } 13 | -------------------------------------------------------------------------------- /tests/functional/stores/notification.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::{ 3 | context::StoreContext, 4 | functional::stores::{gen_id, TENANT_ID}, 5 | }, 6 | echo_server::{ 7 | handlers::push_message::PushMessageBody, providers::ProviderKind, state::ClientStoreArc, 8 | stores::client::Client, 9 | }, 10 | test_context::test_context, 11 | }; 12 | 13 | pub async fn create_client(client_store: &ClientStoreArc) -> String { 14 | let id = format!("id-{}", gen_id()); 15 | let token = format!("token-{}", gen_id()); 16 | 17 | client_store 18 | .create_client( 19 | TENANT_ID, 20 | &id, 21 | Client { 22 | tenant_id: TENANT_ID.to_string(), 23 | push_type: ProviderKind::Noop, 24 | token, 25 | always_raw: false, 26 | }, 27 | None, 28 | ) 29 | .await 30 | .expect("failed to create client for notification test"); 31 | 32 | id 33 | } 34 | 35 | #[test_context(StoreContext)] 36 | #[tokio::test] 37 | async fn 
notification(ctx: &mut StoreContext) { 38 | let client_id = create_client(&ctx.clients).await; 39 | 40 | let res = ctx 41 | .notifications 42 | .create_or_update_notification( 43 | &gen_id(), 44 | TENANT_ID, 45 | &client_id, 46 | &PushMessageBody { 47 | raw: None, 48 | legacy: None, 49 | }, 50 | ) 51 | .await; 52 | 53 | assert!(res.is_ok()); 54 | } 55 | 56 | #[test_context(StoreContext)] 57 | #[tokio::test] 58 | async fn notification_multiple_clients_same_payload(ctx: &mut StoreContext) { 59 | let message_id = gen_id(); 60 | let payload = PushMessageBody { 61 | raw: None, 62 | legacy: None, 63 | }; 64 | 65 | let client_id1 = create_client(&ctx.clients).await; 66 | let res = ctx 67 | .notifications 68 | .create_or_update_notification(&message_id, TENANT_ID, &client_id1, &payload) 69 | .await; 70 | assert!(res.is_ok()); 71 | 72 | let client_id2 = create_client(&ctx.clients).await; 73 | let res = ctx 74 | .notifications 75 | .create_or_update_notification(&message_id, TENANT_ID, &client_id2, &payload) 76 | .await; 77 | assert!(res.is_ok()); 78 | 79 | let notification1 = ctx 80 | .notifications 81 | .get_notification(&message_id, &client_id1, TENANT_ID) 82 | .await 83 | .unwrap(); 84 | assert_eq!(notification1.client_id, client_id1); 85 | 86 | let notification2 = ctx 87 | .notifications 88 | .get_notification(&message_id, &client_id2, TENANT_ID) 89 | .await 90 | .unwrap(); 91 | assert_eq!(notification2.client_id, client_id2); 92 | } 93 | -------------------------------------------------------------------------------- /tests/integration.rs: -------------------------------------------------------------------------------- 1 | // mod env; 2 | // mod providers; 3 | // mod store; // Comment this out for now 4 | mod context; 5 | #[cfg(feature = "functional_tests")] 6 | mod functional; 7 | mod unit; 8 | 9 | pub type ErrorResult<T> = Result<T, TestError>; 10 | 11 | #[derive(Debug, thiserror::Error)] 12 | pub enum TestError { 13 | #[error(transparent)] 14 | Elapsed(#[from]
tokio::time::error::Elapsed), 15 | 16 | #[error(transparent)] 17 | EchoServer(#[from] echo_server::error::Error), 18 | } 19 | -------------------------------------------------------------------------------- /tests/unit/messages.rs: -------------------------------------------------------------------------------- 1 | use echo_server::{ 2 | blob::{DecryptedPayloadBlob, ENCRYPTED_FLAG}, 3 | providers::MessagePayload, 4 | }; 5 | 6 | const EXAMPLE_TOPIC: &str = "example-topic"; 7 | 8 | // base64 encoded json string 9 | const EXAMPLE_CLEARTEXT_ENCODED_BLOB: &str = "eyJ0aXRsZSI6IllvdSBoYXZlIGEgc2lnbiByZXF1ZXN0IiwiYm9keSI6ImV4YW1wbGUtZGFwcCBoYXMgc2VudCB5b3UgYSByZXF1ZXN0IHRvIHNpZ24gYSBtZXNzYWdlIn0="; 10 | 11 | // json string 12 | const EXAMPLE_CLEARTEXT_BLOB_TITLE: &str = "You have a sign request"; 13 | const EXAMPLE_CLEARTEXT_BLOB_BODY: &str = "example-dapp has sent you a request to sign a message"; 14 | 15 | // This can be any text as echo-server doesn't mutate or read it 16 | const EXAMPLE_ENCRYPTED_BLOB: &str = "encrypted-blob"; 17 | 18 | #[test] 19 | pub fn check_payload_encrypted() { 20 | let payload = MessagePayload { 21 | topic: EXAMPLE_TOPIC.to_string().into(), 22 | flags: ENCRYPTED_FLAG, 23 | blob: EXAMPLE_ENCRYPTED_BLOB.to_string().into(), 24 | }; 25 | 26 | assert!(payload.is_encrypted()) 27 | } 28 | 29 | #[test] 30 | pub fn check_payload_not_encrypted() { 31 | let payload = MessagePayload { 32 | topic: EXAMPLE_TOPIC.to_string().into(), 33 | flags: 0, 34 | blob: EXAMPLE_CLEARTEXT_ENCODED_BLOB.to_string().into(), 35 | }; 36 | 37 | assert!(!payload.is_encrypted()); 38 | } 39 | 40 | #[test] 41 | pub fn parse_blob_from_payload() { 42 | let payload = MessagePayload { 43 | topic: EXAMPLE_TOPIC.to_string().into(), 44 | flags: 0, 45 | blob: EXAMPLE_CLEARTEXT_ENCODED_BLOB.to_string().into(), 46 | }; 47 | 48 | let blob = DecryptedPayloadBlob::from_base64_encoded(&payload.blob) 49 | .expect("Failed to parse payload's blob"); 50 | 51 | assert_eq!( 52 | blob, 53 | 
DecryptedPayloadBlob { 54 | title: EXAMPLE_CLEARTEXT_BLOB_TITLE.to_string(), 55 | body: EXAMPLE_CLEARTEXT_BLOB_BODY.to_string(), 56 | image: None, 57 | url: None 58 | } 59 | ) 60 | } 61 | 62 | #[test] 63 | pub fn parse_encoded_blob() { 64 | let blob = DecryptedPayloadBlob::from_base64_encoded(EXAMPLE_CLEARTEXT_ENCODED_BLOB) 65 | .expect("Failed to parse encoded blob"); 66 | 67 | assert_eq!( 68 | blob, 69 | DecryptedPayloadBlob { 70 | title: EXAMPLE_CLEARTEXT_BLOB_TITLE.to_string(), 71 | body: EXAMPLE_CLEARTEXT_BLOB_BODY.to_string(), 72 | image: None, 73 | url: None 74 | } 75 | ) 76 | } 77 | -------------------------------------------------------------------------------- /tests/unit/middleware/mod.rs: -------------------------------------------------------------------------------- 1 | mod validate_signature; 2 | -------------------------------------------------------------------------------- /tests/unit/middleware/validate_signature.rs: -------------------------------------------------------------------------------- 1 | use { 2 | echo_server::middleware::validate_signature::signature_is_valid, 3 | ed25519_dalek::{Signer, SigningKey, VerifyingKey}, 4 | rand::rngs::OsRng, 5 | }; 6 | 7 | /// Setup for tests by creating a public key and returning a signature, 8 | /// timestamp and body 9 | fn setup() -> (VerifyingKey, String, String, String) { 10 | let mut csprng = OsRng {}; 11 | let keypair: SigningKey = SigningKey::generate(&mut csprng); 12 | 13 | let body = "example_body"; 14 | let timestamp = "1692442800"; 15 | 16 | let sig_body = format!("{}.{}.{}", timestamp, body.len(), body); 17 | let sig = keypair.sign(sig_body.as_bytes()); 18 | let sig_hex = hex::encode(sig.to_bytes()); 19 | 20 | ( 21 | keypair.verifying_key(), 22 | sig_hex, 23 | timestamp.to_string(), 24 | body.to_string(), 25 | ) 26 | } 27 | 28 | #[tokio::test] 29 | pub async fn valid_signature() { 30 | let (pub_key, signature, timestamp, body) = setup(); 31 | 32 | let res = signature_is_valid(&signature, 
&timestamp, &body, &pub_key).await; 33 | 34 | // Shouldn't error 35 | assert!(res.is_ok()); 36 | 37 | // Should be valid 38 | assert!(res.expect("failed to extract result")) 39 | } 40 | 41 | #[tokio::test] 42 | pub async fn invalid_signature_not_hex() { 43 | let (pub_key, _, timestamp, body) = setup(); 44 | 45 | let res = signature_is_valid("bad-signature", &timestamp, &body, &pub_key).await; 46 | 47 | // Should error 48 | assert!(res.is_err()); 49 | 50 | let error = res.expect_err("Couldn't unwrap error"); 51 | assert!(error.is_hex()); 52 | } 53 | 54 | #[tokio::test] 55 | pub async fn invalid_signature_hex() { 56 | let (pub_key, _, timestamp, body) = setup(); 57 | 58 | let res = signature_is_valid( 59 | // Sig Decoded: invalid-signature 60 | "696e76616c69642d7369676e6174757265", 61 | &timestamp, 62 | &body, 63 | &pub_key, 64 | ) 65 | .await; 66 | 67 | // Should error 68 | assert!(res.is_err()); 69 | 70 | let error = res.expect_err("Couldn't unwrap error"); 71 | // Note: should be a from-slice error, as the signature is valid hex but not a valid ed25519 signature 72 | assert!(error.is_ed_25519()); 73 | } 74 | -------------------------------------------------------------------------------- /tests/unit/mod.rs: -------------------------------------------------------------------------------- 1 | mod messages; 2 | mod middleware; 3 | --------------------------------------------------------------------------------