├── terraform ├── outputs.tf ├── monitoring │ ├── outputs.tf │ ├── terraform.tf │ ├── panels │ │ ├── app │ │ │ ├── publishing_workers_count.libsonnet │ │ │ ├── publishing_workers_queued_size.libsonnet │ │ │ ├── publishing_workers_errors.libsonnet │ │ │ ├── publishing_workers_processing_size.libsonnet │ │ │ ├── publishing_workers_published_count.libsonnet │ │ │ ├── subscribe_latency.libsonnet │ │ │ ├── account_not_found.libsonnet │ │ │ ├── dispatched_notifications.libsonnet │ │ │ ├── registry_request_rate.libsonnet │ │ │ ├── relay_subscribe_rate.libsonnet │ │ │ ├── http_request_rate.libsonnet │ │ │ ├── relay_outgoing_message_rate.libsonnet │ │ │ ├── relay_batch_subscribe_rate.libsonnet │ │ │ ├── notify_latency.libsonnet │ │ │ ├── registry_request_latency.libsonnet │ │ │ ├── postgres_query_latency.libsonnet │ │ │ ├── postgres_query_rate.libsonnet │ │ │ ├── send_failed.libsonnet │ │ │ ├── subscribed_topics.libsonnet │ │ │ ├── relay_subscribe_latency.libsonnet │ │ │ ├── relay_outgoing_message_latency.libsonnet │ │ │ ├── relay_batch_subscribe_latency.libsonnet │ │ │ ├── keys_server_request_rate.libsonnet │ │ │ ├── relay_incoming_message_latency.libsonnet │ │ │ ├── keys_server_request_latency.libsonnet │ │ │ ├── relay_subscribe_failures.libsonnet │ │ │ ├── relay_incoming_message_server_errors.libsonnet │ │ │ ├── relay_incoming_message_rate.libsonnet │ │ │ ├── relay_outgoing_message_failures.libsonnet │ │ │ ├── relay_batch_subscribe_failures.libsonnet │ │ │ └── http_request_latency.libsonnet │ │ ├── lb │ │ │ ├── active_connections.libsonnet │ │ │ ├── requests.libsonnet │ │ │ ├── error_5xx_logs.libsonnet │ │ │ ├── healthy_hosts.libsonnet │ │ │ ├── error_5xx.libsonnet │ │ │ └── error_4xx.libsonnet │ │ └── rds │ │ │ ├── database_connections.libsonnet │ │ │ ├── volume_bytes_used.libsonnet │ │ │ ├── cpu.libsonnet │ │ │ └── freeable_memory.libsonnet │ ├── data_sources.tf │ ├── main.tf │ └── variables.tf ├── alerting │ ├── alarms_prometheus.tf │ ├── terraform.tf │ ├── 
alarms_elb.tf │ ├── main.tf │ ├── alarms_redis.tf │ ├── alarms_ecs.tf │ └── variables.tf ├── ecs │ ├── main.tf │ ├── terraform.tf │ ├── dns.tf │ ├── cluster_logs.tf │ ├── outputs.tf │ └── cluster_autoscaling.tf ├── redis │ ├── terraform.tf │ ├── outputs.tf │ ├── variables.tf │ └── main.tf ├── context.tf ├── docdb │ ├── terraform.tf │ ├── outputs.tf │ ├── password.tf │ └── network.tf ├── postgres │ ├── terraform.tf │ ├── outputs.tf │ ├── password.tf │ ├── main.tf │ └── variables.tf ├── providers.tf ├── .tflint.hcl ├── res_redis.tf ├── res_alerting.tf ├── res_dns.tf ├── terraform.tf ├── res_db.tf ├── inputs.tf ├── res_monitoring.tf ├── deploy-dev.sh ├── .terraform-docs.yml └── main.tf ├── .dockerignore ├── .github ├── codeowners ├── pull_request_template.md ├── workflows │ ├── sub-validate-health.yml │ ├── sub-validate-rust.yml │ ├── dispatch_publish.yml │ ├── sub-validate-swift.yml │ ├── event_release.yml │ ├── event_pr.yml │ ├── event_intake.yml │ └── dispatch_deploy.yml ├── ISSUE_TEMPLATE │ ├── feature_request.yml │ └── bug.yml └── SECRETS.md ├── src ├── model │ ├── mod.rs │ └── types │ │ ├── account_id │ │ ├── erc55.rs │ │ ├── caip10.rs │ │ └── mod.rs │ │ └── mod.rs ├── siwx │ ├── mod.rs │ └── notify_recap.rs ├── rate_limit │ ├── mod.rs │ └── token_bucket.lua ├── services │ ├── private_http_server │ │ ├── handlers │ │ │ ├── mod.rs │ │ │ └── metrics.rs │ │ └── mod.rs │ ├── mod.rs │ ├── public_http_server │ │ └── handlers │ │ │ ├── mod.rs │ │ │ ├── health.rs │ │ │ ├── webhooks │ │ │ ├── mod.rs │ │ │ ├── delete_webhook.rs │ │ │ ├── update_webhook.rs │ │ │ ├── get_webhooks.rs │ │ │ └── register_webhook.rs │ │ │ ├── relay_webhook │ │ │ └── handlers │ │ │ │ └── mod.rs │ │ │ ├── get_subscribers_v0.rs │ │ │ ├── mark_all_as_read.rs │ │ │ ├── get_welcome_notification.rs │ │ │ ├── post_welcome_notification.rs │ │ │ └── notify_v0.rs │ ├── watcher_expiration_job.rs │ ├── publisher_service │ │ └── types.rs │ └── relay_renewal_job │ │ └── mod.rs ├── main.rs ├── registry │ ├── 
storage │ │ ├── error.rs │ │ └── mod.rs │ └── extractor.rs ├── utils.rs ├── config │ ├── deployed │ │ └── networking.rs │ └── mod.rs ├── notify_keys.rs ├── notify_message.rs ├── analytics │ ├── relay_request.rs │ └── subscriber_notification.rs └── relay_client_helpers.rs ├── migrations ├── new.sh ├── 20240227222554_index_subscription_watcher_account.sql ├── 20240123221209_index-where-status-queued.sql ├── 20240319001421_is_read.sql ├── README.md ├── 20240103051259_welcome_notification.sql ├── 20231026150926_add_notification_trigger.sql ├── 20240111200929_unique_account_address.sql ├── 20231025145134_add_notification.sql ├── ERD.md └── 20231018121518_init.sql ├── Dockerfile-dev ├── .gitmodules ├── .editorconfig ├── renovate.json ├── .env.terraform.example ├── docker-compose.storage.yml ├── rustfmt.toml ├── .pre-commit-config.yaml ├── deny.toml ├── docker-compose.notify-server.yml ├── .terraformignore ├── LICENSE ├── .env.example ├── .gitignore ├── README.md └── Dockerfile /terraform/outputs.tf: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /terraform/monitoring/outputs.tf: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /terraform/alerting/alarms_prometheus.tf: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | terraform 2 | target 3 | .github 4 | -------------------------------------------------------------------------------- /.github/codeowners: -------------------------------------------------------------------------------- 1 | * @geekbrother 2 | * @chris13524 3 | 
-------------------------------------------------------------------------------- /src/model/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod helpers; 2 | pub mod types; 3 | -------------------------------------------------------------------------------- /src/siwx/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod erc5573; 2 | pub mod notify_recap; 3 | -------------------------------------------------------------------------------- /src/rate_limit/mod.rs: -------------------------------------------------------------------------------- 1 | mod token_bucket; 2 | pub use token_bucket::*; 3 | -------------------------------------------------------------------------------- /src/services/private_http_server/handlers/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod metrics; 2 | -------------------------------------------------------------------------------- /terraform/ecs/main.tf: -------------------------------------------------------------------------------- 1 | resource "random_pet" "this" { 2 | length = 2 3 | } 4 | -------------------------------------------------------------------------------- /migrations/new.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | DIR="$(dirname "$0")" 5 | 6 | DESCRIPTION=$1 7 | touch "$DIR/$(date +%s)_$DESCRIPTION.sql" 8 | -------------------------------------------------------------------------------- /migrations/20240227222554_index_subscription_watcher_account.sql: -------------------------------------------------------------------------------- 1 | CREATE INDEX subscription_watcher_address ON subscription_watcher (get_address_lower(account)); 2 | -------------------------------------------------------------------------------- /migrations/20240123221209_index-where-status-queued.sql: 
-------------------------------------------------------------------------------- 1 | CREATE INDEX subscriber_notification_status_queued ON subscriber_notification (status) 2 | WHERE status = 'queued'; 3 | -------------------------------------------------------------------------------- /migrations/20240319001421_is_read.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE subscriber_notification ADD COLUMN is_read BOOLEAN NOT NULL DEFAULT FALSE; 2 | CREATE INDEX subscriber_notification_is_read_idx ON subscriber_notification (is_read); 3 | -------------------------------------------------------------------------------- /src/services/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod private_http_server; 2 | pub mod public_http_server; 3 | pub mod publisher_service; 4 | pub mod relay_mailbox_clearing_service; 5 | pub mod relay_renewal_job; 6 | pub mod watcher_expiration_job; 7 | -------------------------------------------------------------------------------- /terraform/redis/terraform.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "~> 1.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = "~> 5.7" 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /terraform/alerting/terraform.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "~> 1.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = "~> 5.7" 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /Dockerfile-dev: -------------------------------------------------------------------------------- 1 | FROM rust:1.78 2 | 3 | ENV LOG_LEVEL debug 4 | 5 | RUN apt update && \ 6 | apt install \ 7 | gcc 8 | 
#musl-dev musl-tools 9 | 10 | RUN cargo install cargo-watch 11 | 12 | WORKDIR /notify-server 13 | COPY . . 14 | 15 | CMD ["cargo", "watch", "-x", "run"] -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "terraform/monitoring/grafonnet-lib"] 2 | path = terraform/monitoring/grafonnet-lib 3 | url = git@github.com:WalletConnect/grafonnet-lib.git 4 | [submodule "rs-relay"] 5 | path = rs-relay 6 | url = git@github.com:WalletConnect/rs-relay.git 7 | branch = chore/test-irn-cluster 8 | -------------------------------------------------------------------------------- /terraform/context.tf: -------------------------------------------------------------------------------- 1 | module "this" { 2 | source = "app.terraform.io/wallet-connect/label/null" 3 | version = "0.3.2" 4 | 5 | namespace = "wc" 6 | region = var.region 7 | stage = local.stage 8 | name = var.name 9 | 10 | tags = { 11 | Application = "notify-server" 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /terraform/docdb/terraform.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "~> 1.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = "~> 5.7" 8 | } 9 | random = { 10 | source = "hashicorp/random" 11 | version = "~> 3.5" 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /terraform/ecs/terraform.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "~> 1.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = "~> 5.7" 8 | } 9 | random = { 10 | source = "hashicorp/random" 11 | version = "3.6.1" 12 | } 13 | } 14 | } 15 | 
-------------------------------------------------------------------------------- /terraform/postgres/terraform.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "~> 1.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = "~> 5.7" 8 | } 9 | random = { 10 | source = "hashicorp/random" 11 | version = "~> 3.5" 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /terraform/providers.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | region = var.region 3 | 4 | default_tags { 5 | tags = module.this.tags 6 | } 7 | } 8 | 9 | provider "grafana" { 10 | url = "https://${data.terraform_remote_state.monitoring.outputs.grafana_workspaces.central.grafana_endpoint}" 11 | auth = var.grafana_auth 12 | } 13 | -------------------------------------------------------------------------------- /migrations/README.md: -------------------------------------------------------------------------------- 1 | # Migrations 2 | 3 | This folder contains migrations for Notify Server and they are automatically run on start-up. 4 | 5 | If you make a change, please also update [ERD.md](./ERD.md). 
6 | 7 | ## New Migration 8 | 9 | ```bash 10 | cargo install sqlx-cli 11 | ``` 12 | 13 | ```bash 14 | sqlx migrate add 15 | ``` 16 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | root = true 2 | 3 | [*] 4 | charset = utf-8 5 | end_of_line = lf 6 | indent_size = 4 7 | indent_style = space 8 | insert_final_newline = true 9 | max_line_length = 80 10 | trim_trailing_whitespace = true 11 | 12 | [*.tf] 13 | indent_size = 2 14 | 15 | [*.libsonnet] 16 | indent_size = 2 17 | 18 | [{*.yml,*.yaml}] 19 | indent_size = 2 20 | -------------------------------------------------------------------------------- /renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "extends": [ 4 | "config:recommended", 5 | ":prConcurrentLimit10", 6 | ":prHourlyLimit2", 7 | ":semanticCommits", 8 | ":semanticCommitScope(deps)" 9 | ], 10 | "git-submodules": { 11 | "enabled": true 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /terraform/redis/outputs.tf: -------------------------------------------------------------------------------- 1 | output "cluster_id" { 2 | description = "The ID of the cluster" 3 | value = aws_elasticache_cluster.cache.id 4 | } 5 | 6 | output "endpoint" { 7 | description = "The endpoint of the Redis cluster" 8 | value = "redis://${aws_elasticache_cluster.cache.cache_nodes[0].address}:${aws_elasticache_cluster.cache.cache_nodes[0].port}" 9 | } 10 | -------------------------------------------------------------------------------- /terraform/.tflint.hcl: -------------------------------------------------------------------------------- 1 | config { 2 | format = "default" 3 | module = true 4 | } 5 | 6 | plugin "terraform" { 7 | enabled = true 8 | preset = "all" 9 | } 10 | 11 | plugin 
"aws" { 12 | enabled = true 13 | version = "0.31.0" 14 | source = "github.com/terraform-linters/tflint-ruleset-aws" 15 | } 16 | 17 | rule "terraform_workspace_remote" { 18 | enabled = false 19 | } 20 | -------------------------------------------------------------------------------- /terraform/ecs/dns.tf: -------------------------------------------------------------------------------- 1 | # DNS Records 2 | resource "aws_route53_record" "dns_load_balancer" { 3 | for_each = var.route53_zones 4 | 5 | zone_id = each.key 6 | name = each.value 7 | type = "A" 8 | 9 | alias { 10 | name = aws_lb.load_balancer.dns_name 11 | zone_id = aws_lb.load_balancer.zone_id 12 | evaluate_target_health = true 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /terraform/res_redis.tf: -------------------------------------------------------------------------------- 1 | module "redis_context" { 2 | source = "app.terraform.io/wallet-connect/label/null" 3 | version = "0.3.2" 4 | context = module.this 5 | 6 | attributes = [ 7 | "cache" 8 | ] 9 | } 10 | 11 | module "redis" { 12 | source = "./redis" 13 | context = module.redis_context 14 | 15 | vpc_id = module.vpc.vpc_id 16 | subnets_ids = module.vpc.intra_subnets 17 | } 18 | -------------------------------------------------------------------------------- /src/services/public_http_server/handlers/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod did_json; 2 | pub mod get_subscribers_v0; 3 | pub mod get_subscribers_v1; 4 | pub mod get_welcome_notification; 5 | pub mod health; 6 | pub mod mark_all_as_read; 7 | pub mod notification_link; 8 | pub mod notify_v0; 9 | pub mod notify_v1; 10 | pub mod post_welcome_notification; 11 | pub mod relay_webhook; 12 | pub mod subscribe_topic; 13 | pub mod webhooks; 14 | -------------------------------------------------------------------------------- /terraform/monitoring/terraform.tf: 
-------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0" 3 | 4 | required_providers { 5 | grafana = { 6 | source = "grafana/grafana" 7 | version = "~> 2.0" 8 | } 9 | jsonnet = { 10 | source = "alxrem/jsonnet" 11 | version = "~> 2.3.0" 12 | } 13 | } 14 | } 15 | 16 | #provider "jsonnet" { 17 | # jsonnet_path = "./grafonnet-lib,./panels" 18 | #} 19 | -------------------------------------------------------------------------------- /.env.terraform.example: -------------------------------------------------------------------------------- 1 | # Obtain from https://g-aa89c04cfd.grafana-workspace.eu-central-1.amazonaws.com/org/apikeys 2 | # Use Admin role 3 | export GRAFANA_AUTH="" 4 | 5 | # Obtain administrator credentials from https://walletconnect.awsapps.com/start#/ 6 | # Use sdlc-dev account 7 | export AWS_ACCESS_KEY_ID="" 8 | export AWS_SECRET_ACCESS_KEY="" 9 | export AWS_SESSION_TOKEN="" 10 | 11 | export AWS_REGION="eu-central-1" 12 | -------------------------------------------------------------------------------- /terraform/res_alerting.tf: -------------------------------------------------------------------------------- 1 | module "alerting" { 2 | source = "./alerting" 3 | context = module.this.context 4 | 5 | webhook_cloudwatch_p2 = var.webhook_cloudwatch_p2 6 | webhook_prometheus_p2 = var.webhook_prometheus_p2 7 | 8 | ecs_cluster_name = module.ecs.ecs_cluster_name 9 | ecs_service_name = module.ecs.ecs_service_name 10 | 11 | elb_load_balancer_arn = module.ecs.load_balancer_arn_suffix 12 | 13 | redis_cluster_id = module.redis.cluster_id 14 | } 15 | -------------------------------------------------------------------------------- /terraform/ecs/cluster_logs.tf: -------------------------------------------------------------------------------- 1 | resource "aws_cloudwatch_log_group" "cluster" { 2 | name = "${module.this.id}-app-logs" 3 | kms_key_id = var.cloudwatch_logs_key_arn 4 | retention_in_days = 
var.cloudwatch_retention_in_days 5 | } 6 | 7 | resource "aws_cloudwatch_log_group" "otel" { 8 | name = "${module.this.id}-aws-otel-sidecar-collector" 9 | kms_key_id = var.cloudwatch_logs_key_arn 10 | retention_in_days = var.cloudwatch_retention_in_days 11 | } 12 | -------------------------------------------------------------------------------- /terraform/res_dns.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | zones = { for k, v in tomap(data.terraform_remote_state.infra_aws.outputs.zones.notify[local.stage]) : v.id => v.name } 3 | zones_certificates = { for k, v in module.dns_certificate : v.zone_id => v.certificate_arn } 4 | } 5 | 6 | module "dns_certificate" { 7 | for_each = local.zones 8 | source = "app.terraform.io/wallet-connect/dns/aws" 9 | version = "0.1.3" 10 | context = module.this 11 | hosted_zone_name = each.value 12 | fqdn = each.value 13 | } 14 | -------------------------------------------------------------------------------- /docker-compose.storage.yml: -------------------------------------------------------------------------------- 1 | version: '3.9' 2 | services: 3 | # jaeger: 4 | # image: jaegertracing/opentelemetry-all-in-one:latest 5 | # networks: 6 | # - notify-server 7 | # ports: 8 | # - "3001:16686" 9 | 10 | redis: 11 | image: redis:7-alpine 12 | ports: 13 | - "6378:6379" 14 | 15 | postgres: 16 | image: postgres:16 17 | command: -c max_connections=200 18 | environment: 19 | POSTGRES_HOST_AUTH_METHOD: trust 20 | ports: 21 | - "5432:5432" 22 | 23 | networks: 24 | notify-server: 25 | ipam: 26 | driver: default 27 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | # Description 2 | 3 | 8 | 9 | Resolves # (issue) 10 | 11 | ## How Has This Been Tested? 
12 | 13 | 18 | 19 | 20 | 21 | ## Due Diligence 22 | 23 | * [ ] Breaking change 24 | * [ ] Requires a documentation update 25 | * [ ] Requires a e2e/integration test update 26 | -------------------------------------------------------------------------------- /src/services/private_http_server/handlers/metrics.rs: -------------------------------------------------------------------------------- 1 | use { 2 | axum::response::IntoResponse, hyper::StatusCode, tracing::error, wc::metrics::ServiceMetrics, 3 | }; 4 | 5 | pub async fn handler() -> impl IntoResponse { 6 | let result = ServiceMetrics::export(); 7 | 8 | match result { 9 | Ok(content) => (StatusCode::OK, content), 10 | Err(e) => { 11 | error!(?e, "Failed to parse metrics"); 12 | 13 | ( 14 | StatusCode::INTERNAL_SERVER_ERROR, 15 | "Failed to get metrics".to_string(), 16 | ) 17 | } 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /terraform/terraform.tf: -------------------------------------------------------------------------------- 1 | # Terraform Configuration 2 | terraform { 3 | required_version = ">= 1.0" 4 | 5 | backend "remote" { 6 | hostname = "app.terraform.io" 7 | organization = "wallet-connect" 8 | workspaces { 9 | prefix = "notify-server-" 10 | } 11 | } 12 | 13 | required_providers { 14 | aws = { 15 | source = "hashicorp/aws" 16 | version = ">= 5.7" 17 | } 18 | grafana = { 19 | source = "grafana/grafana" 20 | version = ">= 2.1" 21 | } 22 | random = { 23 | source = "hashicorp/random" 24 | version = "3.6.1" 25 | } 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /migrations/20240103051259_welcome_notification.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE welcome_notification ( 2 | id uuid PRIMARY KEY DEFAULT gen_random_uuid(), 3 | created_at timestamptz NOT NULL DEFAULT now(), 4 | updated_at timestamptz NOT NULL DEFAULT now(), 5 | project uuid NOT 
NULL REFERENCES project (id) ON DELETE CASCADE, 6 | enabled bool NOT NULL, 7 | type uuid NOT NULL, 8 | title varchar(255) NOT NULL, 9 | body varchar(255) NOT NULL, 10 | url varchar(255) NULL, 11 | 12 | UNIQUE (project) 13 | ); 14 | -------------------------------------------------------------------------------- /terraform/res_db.tf: -------------------------------------------------------------------------------- 1 | module "db_context" { 2 | source = "app.terraform.io/wallet-connect/label/null" 3 | version = "0.3.2" 4 | context = module.this 5 | 6 | attributes = [ 7 | "db" 8 | ] 9 | } 10 | 11 | module "postgres" { 12 | source = "./postgres" 13 | context = module.db_context 14 | attributes = ["postgres"] 15 | 16 | vpc_id = module.vpc.vpc_id 17 | subnet_ids = module.vpc.intra_subnets 18 | ingress_cidr_blocks = module.vpc.private_subnets_cidr_blocks 19 | 20 | cloudwatch_logs_key_arn = aws_kms_key.cloudwatch_logs.arn 21 | 22 | depends_on = [aws_iam_role.application_role] 23 | } 24 | -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | use { 2 | notify_server::{bootstrap, config::get_configuration, error::NotifyServerError}, 3 | tokio::sync::broadcast, 4 | tracing_subscriber::fmt::format::FmtSpan, 5 | }; 6 | 7 | #[tokio::main] 8 | async fn main() -> Result<(), NotifyServerError> { 9 | let config = get_configuration().await?; 10 | 11 | tracing_subscriber::fmt() 12 | .with_env_filter(&config.log_level) 13 | .with_span_events(FmtSpan::CLOSE) 14 | .with_ansi(std::env::var("ANSI_LOGS").is_ok()) 15 | .init(); 16 | 17 | let (_signal, shutdown) = broadcast::channel(1); 18 | bootstrap(shutdown, config).await 19 | } 20 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | edition = "2021" 2 | 3 | reorder_imports = true 4 | 
use_try_shorthand = true 5 | remove_nested_parens = true 6 | reorder_modules = true 7 | use_field_init_shorthand = true 8 | 9 | ## We only use settings available in the stable channel 10 | 11 | #fn_single_line = false 12 | #format_code_in_doc_comments = true 13 | #format_strings = true 14 | #imports_layout = "HorizontalVertical" 15 | #imports_granularity = "One" 16 | #normalize_comments = true 17 | #normalize_doc_attributes = true 18 | #reorder_impl_items = true 19 | #group_imports = "StdExternalCrate" 20 | #wrap_comments = true 21 | #overflow_delimited_expr = true 22 | #unstable_features = true 23 | -------------------------------------------------------------------------------- /src/registry/storage/error.rs: -------------------------------------------------------------------------------- 1 | //! Error typedefs used by this crate 2 | 3 | use thiserror::Error as ThisError; 4 | 5 | /// The error produced from most Storage functions 6 | #[derive(Debug, ThisError)] 7 | pub enum StorageError { 8 | /// Unable to serialize data to store 9 | #[error("error on serialize data")] 10 | Serialize, 11 | /// Unable to deserialize data from store 12 | #[error("error on deserialize data")] 13 | Deserialize, 14 | /// Error on establishing a connection with the storage 15 | #[error("error on open connection")] 16 | Connection(String), 17 | /// An unexpected error occurred 18 | #[error("{0:?}")] 19 | Other(String), 20 | } 21 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/app/publishing_workers_count.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'Worker count', 11 | datasource = ds.prometheus, 12 | ) 
13 | .configure(defaults.configuration.timeseries) 14 | .addTarget(targets.prometheus( 15 | datasource = ds.prometheus, 16 | expr = 'publishing_workers_count', 17 | legendFormat = 'r{{aws_ecs_task_revision}}', 18 | )) 19 | } 20 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/app/publishing_workers_queued_size.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'Queue size', 11 | datasource = ds.prometheus, 12 | ) 13 | .configure(defaults.configuration.timeseries) 14 | .addTarget(targets.prometheus( 15 | datasource = ds.prometheus, 16 | expr = 'publishing_queue_queued_size', 17 | legendFormat = "r{{aws_ecs_task_revision}}" 18 | )) 19 | } 20 | -------------------------------------------------------------------------------- /src/services/public_http_server/handlers/health.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::state::AppState, 3 | axum::{extract::State, http::StatusCode, response::IntoResponse}, 4 | std::sync::Arc, 5 | }; 6 | 7 | // No rate limit necessary since returning a fixed string is less computational intensive than tracking the rate limit 8 | 9 | // TODO generate this response at app startup to avoid unnecessary string allocations 10 | pub async fn handler(State(state): State>) -> impl IntoResponse { 11 | ( 12 | StatusCode::OK, 13 | format!( 14 | "OK, {} v{}", 15 | state.build_info.crate_info.name, state.build_info.crate_info.version 16 | ), 17 | ) 18 | } 19 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/app/publishing_workers_errors.libsonnet: 
-------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'Error rate', 11 | datasource = ds.prometheus, 12 | ) 13 | .configure(defaults.configuration.timeseries) 14 | .addTarget(targets.prometheus( 15 | datasource = ds.prometheus, 16 | expr = 'rate(publishing_workers_errors_total{}[$__rate_interval])', 17 | legendFormat = 'r{{aws_ecs_task_revision}}', 18 | )) 19 | } 20 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/app/publishing_workers_processing_size.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'status=processing count', 11 | datasource = ds.prometheus, 12 | ) 13 | .configure(defaults.configuration.timeseries) 14 | .addTarget(targets.prometheus( 15 | datasource = ds.prometheus, 16 | expr = 'publishing_queue_processing_size', 17 | legendFormat = "r{{aws_ecs_task_revision}}" 18 | )) 19 | } 20 | -------------------------------------------------------------------------------- /src/services/public_http_server/handlers/webhooks/mod.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::state::WebhookNotificationEvent, 3 | serde::{Deserialize, Serialize}, 4 | }; 5 | 6 | // FIXME 7 | // pub mod delete_webhook; 8 | // pub mod get_webhooks; 9 | // pub mod register_webhook; 10 | // pub mod update_webhook; 11 | 12 | #[derive(Debug, 
Deserialize, Serialize)] 13 | pub struct WebhookConfig { 14 | #[serde(rename = "webhook")] 15 | url: String, 16 | events: Vec, 17 | } 18 | 19 | // fn validate_url(url: &str) -> Result<()> { 20 | // let url = url::Url::parse(url)?; 21 | // if url.scheme() != "https" { 22 | // return Err(crate::error::Error::InvalidScheme); 23 | // } 24 | // Ok(()) 25 | // } 26 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/app/publishing_workers_published_count.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'Publish rate', 11 | datasource = ds.prometheus, 12 | ) 13 | .configure(defaults.configuration.timeseries) 14 | .addTarget(targets.prometheus( 15 | datasource = ds.prometheus, 16 | expr = 'rate(publishing_queue_published_count_total{}[$__rate_interval])', 17 | legendFormat = "r{{aws_ecs_task_revision}}" 18 | )) 19 | } 20 | -------------------------------------------------------------------------------- /.github/workflows/sub-validate-health.yml: -------------------------------------------------------------------------------- 1 | name: ❖ Validate Rust 2 | 3 | on: 4 | workflow_call: 5 | inputs: 6 | stage: 7 | description: 'the environment to validate' 8 | required: true 9 | type: string 10 | stage-url: 11 | description: 'the URL of the environment' 12 | required: true 13 | type: string 14 | 15 | permissions: 16 | contents: read 17 | checks: write 18 | id-token: write 19 | 20 | jobs: 21 | health-check: 22 | name: Health Check - ${{ inputs.stage }} 23 | runs-on: ubuntu-latest 24 | environment: 25 | name: ${{ inputs.stage }} 26 | url: ${{ inputs.stage-url }} 27 | steps: 28 | - name: health-check 29 | 
run: curl "${{ inputs.stage-url }}" 30 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/antonbabenko/pre-commit-terraform 3 | rev: v1.77.0 4 | hooks: 5 | - id: terraform_fmt 6 | - id: terraform_tflint 7 | - id: terraform_tfsec 8 | - id: terraform_docs 9 | args: 10 | - --args=--config=./terraform/.terraform-docs.yml 11 | 12 | - repo: https://github.com/pre-commit/pre-commit-hooks 13 | rev: v4.4.0 14 | hooks: 15 | - id: check-merge-conflict 16 | - id: check-yaml 17 | - id: end-of-file-fixer 18 | - id: trailing-whitespace 19 | - id: detect-aws-credentials 20 | - id: detect-private-key 21 | - id: forbid-new-submodules 22 | - id: no-commit-to-branch 23 | - id: mixed-line-ending 24 | -------------------------------------------------------------------------------- /deny.toml: -------------------------------------------------------------------------------- 1 | [licenses] 2 | unused-allowed-license = "deny" 3 | copyleft = "deny" 4 | allow = [ 5 | "Apache-2.0", 6 | "MIT", 7 | "Unlicense", 8 | "BSD-3-Clause", 9 | "0BSD", 10 | "ISC", 11 | "CC0-1.0" 12 | ] 13 | 14 | exceptions = [{ name = "unicode-ident", allow = ["Unicode-DFS-2016"] }] 15 | 16 | [licenses.private] 17 | ignore = true 18 | 19 | # TODO We should be able to remove `ignore-sources` once we add `publish = false` to all the crates sourced from here. 
20 | ignore-sources = [ 21 | "https://github.com/WalletConnect/utils-rs.git", 22 | "https://github.com/WalletConnect/WalletConnectRust.git", 23 | ] 24 | 25 | [[licenses.clarify]] 26 | name = "ring" 27 | expression = "ISC" 28 | license-files = [{ path = "LICENSE", hash = 0xbd0eed23 }] 29 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/lb/active_connections.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'Active Connections', 11 | datasource = ds.cloudwatch, 12 | ) 13 | .configure(defaults.configuration.timeseries) 14 | 15 | .addTarget(targets.cloudwatch( 16 | datasource = ds.cloudwatch, 17 | namespace = 'AWS/ApplicationELB', 18 | metricName = 'ActiveConnectionCount', 19 | dimensions = { 20 | LoadBalancer: vars.load_balancer 21 | }, 22 | statistic = 'Average', 23 | )) 24 | } 25 | -------------------------------------------------------------------------------- /src/services/private_http_server/mod.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::services::private_http_server::handlers::metrics::handler, 3 | axum::{routing::get, Router}, 4 | std::net::{IpAddr, SocketAddr}, 5 | tokio::net::TcpListener, 6 | tracing::info, 7 | }; 8 | 9 | mod handlers; 10 | 11 | pub async fn start( 12 | bind_ip: IpAddr, 13 | telemetry_prometheus_port: Option, 14 | ) -> Result<(), std::io::Error> { 15 | let private_app = Router::new().route("/metrics", get(handler)); 16 | 17 | let port = telemetry_prometheus_port.unwrap_or(3001); 18 | let addr = SocketAddr::from((bind_ip, port)); 19 | info!("Starting private HTTP server on {}", addr); 20 | 21 | 
axum::serve( 22 | TcpListener::bind(addr).await?, 23 | private_app.into_make_service(), 24 | ) 25 | .await 26 | } 27 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/rds/database_connections.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'Connections', 11 | datasource = ds.cloudwatch, 12 | ) 13 | .configure(defaults.configuration.timeseries) 14 | 15 | .addTarget(targets.cloudwatch( 16 | datasource = ds.cloudwatch, 17 | namespace = 'AWS/RDS', 18 | metricName = 'DatabaseConnections', 19 | dimensions = { 20 | DBClusterIdentifier: vars.rds_cluster_id, 21 | }, 22 | matchExact = true, 23 | statistic = 'Maximum', 24 | )) 25 | } 26 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/app/subscribe_latency.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'Subscribe Latency', 11 | datasource = ds.prometheus, 12 | ) 13 | .configure( 14 | defaults.configuration.timeseries 15 | .withUnit('ms') 16 | ) 17 | 18 | .addTarget(targets.prometheus( 19 | datasource = ds.prometheus, 20 | expr = 'sum(rate(subscribe_latency_sum[$__rate_interval])) / sum(rate(subscribe_latency_count[$__rate_interval]))', 21 | exemplar = false, 22 | legendFormat = 'Latency', 23 | )) 24 | } 25 | 
-------------------------------------------------------------------------------- /docker-compose.notify-server.yml: -------------------------------------------------------------------------------- 1 | version: '3.9' 2 | services: 3 | 4 | notify-server: 5 | networks: 6 | - notify-server 7 | build: 8 | dockerfile: ./Dockerfile-dev 9 | context: . 10 | depends_on: 11 | jaeger: 12 | condition: service_started 13 | ports: 14 | - "3000:3000" 15 | environment: 16 | - PORT=3000 17 | - LOG_LEVEL=INFO 18 | - LOG_LEVEL_OTEL=info,notify-server=4 19 | - TELEMETRY_ENABLED=true 20 | - POSTGRES_URL=postgres://postgres:password@postgres:5432/postgres 21 | - OTEL_SERVICE_NAME=notify-server 22 | - OTEL_EXPORTER_OTLP_ENDPOINT=http://jaeger:4317 23 | volumes: 24 | - ./:/notify-server/ 25 | healthcheck: 26 | test: [ "CMD", "curl", "localhost:3000/health" ] 27 | interval: 5s 28 | timeout: 5s 29 | retries: 5 30 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/app/account_not_found.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'Accounts Not Found', 11 | datasource = ds.prometheus, 12 | ) 13 | .configure(defaults.configuration.timeseries) 14 | 15 | .addTarget(targets.prometheus( 16 | datasource = ds.prometheus, 17 | expr = 'sum by(aws_ecs_task_revision) (increase(dispatched_notifications_total{type="not_found"}[$__rate_interval]))', 18 | legendFormat = 'r{{aws_ecs_task_revision}}', 19 | exemplar = true, 20 | refId = 'NotificationsNotFound', 21 | )) 22 | } 23 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/app/dispatched_notifications.libsonnet: 
-------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'Dispatched Notifications', 11 | datasource = ds.prometheus, 12 | ) 13 | .configure(defaults.configuration.timeseries) 14 | 15 | .addTarget(targets.prometheus( 16 | datasource = ds.prometheus, 17 | expr = 'sum by(aws_ecs_task_revision) (increase(dispatched_notifications_total{type="sent"}[$__rate_interval]))', 18 | legendFormat = 'r{{aws_ecs_task_revision}}', 19 | exemplar = true, 20 | refId = 'NotificationsSent', 21 | )) 22 | } 23 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/lb/requests.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'Requests', 11 | datasource = ds.cloudwatch, 12 | ) 13 | .configure(defaults.configuration.timeseries) 14 | 15 | .addTarget(targets.cloudwatch( 16 | alias = 'Requests', 17 | datasource = ds.cloudwatch, 18 | namespace = 'AWS/ApplicationELB', 19 | metricName = 'RequestCount', 20 | dimensions = { 21 | LoadBalancer: vars.load_balancer 22 | }, 23 | matchExact = true, 24 | statistic = 'Sum', 25 | refId = 'Requests', 26 | )) 27 | } 28 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/app/registry_request_rate.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import 
'../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'Registry Req Rate', 11 | datasource = ds.prometheus, 12 | ) 13 | .configure( 14 | defaults.configuration.timeseries 15 | .withUnit('cps') 16 | ) 17 | 18 | .addTarget(targets.prometheus( 19 | datasource = ds.prometheus, 20 | expr = 'sum by (aws_ecs_task_revision) (rate(registry_requests_total[$__rate_interval]))', 21 | legendFormat = 'r{{aws_ecs_task_revision}}', 22 | exemplar = true, 23 | refId = 'RegistryRequestRate', 24 | )) 25 | } 26 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/app/relay_subscribe_rate.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'Relay Subscribe Rate', 11 | datasource = ds.prometheus, 12 | ) 13 | .configure( 14 | defaults.configuration.timeseries 15 | .withUnit('cps') 16 | ) 17 | 18 | .addTarget(targets.prometheus( 19 | datasource = ds.prometheus, 20 | expr = 'sum by (aws_ecs_task_revision, tag) (rate(relay_subscribes_total[$__rate_interval]))', 21 | legendFormat = '{{tag}} r{{aws_ecs_task_revision}}', 22 | exemplar = true, 23 | refId = 'RelaySubscribesRate', 24 | )) 25 | } 26 | -------------------------------------------------------------------------------- /terraform/inputs.tf: -------------------------------------------------------------------------------- 1 | data "terraform_remote_state" "org" { 2 | backend = "remote" 3 | config = { 4 | organization = "wallet-connect" 5 | workspaces = { 6 | name = 
"aws-org" 7 | } 8 | } 9 | } 10 | 11 | data "terraform_remote_state" "datalake" { 12 | backend = "remote" 13 | config = { 14 | organization = "wallet-connect" 15 | workspaces = { 16 | name = "datalake-${local.stage == "dev" ? "staging" : local.stage}" 17 | } 18 | } 19 | } 20 | 21 | data "terraform_remote_state" "infra_aws" { 22 | backend = "remote" 23 | config = { 24 | organization = "wallet-connect" 25 | workspaces = { 26 | name = "infra-aws" 27 | } 28 | } 29 | } 30 | 31 | data "terraform_remote_state" "monitoring" { 32 | backend = "remote" 33 | config = { 34 | organization = "wallet-connect" 35 | workspaces = { 36 | name = "monitoring" 37 | } 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/app/http_request_rate.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'HTTP Req Rate', 11 | datasource = ds.prometheus, 12 | ) 13 | .configure( 14 | defaults.configuration.timeseries 15 | .withUnit('reqps') 16 | ) 17 | 18 | .addTarget(targets.prometheus( 19 | datasource = ds.prometheus, 20 | expr = 'sum by (aws_ecs_task_revision, method, endpoint) (rate(http_requests_total[$__rate_interval]))', 21 | legendFormat = '{{method}} {{endpoint}} r{{aws_ecs_task_revision}}', 22 | exemplar = true, 23 | refId = 'HttpRequestRate', 24 | )) 25 | } 26 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/app/relay_outgoing_message_rate.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import 
'../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'Msg Out Rate', 11 | datasource = ds.prometheus, 12 | ) 13 | .configure( 14 | defaults.configuration.timeseries 15 | .withUnit('cps') 16 | ) 17 | 18 | .addTarget(targets.prometheus( 19 | datasource = ds.prometheus, 20 | expr = 'sum by (aws_ecs_task_revision, tag) (rate(relay_outgoing_messages_total[$__rate_interval]))', 21 | legendFormat = '{{tag}} r{{aws_ecs_task_revision}}', 22 | exemplar = true, 23 | refId = 'RelayOutgoingMessagesRate', 24 | )) 25 | } 26 | -------------------------------------------------------------------------------- /terraform/res_monitoring.tf: -------------------------------------------------------------------------------- 1 | module "monitoring" { 2 | source = "./monitoring" 3 | context = module.this 4 | 5 | monitoring_role_arn = data.terraform_remote_state.monitoring.outputs.grafana_workspaces.central.iam_role_arn 6 | 7 | notification_channels = var.notification_channels 8 | prometheus_endpoint = aws_prometheus_workspace.prometheus.prometheus_endpoint 9 | 10 | ecs_cluster_name = module.ecs.ecs_cluster_name 11 | ecs_service_name = module.ecs.ecs_service_name 12 | rds_cluster_id = module.postgres.rds_cluster_id 13 | ecs_target_group_arn = module.ecs.target_group_arn 14 | redis_cluster_id = module.redis.cluster_id 15 | load_balancer_arn = module.ecs.load_balancer_arn_suffix 16 | log_group_app_name = module.ecs.log_group_app_name 17 | log_group_app_arn = module.ecs.log_group_app_arn 18 | aws_account_id = data.aws_caller_identity.this.account_id 19 | } 20 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/app/relay_batch_subscribe_rate.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local 
defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'Relay Batch Subscribe Rate', 11 | datasource = ds.prometheus, 12 | ) 13 | .configure( 14 | defaults.configuration.timeseries 15 | .withUnit('cps') 16 | ) 17 | 18 | .addTarget(targets.prometheus( 19 | datasource = ds.prometheus, 20 | expr = 'sum by (aws_ecs_task_revision, tag) (rate(relay_batch_subscribes_total[$__rate_interval]))', 21 | legendFormat = '{{tag}} r{{aws_ecs_task_revision}}', 22 | exemplar = true, 23 | refId = 'RelayBatchSubscribesRate', 24 | )) 25 | } 26 | -------------------------------------------------------------------------------- /migrations/20231026150926_add_notification_trigger.sql: -------------------------------------------------------------------------------- 1 | -- Function that sends a pg_notify with the notification id to process 2 | CREATE FUNCTION "notification_for_delivery" () 3 | RETURNS TRIGGER AS $$ 4 | BEGIN 5 | PERFORM pg_notify('notification_for_delivery', NEW.id::text); 6 | RETURN NEW; 7 | END; 8 | $$ LANGUAGE PLPGSQL; 9 | 10 | -- Trigger to notify when a new notification to delivery is created 11 | CREATE TRIGGER "subscriber_notification_insert" AFTER INSERT ON "subscriber_notification" 12 | FOR EACH ROW 13 | EXECUTE FUNCTION "notification_for_delivery" (); 14 | 15 | -- Trigger to notify when a notification delivery status is updated to the `queued` 16 | CREATE TRIGGER "subscriber_notification_update" AFTER UPDATE ON "subscriber_notification" 17 | FOR EACH ROW 18 | WHEN (OLD.status <> NEW.status AND NEW.status = 'queued') 19 | EXECUTE FUNCTION "notification_for_delivery" (); 20 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/app/notify_latency.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import 
'../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | local _configuration = defaults.configuration.timeseries 8 | .withUnit('ms') 9 | .withSoftLimit( 10 | axisSoftMin = 0, 11 | axisSoftMax = 2000, 12 | ); 13 | 14 | { 15 | new(ds, vars):: 16 | panels.timeseries( 17 | title = 'Notify Latency', 18 | datasource = ds.prometheus, 19 | ) 20 | .configure(_configuration) 21 | 22 | .addTarget(targets.prometheus( 23 | datasource = ds.prometheus, 24 | expr = 'sum(rate(notify_latency_sum[$__rate_interval])) / sum(rate(notify_latency_count[$__rate_interval]))', 25 | exemplar = false, 26 | legendFormat = 'Latency', 27 | )) 28 | } 29 | -------------------------------------------------------------------------------- /src/services/watcher_expiration_job.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::{metrics::Metrics, model::helpers::delete_expired_subscription_watchers}, 3 | sqlx::PgPool, 4 | std::time::Duration, 5 | tokio::time, 6 | tracing::{error, info, instrument}, 7 | }; 8 | 9 | pub async fn start(postgres: PgPool, metrics: Option) { 10 | let mut interval = time::interval(Duration::from_secs(60 * 60)); 11 | 12 | loop { 13 | interval.tick().await; 14 | info!("Running watcher expiration job"); 15 | if let Err(e) = job(&postgres, metrics.as_ref()).await { 16 | error!("Error running watcher expiration job: {e:?}"); 17 | // TODO metrics 18 | } 19 | } 20 | } 21 | 22 | #[instrument(skip_all)] 23 | async fn job(postgres: &PgPool, metrics: Option<&Metrics>) -> sqlx::Result<()> { 24 | let count = delete_expired_subscription_watchers(postgres, metrics).await?; 25 | info!("Expired {count} watchers"); 26 | Ok(()) 27 | } 28 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/app/registry_request_latency.libsonnet: 
-------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'Registry Req Latency', 11 | datasource = ds.prometheus, 12 | ) 13 | .configure( 14 | defaults.configuration.timeseries 15 | .withUnit('ms') 16 | ) 17 | 18 | .addTarget(targets.prometheus( 19 | datasource = ds.prometheus, 20 | expr = 'sum by (aws_ecs_task_revision) (rate(registry_request_latency_sum[$__rate_interval])) / sum by (aws_ecs_task_revision) (rate(registry_request_latency_count[$__rate_interval]))', 21 | legendFormat = 'r{{aws_ecs_task_revision}}', 22 | exemplar = false, 23 | refId = 'RegistryRequestLatency', 24 | )) 25 | } 26 | -------------------------------------------------------------------------------- /terraform/alerting/alarms_elb.tf: -------------------------------------------------------------------------------- 1 | resource "aws_cloudwatch_metric_alarm" "elb_5xx" { 2 | alarm_name = "${local.alarm_prefix} - 5XX Threshold Breached" 3 | alarm_description = "${local.alarm_prefix} - The number of 5XX errors was over ${var.elb_5xx_threshold} for the period" 4 | 5 | namespace = module.cloudwatch.namespaces.ApplicationELB 6 | dimensions = { 7 | LoadBalancer : var.elb_load_balancer_arn 8 | } 9 | metric_name = module.cloudwatch.metrics.ApplicationELB.HTTPCode_ELB_5XX_Count 10 | 11 | evaluation_periods = local.evaluation_periods 12 | period = local.period 13 | 14 | statistic = module.cloudwatch.statistics.Sum 15 | comparison_operator = module.cloudwatch.operators.GreaterThanOrEqualToThreshold 16 | threshold = var.elb_5xx_threshold 17 | treat_missing_data = "breaching" 18 | 19 | alarm_actions = [aws_sns_topic.cloudwatch_webhook.arn] 20 | insufficient_data_actions = 
[aws_sns_topic.cloudwatch_webhook.arn] 21 | } 22 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/app/postgres_query_latency.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'Postgres Query Latency', 11 | datasource = ds.prometheus, 12 | ) 13 | .configure( 14 | defaults.configuration.timeseries 15 | .withUnit('ms') 16 | ) 17 | 18 | .addTarget(targets.prometheus( 19 | datasource = ds.prometheus, 20 | expr = 'sum by (aws_ecs_task_revision, name) (rate(postgres_query_latency_sum[$__rate_interval])) / sum by (aws_ecs_task_revision, name) (rate(postgres_query_latency_count[$__rate_interval]))', 21 | legendFormat = '{{name}} r{{aws_ecs_task_revision}}', 22 | exemplar = false, 23 | refId = 'PostgresQueryLatency', 24 | )) 25 | } 26 | -------------------------------------------------------------------------------- /migrations/20240111200929_unique_account_address.sql: -------------------------------------------------------------------------------- 1 | CREATE FUNCTION get_address_lower(account_id text) 2 | RETURNS text AS $$ 3 | BEGIN 4 | RETURN lower(split_part(account_id, ':', 3)); 5 | END; 6 | $$ LANGUAGE plpgsql IMMUTABLE; 7 | 8 | CREATE INDEX subscriber_address ON subscriber (get_address_lower(account)); 9 | 10 | WITH duplicates AS ( 11 | SELECT subscriber.id, 12 | ROW_NUMBER() OVER ( 13 | PARTITION BY project, get_address_lower(account) 14 | ORDER BY count(subscriber_notification.id) DESC 15 | ) as rn 16 | FROM subscriber 17 | LEFT JOIN subscriber_notification ON subscriber_notification.subscriber=subscriber.id 18 | GROUP BY subscriber.id, project, get_address_lower(account) 19 | ) 20 | 
DELETE FROM subscriber WHERE id IN (SELECT id FROM duplicates WHERE rn > 1); 21 | 22 | ALTER TABLE subscriber DROP CONSTRAINT subscriber_project_account_key; 23 | CREATE UNIQUE INDEX subscriber_project_account_key ON subscriber (project, get_address_lower(account)); 24 | -------------------------------------------------------------------------------- /.terraformignore: -------------------------------------------------------------------------------- 1 | #--------------------------------------- 2 | # General 3 | .DS_Store 4 | .AppleDouble 5 | .LSOverride 6 | [Dd]esktop.ini 7 | .gitignore 8 | .gitmodules 9 | .pre-commit-config.yaml 10 | CHANGELOG.md 11 | LICENSE 12 | README.md 13 | .github/ 14 | ops/ 15 | Dockerfile 16 | justfile 17 | crash.log 18 | 19 | #--------------------------------------- 20 | # Rust/Cargo 21 | 22 | # Generated by Cargo, will have compiled files and executables 23 | src/ 24 | debug/ 25 | target/ 26 | build.rs 27 | cargo.lock 28 | cargo.toml 29 | rustfmt.toml 30 | 31 | # Backup files generated by rustfmt 32 | **/*.rs.bk 33 | 34 | # MSVC Windows builds of rustc generate these, which store debugging information 35 | *.pdb 36 | 37 | #--------------------------------------- 38 | # Environment 39 | .env.example 40 | .direnv 41 | .envrc 42 | 43 | #--------------------------------------- 44 | # JetBrains 45 | .idea/ 46 | out/ 47 | .fleet 48 | *.iws 49 | 50 | #--------------------------------------- 51 | # VSCode 52 | .vscode/ 53 | .history/ 54 | *.code-workspace 55 | -------------------------------------------------------------------------------- /src/utils.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::model::types::AccountId, 3 | relay_rpc::{ 4 | auth::ed25519_dalek::VerifyingKey, 5 | domain::{DecodedClientId, Topic}, 6 | }, 7 | }; 8 | 9 | // TODO consider using the key object directly instead of a byte slice 10 | pub fn topic_from_key(key: &[u8]) -> Topic { 11 | sha256::digest(key).into() 12 
| } 13 | 14 | pub fn get_client_id(verifying_key: &VerifyingKey) -> DecodedClientId { 15 | DecodedClientId::from_key(verifying_key) 16 | } 17 | 18 | pub fn get_address_from_account(account: &AccountId) -> &str { 19 | let s = account.as_ref(); 20 | let known_skippable_prefix_len = "eip155:1".len(); 21 | let i = s[known_skippable_prefix_len..] 22 | .find(':') 23 | .expect("AccountId should have already been validated to be eip155"); 24 | &s[known_skippable_prefix_len + i + 1..] 25 | } 26 | 27 | pub fn is_same_address(account1: &AccountId, account2: &AccountId) -> bool { 28 | get_address_from_account(account1).eq_ignore_ascii_case(get_address_from_account(account2)) 29 | } 30 | -------------------------------------------------------------------------------- /src/services/public_http_server/handlers/webhooks/delete_webhook.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::{error::Result, extractors::AuthedProjectId, state::AppState, types::WebhookInfo}, 3 | axum::{ 4 | extract::{Path, State}, 5 | response::IntoResponse, 6 | }, 7 | mongodb::bson::doc, 8 | std::sync::Arc, 9 | tracing::info, 10 | uuid::Uuid, 11 | }; 12 | 13 | // TODO test idempotency 14 | 15 | pub async fn handler( 16 | AuthedProjectId(project_id, _): AuthedProjectId, 17 | Path((_, webhook_id)): Path<(String, Uuid)>, 18 | State(state): State>, 19 | ) -> Result { 20 | let request_id = uuid::Uuid::new_v4(); 21 | info!("[{request_id}] Deleting webhook: {webhook_id} for project: {project_id}"); 22 | 23 | state 24 | .database 25 | .collection::("webhooks") 26 | .delete_one( 27 | doc! 
{"project_id": project_id, "id": webhook_id.to_string()}, 28 | None, 29 | ) 30 | .await?; 31 | 32 | Ok(axum::http::StatusCode::NO_CONTENT.into_response()) 33 | } 34 | -------------------------------------------------------------------------------- /terraform/monitoring/data_sources.tf: -------------------------------------------------------------------------------- 1 | module "monitoring-role" { 2 | source = "app.terraform.io/wallet-connect/monitoring-role/aws" 3 | version = "1.0.2" 4 | context = module.this 5 | remote_role_arn = var.monitoring_role_arn 6 | } 7 | 8 | resource "grafana_data_source" "prometheus" { 9 | type = "prometheus" 10 | name = "${module.this.stage}-${module.this.name}-amp" 11 | url = var.prometheus_endpoint 12 | 13 | json_data_encoded = jsonencode({ 14 | httpMethod = "GET" 15 | sigV4Auth = true 16 | sigV4AuthType = "ec2_iam_role" 17 | sigV4Region = module.this.region 18 | sigV4AssumeRoleArn = module.monitoring-role.iam_role_arn 19 | }) 20 | 21 | depends_on = [module.monitoring-role] 22 | } 23 | 24 | resource "grafana_data_source" "cloudwatch" { 25 | type = "cloudwatch" 26 | name = "${module.this.stage}-${module.this.name}-cloudwatch" 27 | 28 | json_data_encoded = jsonencode({ 29 | defaultRegion = module.this.region 30 | assumeRoleArn = module.monitoring-role.iam_role_arn 31 | }) 32 | 33 | depends_on = [module.monitoring-role] 34 | } 35 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 WalletConnect 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the 
Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /terraform/monitoring/main.tf: -------------------------------------------------------------------------------- 1 | data "jsonnet_file" "dashboard" { 2 | source = "${path.module}/dashboard.jsonnet" 3 | 4 | ext_str = { 5 | dashboard_title = "Notify Server - ${title(module.this.stage)}" 6 | dashboard_uid = "notify-${module.this.stage}" 7 | 8 | prometheus_uid = grafana_data_source.prometheus.uid 9 | cloudwatch_uid = grafana_data_source.cloudwatch.uid 10 | 11 | environment = module.this.stage 12 | notifications = jsonencode(var.notification_channels) 13 | 14 | ecs_cluster_name = var.ecs_cluster_name 15 | ecs_service_name = var.ecs_service_name 16 | rds_cluster_id = var.rds_cluster_id 17 | redis_cluster_id = var.redis_cluster_id 18 | load_balancer = var.load_balancer_arn 19 | target_group = var.ecs_target_group_arn 20 | log_group_app_name = var.log_group_app_name 21 | log_group_app_arn = var.log_group_app_arn 22 | aws_account_id = var.aws_account_id 23 | } 24 | } 25 | 26 | resource "grafana_dashboard" "main" { 27 | overwrite = true 28 | message = "Updated by Terraform" 29 | config_json = data.jsonnet_file.dashboard.rendered 30 | } 31 | 
-------------------------------------------------------------------------------- /terraform/deploy-dev.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | TERRAFORM_DIR="$(dirname "$0")" 5 | 6 | accountId="$(aws sts get-caller-identity | jq -r .Account)" 7 | region=$AWS_REGION 8 | 9 | imageRepo="$accountId.dkr.ecr.$region.amazonaws.com/notify" 10 | 11 | aws ecr get-login-password --region eu-central-1 | docker login --username AWS --password-stdin "$imageRepo" 12 | # --platform=linux/amd64: Must target linux/amd64 as that is what ECS runs. 13 | docker build "$TERRAFORM_DIR/.." -t "$imageRepo" --build-arg=release=true --platform=linux/amd64 $BUILD_ARGS 14 | imageVersion="$(docker inspect --format="{{ .Id }}" "$imageRepo" | cut -d: -f2)" 15 | tag="$imageRepo:$imageVersion" 16 | docker tag "$imageRepo" "$tag" 17 | docker push "$tag" 18 | 19 | # TF_VAR_* env vars not supported for remote deployments, so use *.auto.tfvars instead which works 20 | autoTfVars="$TERRAFORM_DIR/dev.auto.tfvars" 21 | trap "rm $autoTfVars" EXIT 22 | echo "image_version=\"$imageVersion\"" > "$autoTfVars" 23 | echo "grafana_auth=\"$GRAFANA_AUTH\"" >> "$autoTfVars" 24 | 25 | terraform -chdir="$TERRAFORM_DIR" workspace select wl-dev 26 | terraform -chdir="$TERRAFORM_DIR" apply -auto-approve 27 | -------------------------------------------------------------------------------- /terraform/docdb/outputs.tf: -------------------------------------------------------------------------------- 1 | output "endpoint" { 2 | description = "The connection endpoint" 3 | value = aws_docdb_cluster.main.endpoint 4 | } 5 | 6 | output "username" { 7 | description = "The master username" 8 | value = aws_docdb_cluster.main.master_username 9 | } 10 | 11 | output "password" { 12 | description = "The master password" 13 | value = aws_docdb_cluster.main.master_password 14 | } 15 | 16 | output "port" { 17 | description = "The connection port" 18 | 
value = aws_docdb_cluster.main.port 19 | } 20 | 21 | output "connection_url" { 22 | description = "The connection url" 23 | value = "mongodb://${aws_docdb_cluster.main.master_username}:${aws_docdb_cluster.main.master_password}@${aws_docdb_cluster.main.endpoint}:${aws_docdb_cluster.main.port}/${var.default_database}?tls=true&tlsCaFile=rds-combined-ca-bundle.pem&tlsAllowInvalidCertificates=true&replicaSet=rs0&readPreference=secondaryPreferred&retryWrites=false&minPoolSize=32&maxPoolSize=256&maxIdleTimeMS=30000&connectTimeoutMS=30000" 24 | } 25 | 26 | output "cluster_id" { 27 | description = "The cluster identifier" 28 | value = aws_docdb_cluster.main.cluster_identifier 29 | } 30 | -------------------------------------------------------------------------------- /src/services/public_http_server/handlers/relay_webhook/handlers/mod.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::error::RelayMessageClientError, 3 | crate::{rpc::JsonRpcRequest, types::Envelope}, 4 | chacha20poly1305::{aead::Aead, ChaCha20Poly1305, KeyInit}, 5 | serde::de::DeserializeOwned, 6 | sha2::digest::generic_array::GenericArray, 7 | }; 8 | 9 | pub mod notify_delete; 10 | pub mod notify_get_notifications; 11 | pub mod notify_mark_notifications_as_read; 12 | pub mod notify_subscribe; 13 | pub mod notify_update; 14 | pub mod notify_watch_subscriptions; 15 | 16 | fn decrypt_message( 17 | envelope: Envelope, 18 | encryption_key: &[u8; 32], 19 | ) -> Result, RelayMessageClientError> { 20 | let cipher = ChaCha20Poly1305::new(GenericArray::from_slice(encryption_key)); 21 | 22 | let msg = cipher 23 | .decrypt( 24 | GenericArray::from_slice(&envelope.iv), 25 | chacha20poly1305::aead::Payload::from(&*envelope.sealbox), 26 | ) 27 | .map_err(RelayMessageClientError::Decryption)?; 28 | 29 | serde_json::from_slice::>(&msg) 30 | .map_err(RelayMessageClientError::JsonDeserialization) 31 | } 32 | 
-------------------------------------------------------------------------------- /terraform/monitoring/panels/app/postgres_query_rate.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'Postgres Query Rate', 11 | datasource = ds.prometheus, 12 | ) 13 | .configure( 14 | defaults.configuration.timeseries 15 | .withUnit('cps') 16 | ) 17 | 18 | .addTarget(targets.prometheus( 19 | datasource = ds.prometheus, 20 | expr = 'sum by (aws_ecs_task_revision, name) (rate(postgres_queries_total[$__rate_interval]))', 21 | legendFormat = '{{name}} r{{aws_ecs_task_revision}}', 22 | exemplar = true, 23 | refId = 'PostgresQueryRate', 24 | )) 25 | 26 | .addTarget(targets.prometheus( 27 | datasource = ds.prometheus, 28 | expr = 'sum(rate(postgres_queries_total[$__rate_interval]))', 29 | legendFormat = 'Total', // expr aggregates across all labels, so aws_ecs_task_revision is unavailable for the legend 30 | exemplar = true, 31 | refId = 'PostgresQueryRateTotal', 32 | )) 33 | } 34 | -------------------------------------------------------------------------------- /terraform/postgres/outputs.tf: -------------------------------------------------------------------------------- 1 | output "database_name" { 2 | description = "The name of the default database in the cluster" 3 | value = var.db_name 4 | } 5 | 6 | output "master_username" { 7 | description = "The username for the master DB user" 8 | value = var.db_master_username 9 | } 10 | 11 | output "master_password_id" { 12 | description = "The ID of the database master password in Secrets Manager" 13 | value = aws_secretsmanager_secret.db_master_password.id 14 | } 15 | 16 | output "rds_cluster_arn" { 17 | description = "The ARN of the cluster" 18 | value = module.db_cluster.cluster_arn 19 | } 20 |
21 | output "rds_cluster_id" { 22 | description = "The ID of the cluster" 23 | value = module.db_cluster.cluster_id 24 | } 25 | 26 | output "rds_cluster_endpoint" { 27 | description = "The cluster endpoint" 28 | value = module.db_cluster.cluster_endpoint 29 | } 30 | 31 | output "database_url" { 32 | description = "The URL used to connect to the cluster" 33 | value = "postgres://${module.db_cluster.cluster_master_username}:${module.db_cluster.cluster_master_password}@${module.db_cluster.cluster_endpoint}:${module.db_cluster.cluster_port}/${var.db_name}" 34 | } 35 | -------------------------------------------------------------------------------- /src/siwx/notify_recap.rs: -------------------------------------------------------------------------------- 1 | pub const NOTIFY_URI: &str = "https://notify.walletconnect.com"; 2 | pub const ABILITY_NAMESPACE_MANAGE: &str = "manage"; 3 | pub const ABILITY_ABILITY_ALL_APPS_MAGIC: &str = "all-apps"; 4 | pub const ABILITY_ABILITY_SUFFIX: &str = "-notifications"; 5 | 6 | pub mod test_utils { 7 | use { 8 | super::*, 9 | crate::siwx::erc5573::{Ability, ReCapDetailsObject}, 10 | serde_json::{Map, Value}, 11 | std::collections::HashMap, 12 | }; 13 | 14 | pub fn build_recap_details_object(domain: Option<&str>) -> ReCapDetailsObject { 15 | ReCapDetailsObject { 16 | att: HashMap::from_iter([( 17 | NOTIFY_URI.to_owned(), 18 | HashMap::from_iter([( 19 | Ability { 20 | namespace: ABILITY_NAMESPACE_MANAGE.to_owned(), 21 | name: format!( 22 | "{}{ABILITY_ABILITY_SUFFIX}", 23 | domain.unwrap_or(ABILITY_ABILITY_ALL_APPS_MAGIC) 24 | ), 25 | }, 26 | vec![Value::Object(Map::from_iter([]))], 27 | )]), 28 | )]), 29 | } 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/lb/error_5xx_logs.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import 
'../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local cloudwatch_target = import '../../grafonnet-lib/targets/cloudwatch.libsonnet'; 5 | 6 | local panels = grafana.panels; 7 | local targets = grafana.targets; 8 | 9 | { 10 | new(ds, vars):: 11 | panels.table( 12 | title = 'HTTP 5xx Errors', 13 | datasource = ds.cloudwatch, 14 | ) 15 | .configure({ 16 | fieldConfig: {}, 17 | options: { 18 | showHeader: false, 19 | }, 20 | }) 21 | 22 | .addTarget(targets.cloudwatch( 23 | datasource = ds.cloudwatch, 24 | namespace = "", 25 | queryMode = cloudwatch_target.queryModes.Logs, 26 | logGroups = [{ 27 | arn: vars.log_group_app_arn, 28 | name: vars.log_group_app_name, 29 | accountId: vars.aws_account_id, 30 | }], 31 | expression = 'fields @timestamp, @message, @logStream, @log\n| filter @message like /HTTP server error/\n| parse @message /^(?[^\\s]+)/\n| display @message\n| sort LogTimestamp desc', 32 | refId = '5xx_Errors', 33 | )) 34 | } 35 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/rds/volume_bytes_used.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'Volume Used', 11 | datasource = ds.cloudwatch, 12 | ) 13 | .configure( 14 | defaults.configuration.timeseries 15 | .withUnit(grafana.fieldConfig.units.DecBytes) 16 | ) 17 | 18 | .addTarget(targets.cloudwatch( 19 | datasource = ds.cloudwatch, 20 | namespace = 'AWS/RDS', 21 | metricName = 'VolumeBytesUsed', 22 | dimensions = { 23 | DBClusterIdentifier: vars.rds_cluster_id, 24 | }, 25 | matchExact = true, 26 | statistic = 'Average', 27 | )) 28 | 29 | .addTarget(targets.cloudwatch( 30 | datasource = ds.cloudwatch, 31 | namespace = 'AWS/RDS', 32 
| metricName = 'VolumeBytesUsed', 33 | dimensions = { 34 | DBClusterIdentifier: vars.rds_cluster_id, 35 | }, 36 | matchExact = true, 37 | statistic = 'Maximum', 38 | )) 39 | } 40 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/app/send_failed.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | local threshold = 100; 8 | 9 | local _configuration = defaults.configuration.timeseries 10 | .withSoftLimit( 11 | axisSoftMin = 0, 12 | axisSoftMax = threshold * 2, 13 | ) 14 | .withThresholdStyle(grafana.fieldConfig.thresholdStyle.Dashed) 15 | .addThreshold({ 16 | color : defaults.values.colors.critical, 17 | value : threshold, 18 | }) 19 | .withColor(grafana.fieldConfig.colorMode.Thresholds); 20 | 21 | { 22 | new(ds, vars):: 23 | panels.timeseries( 24 | title = 'Failed to Send', 25 | datasource = ds.prometheus, 26 | ) 27 | .configure(_configuration) 28 | 29 | .addTarget(targets.prometheus( 30 | datasource = ds.prometheus, 31 | expr = 'sum by(aws_ecs_task_revision) (increase(dispatched_notifications_total{type="failed"}[$__rate_interval]))', 32 | legendFormat = 'r{{aws_ecs_task_revision}}', 33 | exemplar = true, 34 | refId = 'NotificationsFailed', 35 | )) 36 | } 37 | -------------------------------------------------------------------------------- /src/services/public_http_server/handlers/webhooks/update_webhook.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::WebhookConfig, 3 | crate::{error::Result, extractors::AuthedProjectId, state::AppState, types::WebhookInfo}, 4 | axum::{ 5 | extract::{Path, State}, 6 | response::IntoResponse, 7 | Json, 8 | }, 9 | mongodb::{bson, bson::doc}, 10 | 
std::sync::Arc, 11 | tracing::info, 12 | uuid::Uuid, 13 | }; 14 | 15 | pub async fn handler( 16 | Path((_, webhook_id)): Path<(String, Uuid)>, 17 | AuthedProjectId(project_id, _): AuthedProjectId, 18 | State(state): State>, 19 | Json(webhook_info): Json, 20 | ) -> Result { 21 | let request_id = uuid::Uuid::new_v4(); 22 | info!("[{request_id}] Updating webhook: {webhook_id} for project: {project_id}"); 23 | state 24 | .database 25 | .collection::("webhooks") 26 | .update_one( 27 | doc! {"project_id": project_id, "id": webhook_id.to_string()}, 28 | doc! {"$set": {"url": webhook_info.url, "events": bson::to_bson(&webhook_info.events)? } }, 29 | None, 30 | ) 31 | .await?; 32 | 33 | Ok(axum::http::StatusCode::NO_CONTENT.into_response()) 34 | } 35 | -------------------------------------------------------------------------------- /terraform/alerting/main.tf: -------------------------------------------------------------------------------- 1 | module "cloudwatch" { 2 | source = "app.terraform.io/wallet-connect/cloudwatch-constants/aws" 3 | version = "1.0.0" 4 | } 5 | 6 | locals { 7 | alarm_prefix = "${title(module.this.name)} - ${title(module.this.stage)}" 8 | evaluation_periods = 1 9 | period = 60 * 5 10 | } 11 | 12 | #tfsec:ignore:aws-sns-enable-topic-encryption 13 | resource "aws_sns_topic" "cloudwatch_webhook" { 14 | name = "cloudwatch-webhook" 15 | display_name = "CloudWatch Webhook forwarding to BetterUptime" 16 | } 17 | 18 | resource "aws_sns_topic_subscription" "cloudwatch_webhook" { 19 | count = var.webhook_cloudwatch_p2 != "" ? 
1 : 0 20 | 21 | endpoint = var.webhook_cloudwatch_p2 22 | protocol = "https" 23 | topic_arn = aws_sns_topic.cloudwatch_webhook.arn 24 | } 25 | 26 | #tfsec:ignore:aws-sns-enable-topic-encryption 27 | resource "aws_sns_topic" "prometheus_webhook" { 28 | name = "prometheus-webhook" 29 | display_name = "Prometheus Webhook forwarding to BetterUptime" 30 | } 31 | 32 | resource "aws_sns_topic_subscription" "prometheus_webhook" { 33 | count = var.webhook_prometheus_p2 != "" ? 1 : 0 34 | endpoint = var.webhook_prometheus_p2 35 | protocol = "https" 36 | topic_arn = aws_sns_topic.prometheus_webhook.arn 37 | } 38 | -------------------------------------------------------------------------------- /.env.example: -------------------------------------------------------------------------------- 1 | # == Integration tests configuration == 2 | # Only needed if running `just integration` or `just devloop` 3 | # For details see tests/integration.rs#get_vars() 4 | 5 | # Obtain from https://cloud.walletconnect.com 6 | export PROJECT_ID="" 7 | 8 | 9 | # == LOCAL deployment tests configuration == 10 | # Only needed if running `just test-deployment` or `just devloop` 11 | # For details see tests/deployment.rs#get_vars() 12 | 13 | # Obtain from 1Password: cloudflare-workers/prod/internal-api-auth-token 14 | export REGISTRY_AUTH_TOKEN="" 15 | 16 | 17 | # == LOCAL, DEV or PROD deployment tests configuration == 18 | # Only needed if running: 19 | # - `just devloop` 20 | # - `just test-deployment` 21 | # - `ENVIRONMENT=DEV just test-deployment` 22 | # - `ENVIRONMENT=PROD just test-deployment` 23 | # For details see tests/deployment.rs#get_vars() 24 | 25 | # Obtain from https://cloud.walletconnect.com 26 | # Notify API must be enabled 27 | export NOTIFY_PROD_PROJECT_ID="" 28 | export NOTIFY_PROD_PROJECT_SECRET="" 29 | 30 | 31 | # == STAGING deployment tests configuration == 32 | # Only needed if running: 33 | # - `ENVIRONMENT=STAGING just test-deployment` 34 | 35 | # Obtain from 
https://wc-cloud-staging.vercel.app 36 | # Notify API must be enabled 37 | export NOTIFY_STAGING_PROJECT_ID="" 38 | export NOTIFY_STAGING_PROJECT_SECRET="" 39 | -------------------------------------------------------------------------------- /src/services/public_http_server/handlers/webhooks/get_webhooks.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::WebhookConfig, 3 | crate::{error::Result, extractors::AuthedProjectId, state::AppState, types::WebhookInfo}, 4 | axum::{extract::State, response::IntoResponse, Json}, 5 | futures::TryStreamExt, 6 | mongodb::bson::doc, 7 | std::{collections::HashMap, sync::Arc}, 8 | tracing::info, 9 | }; 10 | 11 | pub async fn handler( 12 | AuthedProjectId(project_id, _): AuthedProjectId, 13 | State(state): State>, 14 | ) -> Result { 15 | let request_id = uuid::Uuid::new_v4(); 16 | info!("[{request_id}] Getting webhooks for project: {project_id}"); 17 | 18 | let cursor = state 19 | .database 20 | .collection::("webhooks") 21 | .find(doc! 
{"project_id": project_id}, None) 22 | .await?; 23 | 24 | let webhooks: HashMap<_, _> = cursor 25 | .into_stream() 26 | .map_ok(|webhook| { 27 | ( 28 | webhook.id, 29 | WebhookConfig { 30 | url: webhook.url, 31 | events: webhook.events, 32 | }, 33 | ) 34 | }) 35 | .try_collect() 36 | .await?; 37 | 38 | Ok((axum::http::StatusCode::OK, Json(webhooks)).into_response()) 39 | } 40 | -------------------------------------------------------------------------------- /.github/workflows/sub-validate-rust.yml: -------------------------------------------------------------------------------- 1 | name: ❖ Validate Rust 2 | 3 | on: 4 | workflow_call: 5 | inputs: 6 | stage: 7 | description: 'the environment to validate' 8 | required: true 9 | type: string 10 | stage-url: 11 | description: 'the URL of the environment' 12 | required: true 13 | type: string 14 | 15 | permissions: 16 | contents: read 17 | checks: write 18 | id-token: write 19 | 20 | jobs: 21 | validate-rust: 22 | name: Rust Deployment Tests - ${{ inputs.stage }} 23 | runs-on: ubuntu-latest 24 | environment: 25 | name: ${{ inputs.stage }} 26 | url: ${{ inputs.stage-url }} 27 | steps: 28 | - uses: actions/checkout@v4 29 | 30 | - name: Convert test environment 31 | id: get_test_env 32 | env: 33 | TEST_ENV: ${{ inputs.stage }} 34 | run: | 35 | echo "test_env=${TEST_ENV^^}" >> $GITHUB_OUTPUT 36 | 37 | - name: Run deployment tests 38 | run: cargo test --test deployment 39 | env: 40 | RUST_BACKTRACE: true 41 | ENVIRONMENT: ${{ steps.get_test_env.outputs.test_env }} 42 | PROJECT_ID: ${{ secrets.PROJECT_ID }} 43 | NOTIFY_PROJECT_ID: ${{ secrets.VALIDATION_NOTIFY_PROJECT_ID }} 44 | NOTIFY_PROJECT_SECRET: ${{ secrets.VALIDATION_NOTIFY_PROJECT_SECRET }} 45 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.yml: -------------------------------------------------------------------------------- 1 | name: Feature Request 2 | description: Request a new feature be 
added 3 | title: "feat: " 4 | labels: 5 | - enhancement 6 | body: 7 | - type: markdown 8 | attributes: 9 | value: | 10 | Thanks for taking the time to suggest a new feature for Notify Server! ✨ 11 | - type: checkboxes 12 | attributes: 13 | label: Is there an existing issue for this? 14 | description: Please search to see if an issue already exists for the feature you would like. 15 | options: 16 | - label: I have searched the existing issues 17 | required: true 18 | - type: textarea 19 | attributes: 20 | label: Current Behavior 21 | description: A concise description of what you're experiencing. 22 | validations: 23 | required: true 24 | - type: textarea 25 | attributes: 26 | label: Requested Behavior 27 | description: A concise description of what you expected to happen. 28 | validations: 29 | required: true 30 | - type: textarea 31 | attributes: 32 | label: Anything else? 33 | description: | 34 | Links? References? Anything that will give us more context about the issue you are encountering! 35 | 36 | Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in. 37 | validations: 38 | required: false 39 | -------------------------------------------------------------------------------- /src/registry/storage/mod.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::registry::storage::error::StorageError, 3 | async_trait::async_trait, 4 | serde::{de::DeserializeOwned, Serialize}, 5 | std::{fmt::Debug, time::Duration}, 6 | }; 7 | 8 | pub mod error; 9 | pub mod redis; 10 | 11 | /// The Result type returned by Storage functions 12 | pub type StorageResult = Result; 13 | 14 | #[async_trait] 15 | pub trait KeyValueStorage: 'static + Send + Sync + Debug 16 | where 17 | T: Serialize + DeserializeOwned + Send + Sync, 18 | { 19 | /// Retrieve the data associated with the given key.
20 | async fn get(&self, key: &str) -> StorageResult>; 21 | 22 | /// Set the value for the given key. 23 | async fn set(&self, key: &str, value: &T, ttl: Option) -> StorageResult<()>; 24 | 25 | /// Delete the value associated with the given key. 26 | async fn del(&self, key: &str) -> StorageResult<()>; 27 | } 28 | 29 | /// Holder the type of data will be serialized to be stored. 30 | pub type Data = Vec; 31 | 32 | pub fn serialize(data: &T) -> StorageResult 33 | where 34 | T: Serialize, 35 | { 36 | rmp_serde::to_vec(data).map_err(|_| StorageError::Serialize) 37 | } 38 | 39 | pub fn deserialize(data: &[u8]) -> StorageResult 40 | where 41 | T: DeserializeOwned, 42 | { 43 | rmp_serde::from_slice(data).map_err(|_| StorageError::Deserialize) 44 | } 45 | -------------------------------------------------------------------------------- /terraform/ecs/outputs.tf: -------------------------------------------------------------------------------- 1 | output "ecs_cluster_name" { 2 | description = "The name of the ECS cluster" 3 | value = aws_ecs_cluster.app_cluster.name 4 | } 5 | 6 | output "ecs_service_name" { 7 | description = "The name of the ECS service" 8 | value = aws_ecs_service.app_service.name 9 | } 10 | 11 | output "ecs_task_family" { 12 | description = "The family of the task definition" 13 | value = aws_ecs_task_definition.app_task.family 14 | } 15 | 16 | output "service_security_group_id" { 17 | description = "The ID of the security group for the service" 18 | value = aws_security_group.app_ingress.id 19 | } 20 | 21 | output "target_group_arn" { 22 | description = "The ARN of the target group" 23 | value = aws_lb_target_group.target_group.arn 24 | } 25 | 26 | output "load_balancer_arn" { 27 | description = "The ARN of the load balancer" 28 | value = aws_lb.load_balancer.arn 29 | } 30 | 31 | output "load_balancer_arn_suffix" { 32 | description = "The ARN suffix of the load balancer" 33 | value = aws_lb.load_balancer.arn_suffix 34 | } 35 | 36 | output 
"log_group_app_name" { 37 | description = "The name of the log group for the app" 38 | value = aws_cloudwatch_log_group.cluster.name 39 | } 40 | 41 | output "log_group_app_arn" { 42 | description = "The ARN of the log group for the app" 43 | value = aws_cloudwatch_log_group.cluster.arn 44 | } 45 | -------------------------------------------------------------------------------- /terraform/docdb/password.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | master_password = var.master_password == "" ? random_password.master_password[0].result : var.master_password 3 | } 4 | 5 | resource "random_password" "master_password" { 6 | count = var.master_password == "" ? 1 : 0 7 | length = 16 8 | special = false 9 | } 10 | 11 | resource "aws_kms_key" "master_password" { 12 | description = "KMS key for the ${module.this.id} DocumentDB cluster master password" 13 | enable_key_rotation = true 14 | 15 | policy = jsonencode({ 16 | Version = "2012-10-17" 17 | Statement = [ 18 | { 19 | Sid = "Enable IAM User Permissions" 20 | Effect = "Allow" 21 | Principal = { 22 | AWS = data.aws_caller_identity.this.account_id 23 | } 24 | Action = "kms:*" 25 | Resource = "*" 26 | }, 27 | ] 28 | }) 29 | } 30 | 31 | resource "aws_kms_alias" "master_password" { 32 | name = "alias/${module.this.id}-master-password" 33 | target_key_id = aws_kms_key.master_password.id 34 | } 35 | 36 | resource "aws_secretsmanager_secret" "master_password" { 37 | name = "${module.this.id}-master-password" 38 | kms_key_id = aws_kms_key.master_password.arn 39 | } 40 | 41 | resource "aws_secretsmanager_secret_version" "master_password" { 42 | secret_id = aws_secretsmanager_secret.master_password.id 43 | secret_string = local.master_password 44 | } 45 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/app/subscribed_topics.libsonnet: -------------------------------------------------------------------------------- 1 
| local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'Subscribed Topics', 11 | datasource = ds.prometheus, 12 | ) 13 | .configure(defaults.configuration.timeseries) 14 | 15 | .addTarget(targets.prometheus( 16 | datasource = ds.prometheus, 17 | expr = 'subscribed_project_topics', 18 | legendFormat = 'Projects r{{aws_ecs_task_revision}}', 19 | exemplar = true, 20 | refId = 'SubscribedProjectTopics', 21 | )) 22 | 23 | .addTarget(targets.prometheus( 24 | datasource = ds.prometheus, 25 | expr = 'subscribed_subscriber_topics', 26 | legendFormat = 'Subscribers r{{aws_ecs_task_revision}}', 27 | exemplar = true, 28 | refId = 'SubscribedSubscriberTopics', 29 | )) 30 | 31 | .addTarget(targets.prometheus( 32 | datasource = ds.prometheus, 33 | expr = 'subscribed_project_topics + subscribed_subscriber_topics', 34 | legendFormat = 'Total r{{aws_ecs_task_revision}}', 35 | exemplar = true, 36 | refId = 'SubscribedTopics', 37 | )) 38 | } 39 | -------------------------------------------------------------------------------- /terraform/redis/variables.tf: -------------------------------------------------------------------------------- 1 | #------------------------------------------------------------------------------- 2 | # Nodes Configuration 3 | 4 | variable "node_type" { 5 | description = "The instance type to use for the database nodes" 6 | type = string 7 | default = "cache.t4g.micro" # https://aws.amazon.com/elasticache/pricing/?nc=sn&loc=5#On-demand_nodes 8 | } 9 | 10 | variable "num_cache_nodes" { 11 | description = "The number of nodes to create in the cluster" 12 | type = number 13 | default = 1 14 | } 15 | 16 | variable "node_engine_version" { 17 | description = "The version of Redis to use" 18 | type = string 19 | default = "6.x" 20 | } 21 | 22 | 
#------------------------------------------------------------------------------- 23 | # Networking 24 | 25 | variable "vpc_id" { 26 | description = "The VPC ID to create the security group in" 27 | type = string 28 | } 29 | 30 | variable "subnets_ids" { 31 | description = "The list of subnet IDs to create the cluster in" 32 | type = set(string) 33 | } 34 | 35 | variable "ingress_cidr_blocks" { 36 | description = "The CIDR blocks to allow ingress from, default to VPC only." 37 | type = set(string) 38 | default = null 39 | } 40 | 41 | variable "egress_cidr_blocks" { 42 | description = "The CIDR blocks to allow egress to, default to VPC only." 43 | type = set(string) 44 | default = null 45 | } 46 | -------------------------------------------------------------------------------- /terraform/.terraform-docs.yml: -------------------------------------------------------------------------------- 1 | formatter: 'markdown table' 2 | 3 | recursive: 4 | enabled: true 5 | path: . 6 | 7 | output: 8 | file: README.md 9 | mode: inject 10 | template: |- 11 | 12 | {{ .Content }} 13 | 14 | 15 | content: | 16 | {{ .Header }} 17 | {{ .Requirements }} 18 | {{ .Providers }} 19 | {{ .Modules }} 20 | 21 | ## Inputs 22 | {{- $hideInputs := list "namespace" "region" "stage" "name" "delimiter" "attributes" "tags" "regex_replace_chars" "id_length_limit" "label_key_case" "label_value_case" "label_order" }} 23 | {{- $filteredInputs := list -}} 24 | {{- range .Module.Inputs -}} 25 | {{- if not (has .Name $hideInputs) -}} 26 | {{- $filteredInputs = append $filteredInputs . -}} 27 | {{- end -}} 28 | {{- end -}} 29 | {{ if not $filteredInputs }} 30 | 31 | No inputs. 32 | {{ else }} 33 | | Name | Description | Type | Default | Required | 34 | |------|-------------|------|---------|:--------:| 35 | {{- range $filteredInputs }} 36 | | {{ anchorNameMarkdown "input" .Name }} | {{ tostring .Description | sanitizeMarkdownTbl }} | {{ printf " " }}
{{ tostring .Type | sanitizeMarkdownTbl }}
| {{ printf " " }}
{{ .GetValue | sanitizeMarkdownTbl }}
| {{ printf " " }}{{ ternary .Required "yes" "no" }} | 37 | {{- end }} 38 | {{- end }} 39 | {{ .Outputs }} 40 | {{/** End of file fixer */}} 41 | -------------------------------------------------------------------------------- /.github/workflows/dispatch_publish.yml: -------------------------------------------------------------------------------- 1 | name: ⚙️ Publish 2 | run-name: "Publish: ${{ github.sha }}${{ inputs.deploy-to != 'none' && format(' ❱❱ {0}', inputs.deploy-to) || ''}}" 3 | 4 | on: 5 | workflow_dispatch: 6 | inputs: 7 | deploy-to: 8 | description: "Deploy published image to" 9 | type: choice 10 | options: 11 | - none 12 | - staging 13 | - prod 14 | default: staging 15 | required: true 16 | 17 | concurrency: deploy 18 | 19 | permissions: 20 | contents: write 21 | checks: write 22 | id-token: write 23 | packages: write 24 | issues: read 25 | pull-requests: write 26 | 27 | jobs: 28 | ci: 29 | name: CI 30 | secrets: inherit 31 | uses: ./.github/workflows/sub-ci.yml 32 | with: 33 | check-infra: false 34 | check-app: true 35 | 36 | release: 37 | name: Release 38 | uses: WalletConnect/ci_workflows/.github/workflows/release.yml@0.2.9 39 | secrets: inherit 40 | with: 41 | infra-changed: false 42 | app-changed: true 43 | 44 | cd: 45 | name: CD 46 | needs: [ release ] 47 | if: ${{ inputs.deploy-to == 'staging' || inputs.deploy-to == 'prod' }} 48 | secrets: inherit 49 | uses: ./.github/workflows/sub-cd.yml 50 | with: 51 | deploy-infra: false 52 | deploy-app: true 53 | deploy-prod: ${{ inputs.deploy-to == 'prod' }} 54 | version: ${{ needs.release.outputs.version }} 55 | -------------------------------------------------------------------------------- /terraform/postgres/password.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | db_master_password = var.db_master_password == "" ? 
random_password.db_master_password[0].result : var.db_master_password 3 | } 4 | 5 | resource "random_password" "db_master_password" { 6 | count = var.db_master_password == "" ? 1 : 0 7 | length = 16 8 | special = false 9 | } 10 | 11 | resource "aws_kms_key" "db_master_password" { 12 | description = "KMS key for the ${module.this.id} RDS cluster master password" 13 | enable_key_rotation = true 14 | 15 | policy = jsonencode({ 16 | Version = "2012-10-17" 17 | Statement = [ 18 | { 19 | Sid = "Enable IAM User Permissions" 20 | Effect = "Allow" 21 | Principal = { 22 | AWS = data.aws_caller_identity.this.account_id 23 | } 24 | Action = "kms:*" 25 | Resource = "*" 26 | }, 27 | ] 28 | }) 29 | } 30 | 31 | resource "aws_kms_alias" "db_master_password" { 32 | name = "alias/${module.this.id}-master-password" 33 | target_key_id = aws_kms_key.db_master_password.id 34 | } 35 | 36 | resource "aws_secretsmanager_secret" "db_master_password" { 37 | name = "${module.this.id}-master-password" 38 | kms_key_id = aws_kms_key.db_master_password.arn 39 | } 40 | 41 | resource "aws_secretsmanager_secret_version" "db_master_password" { 42 | secret_id = aws_secretsmanager_secret.db_master_password.id 43 | secret_string = local.db_master_password 44 | } 45 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/app/relay_subscribe_latency.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'Relay Subscribe Latency', 11 | datasource = ds.prometheus, 12 | ) 13 | .configure( 14 | defaults.configuration.timeseries 15 | .withUnit('ms') 16 | ) 17 | 18 | .addTarget(targets.prometheus( 19 | datasource = ds.prometheus, 20 | expr = 'sum 
by (aws_ecs_task_revision) (rate(relay_subscribe_latency_sum[$__rate_interval])) / sum by (aws_ecs_task_revision) (rate(relay_subscribe_latency_count[$__rate_interval]))', 21 | legendFormat = 'Publish w/ retries r{{aws_ecs_task_revision}}', 22 | exemplar = false, 23 | refId = 'RelaySubscribeLatency', 24 | )) 25 | 26 | .addTarget(targets.prometheus( 27 | datasource = ds.prometheus, 28 | expr = 'sum by (aws_ecs_task_revision) (rate(relay_subscribe_request_latency_sum[$__rate_interval])) / sum by (aws_ecs_task_revision) (rate(relay_subscribe_request_latency_count[$__rate_interval]))', 29 | legendFormat = 'Individual RPC r{{aws_ecs_task_revision}}', 30 | exemplar = false, 31 | refId = 'RelaySubscribeRequestLatency', 32 | )) 33 | } 34 | -------------------------------------------------------------------------------- /src/model/types/account_id/erc55.rs: -------------------------------------------------------------------------------- 1 | use {sha2::Digest, sha3::Keccak256}; 2 | 3 | // https://eips.ethereum.org/EIPS/eip-55 4 | 5 | // Encodes a lowercase hex address without '0x' with ERC-55 checksum 6 | // If a non-lowercase hex value, or a non-address is passed, the behavior is undefined 7 | pub fn erc_55_checksum_encode(s: &str) -> impl Iterator + '_ { 8 | let address_hash = hex::encode(Keccak256::default().chain_update(s).finalize()); 9 | s.chars().enumerate().map(move |(i, c)| { 10 | if !c.is_numeric() && address_hash.as_bytes()[i] > b'7' { 11 | c.to_ascii_uppercase() 12 | } else { 13 | c 14 | } 15 | }) 16 | } 17 | 18 | #[cfg(test)] 19 | mod tests { 20 | use super::*; 21 | 22 | #[test] 23 | fn test() { 24 | fn test(addr: &str) { 25 | let ox = "0x"; 26 | assert_eq!( 27 | addr, 28 | ox.chars() 29 | .chain(erc_55_checksum_encode( 30 | &addr[ox.len()..].to_ascii_lowercase() 31 | )) 32 | .collect::() 33 | ); 34 | } 35 | 36 | test("0x5aAeb6053F3E94C9b9A09f33669435E7Ef1BeAed"); 37 | test("0xfB6916095ca1df60bB79Ce92cE3Ea74c37c5d359"); 38 | 
test("0xdbF03B407c01E7cD3CBea99509d93f8DDDC8C6FB"); 39 | test("0xD1220A0cf47c7B9Be7A2E6BA89F429762e7b9aDb"); 40 | test("0x9AfEaC202C837df470b5A145e0EfD6a574B21029"); 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/app/relay_outgoing_message_latency.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'Msg Out Latency', 11 | datasource = ds.prometheus, 12 | ) 13 | .configure( 14 | defaults.configuration.timeseries 15 | .withUnit('ms') 16 | ) 17 | 18 | .addTarget(targets.prometheus( 19 | datasource = ds.prometheus, 20 | expr = 'sum by (aws_ecs_task_revision) (rate(relay_outgoing_message_latency_sum[$__rate_interval])) / sum by (aws_ecs_task_revision) (rate(relay_outgoing_message_latency_count[$__rate_interval]))', 21 | legendFormat = 'Publish w/ retries r{{aws_ecs_task_revision}}', 22 | exemplar = false, 23 | refId = 'RelayOutgoingMessageLatency', 24 | )) 25 | 26 | .addTarget(targets.prometheus( 27 | datasource = ds.prometheus, 28 | expr = 'sum by (aws_ecs_task_revision) (rate(relay_outgoing_message_publish_latency_sum[$__rate_interval])) / sum by (aws_ecs_task_revision) (rate(relay_outgoing_message_publish_latency_count[$__rate_interval]))', 29 | legendFormat = 'Individual RPC r{{aws_ecs_task_revision}}', 30 | exemplar = false, 31 | refId = 'RelayOutgoingMessagePublishLatency', 32 | )) 33 | } 34 | -------------------------------------------------------------------------------- /.github/workflows/sub-validate-swift.yml: -------------------------------------------------------------------------------- 1 | name: ❖ Validate 2 | 3 | on: 4 | workflow_call: 5 | inputs: 6 | stage: 7 
local grafana  = import '../../grafonnet-lib/grafana.libsonnet';
local defaults = import '../../grafonnet-lib/defaults.libsonnet';

local panels  = grafana.panels;
local targets = grafana.targets;

{
  new(ds, vars)::
    panels.timeseries(
      title      = 'Relay Batch Subscribe Latency',
      datasource = ds.prometheus,
    )
    .configure(
      defaults.configuration.timeseries
      .withUnit('ms')
    )

    // Overall latency of the batch-subscribe operation, including retries.
    .addTarget(targets.prometheus(
      datasource   = ds.prometheus,
      expr         = 'sum by (aws_ecs_task_revision) (rate(relay_batch_subscribe_latency_sum[$__rate_interval])) / sum by (aws_ecs_task_revision) (rate(relay_batch_subscribe_latency_count[$__rate_interval]))',
      // Legend fixed from 'Publish w/ retries' — a copy-paste from the
      // outgoing-message panel; this panel tracks subscribes, not publishes.
      legendFormat = 'Subscribe w/ retries r{{aws_ecs_task_revision}}',
      exemplar     = false,
      refId        = 'RelayBatchSubscribeLatency',
    ))

    // Latency of each individual relay RPC within the batch subscribe.
    .addTarget(targets.prometheus(
      datasource   = ds.prometheus,
      expr         = 'sum by (aws_ecs_task_revision) (rate(relay_batch_subscribe_request_latency_sum[$__rate_interval])) / sum by (aws_ecs_task_revision) (rate(relay_batch_subscribe_request_latency_count[$__rate_interval]))',
      legendFormat = 'Individual RPC r{{aws_ecs_task_revision}}',
      exemplar     = false,
      refId        = 'RelayBatchSubscribeRequestLatency',
    ))
}
38 | type = string 39 | } 40 | 41 | variable "load_balancer_arn" { 42 | description = "The ARN of the load balancer." 43 | type = string 44 | } 45 | 46 | variable "log_group_app_name" { 47 | description = "The name of the log group for the app" 48 | type = string 49 | } 50 | 51 | variable "log_group_app_arn" { 52 | description = "The ARN of the log group for the app" 53 | type = string 54 | } 55 | 56 | variable "aws_account_id" { 57 | description = "The AWS account ID." 58 | type = string 59 | } 60 | -------------------------------------------------------------------------------- /src/services/public_http_server/handlers/webhooks/register_webhook.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::WebhookConfig, 3 | crate::{ 4 | error::Result, extractors::AuthedProjectId, handlers::webhooks::validate_url, 5 | state::AppState, types::WebhookInfo, 6 | }, 7 | axum::{extract::State, response::IntoResponse, Json}, 8 | mongodb::bson::doc, 9 | serde::Serialize, 10 | std::sync::Arc, 11 | tracing::info, 12 | uuid::Uuid, 13 | }; 14 | 15 | #[derive(Serialize)] 16 | struct RegisterWebhookResponse { 17 | id: String, 18 | } 19 | 20 | pub async fn handler( 21 | AuthedProjectId(project_id, _): AuthedProjectId, 22 | State(state): State>, 23 | Json(webhook_info): Json, 24 | ) -> Result { 25 | let request_id = uuid::Uuid::new_v4(); 26 | info!("[{request_id}] Registering webhook for project: {project_id}"); 27 | let webhook_id = Uuid::new_v4().to_string(); 28 | 29 | validate_url(&webhook_info.url)?; 30 | 31 | let webhook = WebhookInfo { 32 | id: webhook_id.clone(), 33 | url: webhook_info.url, 34 | events: webhook_info.events, 35 | project_id: project_id.clone(), 36 | }; 37 | 38 | state 39 | .database 40 | .collection("webhooks") 41 | .insert_one(webhook, None) 42 | .await?; 43 | 44 | info!("[{request_id}] Webhook registered: {webhook_id} for project:{project_id}"); 45 | 46 | Ok(( 47 | axum::http::StatusCode::CREATED, 48 | 
Json(RegisterWebhookResponse { id: webhook_id }), 49 | ) 50 | .into_response()) 51 | } 52 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/rds/cpu.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'CPU', 11 | datasource = ds.cloudwatch, 12 | ) 13 | .configure(defaults.configuration.timeseries) 14 | 15 | .setAlert( 16 | vars.environment, 17 | defaults.alerts.cpu( 18 | namespace = vars.namespace, 19 | env = vars.environment, 20 | title = 'RDS', 21 | notifications = vars.notifications, 22 | refid = 'CPU', 23 | limit = 90, 24 | reducer = grafana.alertCondition.reducers.Max, 25 | ) 26 | ) 27 | 28 | .addTarget(targets.cloudwatch( 29 | datasource = ds.cloudwatch, 30 | namespace = 'AWS/RDS', 31 | metricName = 'CPUUtilization', 32 | dimensions = { 33 | DBClusterIdentifier: vars.rds_cluster_id, 34 | }, 35 | matchExact = true, 36 | statistic = 'Average', 37 | refId = 'CPU_Avg' 38 | )) 39 | 40 | .addTarget(targets.cloudwatch( 41 | datasource = ds.cloudwatch, 42 | namespace = 'AWS/RDS', 43 | metricName = 'CPUUtilization', 44 | dimensions = { 45 | DBClusterIdentifier: vars.rds_cluster_id, 46 | }, 47 | matchExact = true, 48 | statistic = 'Maximum', 49 | refId = 'CPU_Max' 50 | )) 51 | } 52 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/app/keys_server_request_rate.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local 
# VPC the Redis cluster lives in; used to derive default CIDR allow-lists.
data "aws_vpc" "vpc" {
  id = var.vpc_id
}

# Single ElastiCache Redis cluster for the application.
resource "aws_elasticache_cluster" "cache" {
  cluster_id      = module.this.id
  engine          = "redis"
  node_type       = var.node_type
  num_cache_nodes = var.num_cache_nodes
  # NOTE(review): parameter group is pinned to the redis6 family while
  # engine_version is variable — confirm these stay in sync on upgrades.
  parameter_group_name = "default.redis6.x"
  engine_version       = var.node_engine_version
  port                 = 6379
  subnet_group_name    = aws_elasticache_subnet_group.private_subnets.name
  security_group_ids = [
    aws_security_group.service_security_group.id
  ]
  # Keep automatic snapshots for two days.
  snapshot_retention_limit = 2
}

# Place cache nodes in the provided (private) subnets.
resource "aws_elasticache_subnet_group" "private_subnets" {
  name       = "${module.this.id}-private-subnet-group"
  subnet_ids = var.subnets_ids
}

# Allow only the app to access Redis
resource "aws_security_group" "service_security_group" {
  name        = "${module.this.id}-redis-service-ingress"
  description = "Allow ingress from the application"
  vpc_id      = var.vpc_id
  # Ingress restricted to Redis port; defaults to the VPC CIDR when no explicit
  # allow-list is provided.
  ingress {
    description = "${module.this.id} - ingress from application"
    from_port   = 6379
    to_port     = 6379
    protocol    = "TCP"
    cidr_blocks = var.ingress_cidr_blocks == null ? [data.aws_vpc.vpc.cidr_block] : var.ingress_cidr_blocks
  }

  egress {
    description = "${module.this.id} - egress to application"
    from_port   = 0  # Allowing any incoming port
    to_port     = 0  # Allowing any outgoing port
    protocol    = "-1" # Allowing any outgoing protocol
    cidr_blocks = var.egress_cidr_blocks == null ? [data.aws_vpc.vpc.cidr_block] : var.egress_cidr_blocks
  }
}
-- Adapted from https://github.com/upstash/ratelimit/blob/3a8cfb00e827188734ac347965cb743a75fcb98a/src/single.ts#L311
-- Token-bucket rate limiter, evaluated atomically for a batch of keys.
-- Returns a JSON object mapping each key to {remainingTokens, nextRefillAt};
-- remainingTokens is -1 when the bucket is empty (request rejected).
local keys = KEYS -- identifier including prefixes
local maxTokens = tonumber(ARGV[1]) -- maximum number of tokens
local interval = tonumber(ARGV[2]) -- size of the window in milliseconds
local refillRate = tonumber(ARGV[3]) -- how many tokens are refilled after each interval
local now = tonumber(ARGV[4]) -- current timestamp in milliseconds

local results = {}

for i, key in ipairs(keys) do
    local bucket = redis.call("HMGET", key, "refilledAt", "tokens")

    local refilledAt
    local tokens

    -- HMGET yields false for a missing field: treat as a brand-new, full bucket.
    if bucket[1] == false then
        refilledAt = now
        tokens = maxTokens
    else
        refilledAt = tonumber(bucket[1])
        tokens = tonumber(bucket[2])
    end

    -- Refill by whole intervals elapsed since the last refill, capped at maxTokens.
    if now >= refilledAt + interval then
        local numRefills = math.floor((now - refilledAt) / interval)
        tokens = math.min(maxTokens, tokens + numRefills * refillRate)

        refilledAt = refilledAt + numRefills * interval
    end

    if tokens == 0 then
        -- Bucket exhausted: reject, reporting when the next token arrives.
        results[key] = {-1, refilledAt + interval}
    else
        local remaining = tokens - 1
        -- Expire the key at the moment the bucket would be full again, so idle
        -- buckets clean themselves up.
        local expireAt = math.ceil(((maxTokens - remaining) / refillRate)) * interval

        redis.call("HSET", key, "refilledAt", refilledAt, "tokens", remaining)
        redis.call("PEXPIRE", key, expireAt)
        results[key] = {remaining, refilledAt + interval}
    end
end

-- Redis doesn't support Lua table responses: https://stackoverflow.com/a/24302613
return cjson.encode(results)
https://cloud.walletconnect.com/app/project?uuid=c2c31233-3630-4d7a-8649-653faeafe898

## `staging` env

VALIDATION_NOTIFY_PROJECT_ID
VALIDATION_NOTIFY_PROJECT_SECRET
https://wc-cloud-staging.vercel.app/app/project?uuid=480ef7cc-a55a-451a-b76a-5f12ea28e077

VALIDATION_SWIFT_PROJECT_ID
https://cloud.walletconnect.com/app/project?uuid=fa897f4c-83a0-4f50-bd6b-53a9d94fce63

VALIDATION_SWIFT_DAPP_PROJECT_ID
VALIDATION_SWIFT_DAPP_PROJECT_SECRET
https://wc-cloud-staging.vercel.app/app/project?uuid=317a4b59-f0db-42e9-bffa-b32caf5f7ddd

## `prod` env

VALIDATION_NOTIFY_PROJECT_ID
VALIDATION_NOTIFY_PROJECT_SECRET
https://cloud.walletconnect.com/app/project?uuid=c2c31233-3630-4d7a-8649-653faeafe898

VALIDATION_SWIFT_PROJECT_ID
https://cloud.walletconnect.com/app/project?uuid=fa897f4c-83a0-4f50-bd6b-53a9d94fce63

VALIDATION_SWIFT_DAPP_PROJECT_ID
VALIDATION_SWIFT_DAPP_PROJECT_SECRET
https://cloud.walletconnect.com/app/project?uuid=ec020ad1-89bc-4f0f-b7bc-5602990e79b5

# Terraform

project_id
Project ID for Notify Server to connect to relay. Should have rate limits disabled.
https://cloud.walletconnect.com/app/project?uuid=5f423bdd-12b2-4544-af6c-8a6ad470e7de

registry_api_endpoint & registry_api_auth_token
Registry auth token for Notify Server to authenticate project IDs and project secrets for dapps. Get from 1Password.
Staging Notify Server uses staging registry, all other envs use prod registry.

keypair_seed
Set to a securely generated random value.
local grafana  = import '../../grafonnet-lib/grafana.libsonnet';
local defaults = import '../../grafonnet-lib/defaults.libsonnet';

local panels  = grafana.panels;
local targets = grafana.targets;

{
  new(ds, vars)::
    panels.timeseries(
      title      = 'Msg In Latency',
      datasource = ds.prometheus,
    )
    .configure(
      defaults.configuration.timeseries
      .withUnit('ms')
    )

    // Alert when the 5-minute average incoming-message latency exceeds 10s.
    // Typo fix: "incomming" -> "incoming" in the alert name/message.
    .setAlert(vars.environment, grafana.alert.new(
      namespace     = vars.namespace,
      name          = '%(env)s - Relay incoming message latency too high' % { env: vars.environment },
      message       = '%(env)s - Relay incoming message latency too high' % { env: vars.environment },
      notifications = vars.notifications,
      noDataState   = 'no_data',
      conditions    = [
        grafana.alertCondition.new(
          evaluatorParams = [ 10000 ],
          evaluatorType   = 'gt',
          operatorType    = 'or',
          queryRefId      = 'RelayIncomingMessageLatency',
          queryTimeStart  = '5m',
          queryTimeEnd    = 'now',
          reducerType     = grafana.alert_reducers.Avg
        ),
      ],
    ))

    // Mean latency per (ECS task revision, relay message tag).
    .addTarget(targets.prometheus(
      datasource   = ds.prometheus,
      expr         = 'sum by (aws_ecs_task_revision, tag) (rate(relay_incoming_message_latency_sum[$__rate_interval])) / sum by (aws_ecs_task_revision, tag) (rate(relay_incoming_message_latency_count[$__rate_interval]))',
      legendFormat = '{{tag}} r{{aws_ecs_task_revision}}',
      exemplar     = false,
      refId        = 'RelayIncomingMessageLatency',
    ))
}
# Subnet group placing the RDS cluster in the provided (private) subnets.
resource "aws_db_subnet_group" "db_subnets" {
  name        = module.this.id
  description = "Subnet group for the ${module.this.id} RDS cluster"
  subnet_ids  = var.subnet_ids
}

# Aurora PostgreSQL cluster running on Serverless v2 instances.
module "db_cluster" {
  source  = "terraform-aws-modules/rds-aurora/aws"
  version = "9.3.1"

  name               = module.this.id
  database_name      = var.db_name
  engine             = "aurora-postgresql"
  engine_version     = "15.4"
  engine_mode        = "provisioned"
  ca_cert_identifier = "rds-ca-ecc384-g1"
  instance_class     = "db.serverless"
  # One map entry per instance, keyed 1..N; empty values use module defaults.
  instances = { for i in range(1, var.instances + 1) : i => {} }

  master_username = var.db_master_username
  # Password is managed by this stack (see password.tf), not by RDS.
  manage_master_user_password = false
  master_password             = local.db_master_password

  vpc_id               = var.vpc_id
  db_subnet_group_name = aws_db_subnet_group.db_subnets.name
  security_group_rules = {
    vpc_ingress = {
      cidr_blocks = var.ingress_cidr_blocks
    }
  }

  performance_insights_enabled = true
  storage_encrypted            = true
  allow_major_version_upgrade  = true
  apply_immediately            = true
  skip_final_snapshot          = true
  deletion_protection          = true

  # Enhanced monitoring every 30s; ship PostgreSQL logs to CloudWatch.
  monitoring_interval                    = 30
  enabled_cloudwatch_logs_exports        = ["postgresql"]
  cloudwatch_log_group_kms_key_id        = var.cloudwatch_logs_key_arn
  cloudwatch_log_group_retention_in_days = var.cloudwatch_retention_in_days

  # Non-prod environments may scale down to the 0.5 ACU minimum.
  serverlessv2_scaling_configuration = {
    min_capacity = module.this.stage == "prod" ? var.min_capacity : 0.5
    max_capacity = var.max_capacity
  }
}
# release when migrations change 42 | outputs: 43 | infra: ${{ steps.filter.outputs.infra }} 44 | app: ${{ steps.filter.outputs.app }} 45 | 46 | release: 47 | name: Release 48 | needs: [ paths_filter ] 49 | uses: WalletConnect/ci_workflows/.github/workflows/release.yml@0.2.9 50 | secrets: inherit 51 | with: 52 | infra-changed: ${{ needs.paths_filter.outputs.infra == 'true' }} 53 | app-changed: ${{ needs.paths_filter.outputs.app == 'true' }} 54 | 55 | cd: 56 | name: CD 57 | needs: [ paths_filter, release ] 58 | secrets: inherit 59 | uses: ./.github/workflows/sub-cd.yml 60 | with: 61 | deploy-infra: ${{ needs.paths_filter.outputs.infra == 'true' }} 62 | deploy-app: ${{ needs.paths_filter.outputs.app == 'true' }} 63 | deploy-prod: true 64 | version: ${{ needs.release.outputs.version }} 65 | -------------------------------------------------------------------------------- /src/services/public_http_server/handlers/get_subscribers_v0.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::{ 3 | error::NotifyServerError, 4 | model::helpers::get_subscriber_accounts_by_project_id, 5 | rate_limit::{self, Clock, RateLimitError}, 6 | registry::{extractor::AuthedProjectId, storage::redis::Redis}, 7 | state::AppState, 8 | }, 9 | axum::{ 10 | extract::State, 11 | http::StatusCode, 12 | response::{IntoResponse, Response}, 13 | Json, 14 | }, 15 | relay_rpc::domain::ProjectId, 16 | std::sync::Arc, 17 | tracing::instrument, 18 | }; 19 | 20 | #[instrument(name = "get_subscribers_v0", skip(state))] 21 | pub async fn handler( 22 | State(state): State>, 23 | AuthedProjectId(project_id, _): AuthedProjectId, 24 | ) -> Result { 25 | if let Some(redis) = state.redis.as_ref() { 26 | get_subscribers_rate_limit(redis, &project_id, &state.clock).await?; 27 | } 28 | 29 | let accounts = 30 | get_subscriber_accounts_by_project_id(project_id, &state.postgres, state.metrics.as_ref()) 31 | .await 32 | .map_err(|e| match e { 33 | sqlx::Error::RowNotFound 
=> { 34 | NotifyServerError::UnprocessableEntity("Project not found".into()) 35 | } 36 | e => e.into(), 37 | })?; 38 | 39 | Ok((StatusCode::OK, Json(accounts)).into_response()) 40 | } 41 | pub async fn get_subscribers_rate_limit( 42 | redis: &Arc, 43 | project_id: &ProjectId, 44 | clock: &Clock, 45 | ) -> Result<(), RateLimitError> { 46 | rate_limit::token_bucket( 47 | redis, 48 | format!("subscribers-v0-{project_id}"), 49 | 5, 50 | chrono::Duration::seconds(1), 51 | 1, 52 | clock, 53 | ) 54 | .await 55 | } 56 | -------------------------------------------------------------------------------- /migrations/20231025145134_add_notification.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE notification ( 2 | id UUID PRIMARY KEY DEFAULT gen_random_uuid(), 3 | created_at TIMESTAMPTZ NOT NULL DEFAULT now(), 4 | updated_at TIMESTAMPTZ NOT NULL DEFAULT now(), 5 | project UUID NOT NULL REFERENCES project (id) ON DELETE CASCADE, 6 | notification_id VARCHAR(255) NOT NULL, 7 | type UUID NOT NULL, 8 | title VARCHAR(255) NOT NULL, 9 | body VARCHAR(255) NOT NULL, 10 | icon VARCHAR(255), -- nullable 11 | url VARCHAR(255), -- nullable 12 | 13 | UNIQUE (project, notification_id) 14 | ); 15 | CREATE INDEX notification_project_idx ON notification (project); 16 | CREATE INDEX notification_notification_id_idx ON notification (notification_id); 17 | CREATE INDEX notification_type_idx ON notification (type); 18 | 19 | CREATE TYPE subscriber_notification_status 20 | AS ENUM ('queued', 'processing', 'published', 'failed'); 21 | 22 | CREATE TABLE subscriber_notification ( 23 | id UUID PRIMARY KEY DEFAULT gen_random_uuid(), 24 | created_at TIMESTAMPTZ NOT NULL DEFAULT now(), 25 | updated_at TIMESTAMPTZ NOT NULL DEFAULT now(), 26 | notification UUID NOT NULL REFERENCES notification (id) ON DELETE CASCADE, 27 | subscriber UUID NOT NULL REFERENCES subscriber (id) ON DELETE CASCADE, 28 | status subscriber_notification_status NOT NULL, 29 | 30 | 
UNIQUE (notification, subscriber) 31 | ); 32 | CREATE INDEX subscriber_notification_notification_idx ON subscriber_notification (notification); 33 | CREATE INDEX subscriber_notification_subscriber_idx ON subscriber_notification (subscriber); 34 | CREATE INDEX subscriber_notification_status_idx ON subscriber_notification (status); 35 | -------------------------------------------------------------------------------- /src/services/public_http_server/handlers/mark_all_as_read.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::{ 3 | error::NotifyServerError, 4 | model::helpers::{get_project_by_project_id, mark_all_notifications_as_read_for_project}, 5 | rate_limit::{self, Clock, RateLimitError}, 6 | registry::{extractor::AuthedProjectId, storage::redis::Redis}, 7 | state::AppState, 8 | }, 9 | axum::{ 10 | extract::State, 11 | http::StatusCode, 12 | response::{IntoResponse, Response}, 13 | }, 14 | relay_rpc::domain::ProjectId, 15 | std::sync::Arc, 16 | tracing::instrument, 17 | }; 18 | 19 | #[instrument(name = "mark_all_as_read", skip(state))] 20 | pub async fn handler( 21 | State(state): State>, 22 | AuthedProjectId(project_id, _): AuthedProjectId, 23 | ) -> Result { 24 | if let Some(redis) = state.redis.as_ref() { 25 | mark_all_as_read_rate_limit(redis, &project_id, &state.clock).await?; 26 | } 27 | 28 | let project = get_project_by_project_id(project_id, &state.postgres, state.metrics.as_ref()) 29 | .await 30 | .map_err(|e| match e { 31 | sqlx::Error::RowNotFound => { 32 | NotifyServerError::UnprocessableEntity("Project not found".into()) 33 | } 34 | e => e.into(), 35 | })?; 36 | 37 | mark_all_notifications_as_read_for_project(project.id, &state.postgres, state.metrics.as_ref()) 38 | .await?; 39 | 40 | Ok(StatusCode::NO_CONTENT.into_response()) 41 | } 42 | 43 | pub async fn mark_all_as_read_rate_limit( 44 | redis: &Arc, 45 | project_id: &ProjectId, 46 | clock: &Clock, 47 | ) -> Result<(), RateLimitError> { 48 | 
rate_limit::token_bucket( 49 | redis, 50 | format!("mark_all_as_read-{project_id}"), 51 | 100, 52 | chrono::Duration::minutes(1), 53 | 1, 54 | clock, 55 | ) 56 | .await 57 | } 58 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/app/keys_server_request_latency.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'Keys Server Req Latency', 11 | datasource = ds.prometheus, 12 | ) 13 | .configure( 14 | defaults.configuration.timeseries 15 | .withUnit('ms') 16 | ) 17 | 18 | .addTarget(targets.prometheus( 19 | datasource = ds.prometheus, 20 | expr = 'sum by (aws_ecs_task_revision) (rate(keys_server_request_latency_sum{source="server"}[$__rate_interval])) / sum by (aws_ecs_task_revision) (rate(keys_server_request_latency_count{source="server"}[$__rate_interval]))', 21 | legendFormat = 'Server r{{aws_ecs_task_revision}}', 22 | exemplar = false, 23 | refId = 'KeysServerRequestLatencyServer', 24 | )) 25 | 26 | .addTarget(targets.prometheus( 27 | datasource = ds.prometheus, 28 | expr = 'sum by (aws_ecs_task_revision) (rate(keys_server_request_latency_sum{source="cache"}[$__rate_interval])) / sum by (aws_ecs_task_revision) (rate(keys_server_request_latency_count{source="cache"}[$__rate_interval]))', 29 | legendFormat = 'Cache r{{aws_ecs_task_revision}}', 30 | exemplar = false, 31 | refId = 'KeysServerRequestLatencyCache', 32 | )) 33 | 34 | .addTarget(targets.prometheus( 35 | datasource = ds.prometheus, 36 | expr = 'sum by (aws_ecs_task_revision) (rate(keys_server_request_latency_sum[$__rate_interval])) / sum by (aws_ecs_task_revision) (rate(keys_server_request_latency_count[$__rate_interval]))', 37 | 
legendFormat = 'Total r{{aws_ecs_task_revision}}', 38 | exemplar = false, 39 | refId = 'KeysServerRequestLatencyTotal', 40 | )) 41 | } 42 | -------------------------------------------------------------------------------- /.github/workflows/event_pr.yml: -------------------------------------------------------------------------------- 1 | name: ⚡ Pull-Request 2 | run-name: 'PR / ${{ github.event.pull_request.title }}' 3 | 4 | on: 5 | pull_request: 6 | types: 7 | - opened # A pull request was created. 8 | - reopened # A closed pull request was reopened. 9 | - edited # A pull request's title, body, or labels are edited. 10 | - synchronize # A pull request's branch was synchronized with its base branch. 11 | - unlocked # Conversation on a pull request was unlocked. 12 | 13 | concurrency: 14 | group: pr-${{ github.event.pull_request.number }} 15 | cancel-in-progress: true 16 | 17 | permissions: 18 | contents: read 19 | id-token: write 20 | issues: read 21 | pull-requests: write 22 | 23 | jobs: 24 | check_pr: 25 | name: Check PR 26 | runs-on: ubuntu-latest 27 | permissions: 28 | statuses: write 29 | steps: 30 | - name: Check PR Title 31 | uses: aslafy-z/conventional-pr-title-action@v3 32 | env: 33 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 34 | 35 | paths-filter: 36 | name: Paths Filter 37 | runs-on: ubuntu-latest 38 | steps: 39 | - uses: actions/checkout@v4 40 | - uses: WalletConnect/actions/github/paths-filter/@2.4.2 41 | id: filter 42 | with: 43 | path-app: . 
# Alarm when Redis CPU utilization stays at or above the configured threshold.
resource "aws_cloudwatch_metric_alarm" "redis_cpu_utilization" {
  alarm_name        = "${local.alarm_prefix} - Redis CPU Utilization"
  alarm_description = "${local.alarm_prefix} - Redis CPU utilization is high (over ${var.redis_cpu_threshold}%)"

  namespace = module.cloudwatch.namespaces.ElastiCache
  dimensions = {
    CacheClusterId = var.redis_cluster_id
  }
  metric_name = module.cloudwatch.metrics.ElastiCache.CPUUtilization

  evaluation_periods = local.evaluation_periods
  period             = local.period

  statistic           = module.cloudwatch.statistics.Average
  comparison_operator = module.cloudwatch.operators.GreaterThanOrEqualToThreshold
  threshold           = var.redis_cpu_threshold
  treat_missing_data  = "breaching"

  alarm_actions             = [aws_sns_topic.cloudwatch_webhook.arn]
  insufficient_data_actions = [aws_sns_topic.cloudwatch_webhook.arn]
}

# Alarm when Redis freeable memory drops below the configured threshold.
resource "aws_cloudwatch_metric_alarm" "redis_available_memory" {
  alarm_name = "${local.alarm_prefix} - Redis Available Memory"
  # Description fixed from "GiB": the threshold uses a decimal multiplier
  # (10^9 = GB); GiB would require pow(1024, 3).
  alarm_description = "${local.alarm_prefix} - Redis available memory is low (less than ${var.redis_memory_threshold}GB)"

  namespace = module.cloudwatch.namespaces.ElastiCache
  dimensions = {
    CacheClusterId = var.redis_cluster_id
  }
  metric_name = module.cloudwatch.metrics.ElastiCache.FreeableMemory

  evaluation_periods = local.evaluation_periods
  period             = local.period

  statistic           = module.cloudwatch.statistics.Average
  comparison_operator = module.cloudwatch.operators.LessThanOrEqualToThreshold
  # FreeableMemory is reported in bytes; convert GB -> bytes.
  threshold          = var.redis_memory_threshold * pow(1000, 3)
  treat_missing_data = "breaching"

  alarm_actions             = [aws_sns_topic.cloudwatch_webhook.arn]
  insufficient_data_actions = [aws_sns_topic.cloudwatch_webhook.arn]
}
${var.ecs_memory_threshold}%)" 27 | 28 | namespace = module.cloudwatch.namespaces.ECS 29 | dimensions = { 30 | ClusterName = var.ecs_cluster_name 31 | ServiceName = var.ecs_service_name 32 | } 33 | metric_name = module.cloudwatch.metrics.ECS.MemoryUtilization 34 | 35 | evaluation_periods = local.evaluation_periods 36 | period = local.period 37 | 38 | statistic = module.cloudwatch.statistics.Average 39 | comparison_operator = module.cloudwatch.operators.GreaterThanOrEqualToThreshold 40 | threshold = var.ecs_memory_threshold 41 | treat_missing_data = "breaching" 42 | 43 | alarm_actions = [aws_sns_topic.cloudwatch_webhook.arn] 44 | insufficient_data_actions = [aws_sns_topic.cloudwatch_webhook.arn] 45 | } 46 | -------------------------------------------------------------------------------- /terraform/main.tf: -------------------------------------------------------------------------------- 1 | data "aws_caller_identity" "this" {} 2 | 3 | resource "random_pet" "this" { 4 | length = 2 5 | } 6 | 7 | locals { 8 | ecr_repository_url = local.stage == "dev" ? 
data.terraform_remote_state.org.outputs.accounts.sdlc.dev.ecr-urls.notify : data.terraform_remote_state.org.outputs.accounts.wl.notify[local.stage].ecr-url 9 | 10 | stage = lookup({ 11 | "notify-server-wl-staging" = "staging", 12 | "notify-server-wl-prod" = "prod", 13 | "notify-server-wl-dev" = "dev", 14 | "notify-server-staging" = "staging", 15 | "notify-server-prod" = "prod", 16 | "wl-staging" = "staging", 17 | "wl-prod" = "prod", 18 | "wl-dev" = "dev", 19 | "staging" = "staging", 20 | "prod" = "prod", 21 | }, terraform.workspace, terraform.workspace) 22 | } 23 | 24 | resource "aws_kms_key" "cloudwatch_logs" { 25 | description = "KMS key for encrypting CloudWatch Logs" 26 | enable_key_rotation = true 27 | policy = jsonencode({ 28 | Version = "2012-10-17" 29 | Statement = [ 30 | { 31 | Sid = "Enable IAM User Permissions" 32 | Effect = "Allow" 33 | Principal = { 34 | AWS = data.aws_caller_identity.this.account_id 35 | } 36 | Action = "kms:*" 37 | Resource = "*" 38 | }, 39 | { 40 | Sid = "AllowCloudWatchLogs" 41 | Effect = "Allow" 42 | Principal = { 43 | Service = "logs.${module.this.region}.amazonaws.com" 44 | } 45 | Action = [ 46 | "kms:Encrypt*", 47 | "kms:Decrypt*", 48 | "kms:ReEncrypt*", 49 | "kms:GenerateDataKey*", 50 | "kms:Describe*" 51 | ] 52 | Resource = "*" 53 | }, 54 | ] 55 | }) 56 | } 57 | 58 | resource "aws_kms_alias" "cloudwatch_logs" { 59 | name = "alias/${module.this.id}-cloudwatch-logs" 60 | target_key_id = aws_kms_key.cloudwatch_logs.key_id 61 | } 62 | -------------------------------------------------------------------------------- /src/services/publisher_service/types.rs: -------------------------------------------------------------------------------- 1 | use { 2 | sqlx::FromRow, 3 | std::{fmt, str::FromStr}, 4 | }; 5 | 6 | #[derive(Debug, PartialEq)] 7 | pub enum SubscriberNotificationStatus { 8 | Queued, 9 | Processing, 10 | Published, 11 | Failed, 12 | NotSubscribed, 13 | WrongScope, 14 | RateLimited, 15 | } 16 | 17 | impl fmt::Display for 
SubscriberNotificationStatus { 18 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 19 | match *self { 20 | SubscriberNotificationStatus::Queued => write!(f, "queued"), 21 | SubscriberNotificationStatus::Processing => write!(f, "processing"), 22 | SubscriberNotificationStatus::Published => write!(f, "published"), 23 | SubscriberNotificationStatus::Failed => write!(f, "failed"), 24 | SubscriberNotificationStatus::NotSubscribed => write!(f, "not-subscribed"), 25 | SubscriberNotificationStatus::WrongScope => write!(f, "wrong-scope"), 26 | SubscriberNotificationStatus::RateLimited => write!(f, "rate-limited"), 27 | } 28 | } 29 | } 30 | 31 | impl FromStr for SubscriberNotificationStatus { 32 | type Err = String; 33 | fn from_str(s: &str) -> Result { 34 | match s { 35 | "queued" => Ok(SubscriberNotificationStatus::Queued), 36 | "processing" => Ok(SubscriberNotificationStatus::Processing), 37 | "published" => Ok(SubscriberNotificationStatus::Published), 38 | "failed" => Ok(SubscriberNotificationStatus::Failed), 39 | "not-subscribed" => Ok(SubscriberNotificationStatus::NotSubscribed), 40 | "wrong-scope" => Ok(SubscriberNotificationStatus::WrongScope), 41 | "rate-limited" => Ok(SubscriberNotificationStatus::RateLimited), 42 | _ => Err(format!("'{}' is not a valid state", s)), 43 | } 44 | } 45 | } 46 | 47 | #[derive(Debug, FromRow)] 48 | pub struct PublishingQueueStats { 49 | pub queued: i64, 50 | pub processing: i64, 51 | } 52 | -------------------------------------------------------------------------------- /.github/workflows/event_intake.yml: -------------------------------------------------------------------------------- 1 | # This workflow moves issues to the Project board when they receive the "accepted" label 2 | # When WalletConnect Org members create issues they are automatically "accepted". 3 | # Otherwise, they need to manually receive that label during intake. 
4 | name: ⚡ Intake 5 | 6 | on: 7 | issues: 8 | types: [ opened, labeled ] 9 | 10 | jobs: 11 | add-to-project: 12 | name: Add issue to board 13 | if: github.event_name == 'issues' && github.event.action == 'labeled' && github.event.label.name == 'accepted' 14 | runs-on: ubuntu-latest 15 | steps: 16 | - uses: actions/add-to-project@v1.0.1 17 | with: 18 | project-url: https://github.com/orgs/WalletConnect/projects/20 19 | github-token: ${{ secrets.ASSIGN_TO_PROJECT_GITHUB_TOKEN }} 20 | labeled: accepted 21 | label-operator: OR 22 | 23 | auto-promote: 24 | name: auto-promote 25 | if: github.event.action == 'opened' 26 | runs-on: ubuntu-latest 27 | steps: 28 | - name: Check Core Team membership 29 | uses: tspascoal/get-user-teams-membership@v3 30 | id: is-core-team 31 | with: 32 | username: ${{ github.event_name != 'pull_request' && github.event.issue.user.login || github.event.sender.login }} 33 | team: "Core Team" 34 | GITHUB_TOKEN: ${{ secrets.ASSIGN_TO_PROJECT_GITHUB_TOKEN }} 35 | - name: Print result 36 | env: 37 | CREATOR: ${{ github.event_name != 'pull_request' && github.event.issue.user.login || github.event.sender.login }} 38 | IS_TEAM_MEMBER: ${{ steps.is-core-team.outputs.isTeamMember }} 39 | run: echo "$CREATOR (Core Team Member $IS_TEAM_MEMBER) created this issue/PR" 40 | - name: Label issues 41 | if: ${{ steps.is-core-team.outputs.isTeamMember == 'true' }} 42 | uses: andymckay/labeler@3a4296e9dcdf9576b0456050db78cfd34853f260 43 | with: 44 | add-labels: "accepted" 45 | repo-token: ${{ secrets.ASSIGN_TO_PROJECT_GITHUB_TOKEN }} 46 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/rds/freeable_memory.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = 
grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'Freeable Memory', 11 | datasource = ds.cloudwatch, 12 | ) 13 | .configure( 14 | defaults.configuration.timeseries 15 | .withUnit(grafana.fieldConfig.units.DecBytes) 16 | ) 17 | 18 | .setAlert(vars.environment, grafana.alert.new( 19 | namespace = vars.namespace, 20 | name = '%(env)s - RDS freeable memory low' % { env: vars.environment }, 21 | message = '%(env)s - RDS freeable memory low' % { env: vars.environment }, 22 | notifications = vars.notifications, 23 | conditions = [ 24 | grafana.alertCondition.new( 25 | evaluatorParams = [ 30 ], 26 | evaluatorType = 'lt', 27 | operatorType = 'or', 28 | queryRefId = 'Mem_Min', 29 | queryTimeStart = '5m', 30 | queryTimeEnd = 'now', 31 | reducerType = grafana.alert_reducers.Min 32 | ), 33 | ], 34 | )) 35 | 36 | .addTarget(targets.cloudwatch( 37 | datasource = ds.cloudwatch, 38 | namespace = 'AWS/RDS', 39 | metricName = 'FreeableMemory', 40 | dimensions = { 41 | DBClusterIdentifier: vars.rds_cluster_id, 42 | }, 43 | matchExact = true, 44 | statistic = 'Average', 45 | refId = 'Mem_Avg', 46 | )) 47 | 48 | .addTarget(targets.cloudwatch( 49 | datasource = ds.cloudwatch, 50 | namespace = 'AWS/RDS', 51 | metricName = 'FreeableMemory', 52 | dimensions = { 53 | DBClusterIdentifier: vars.rds_cluster_id, 54 | }, 55 | matchExact = true, 56 | statistic = 'Minimum', 57 | refId = 'Mem_Min', 58 | )) 59 | } 60 | -------------------------------------------------------------------------------- /terraform/alerting/variables.tf: -------------------------------------------------------------------------------- 1 | variable "webhook_cloudwatch_p2" { 2 | description = "The URL of the webhook to be called on CloudWatch P2 alarms" 3 | type = string 4 | } 5 | 6 | variable "webhook_prometheus_p2" { 7 | description = "The URL of the webhook to be called on Prometheus P2 alarms" 8 | type = string 9 | } 10 | 11 | 
#------------------------------------------------------------------------------- 12 | # ECS 13 | 14 | variable "ecs_cluster_name" { 15 | description = "The name of the ECS cluster running the application" 16 | type = string 17 | } 18 | 19 | variable "ecs_service_name" { 20 | description = "The name of the ECS service running the application" 21 | type = string 22 | } 23 | 24 | variable "ecs_cpu_threshold" { 25 | description = "The ECS CPU utilization alarm threshold in percents" 26 | type = number 27 | default = 80 28 | } 29 | 30 | variable "ecs_memory_threshold" { 31 | description = "The ECS memory utilization alarm threshold in percents" 32 | type = number 33 | default = 80 34 | } 35 | 36 | #------------------------------------------------------------------------------- 37 | # ELB 38 | 39 | variable "elb_load_balancer_arn" { 40 | description = "The ARN of the application load balancer." 41 | type = string 42 | } 43 | 44 | variable "elb_5xx_threshold" { 45 | description = "The ELB 5xx error rate alarm threshold" 46 | type = number 47 | default = 100 48 | } 49 | 50 | #------------------------------------------------------------------------------- 51 | # Redis 52 | 53 | variable "redis_cluster_id" { 54 | description = "The Redis cluster ID" 55 | type = string 56 | } 57 | 58 | variable "redis_cpu_threshold" { 59 | description = "The Redis CPU utilization alarm threshold in percents" 60 | type = number 61 | default = 80 62 | } 63 | 64 | variable "redis_memory_threshold" { 65 | description = "The Redis available memory alarm threshold in GiB" 66 | type = number 67 | default = 3 68 | } 69 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/lb/healthy_hosts.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = 
grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | local _configuration = defaults.configuration.timeseries 8 | .withSoftLimit( 9 | axisSoftMin = 0, 10 | axisSoftMax = 5, 11 | ); 12 | 13 | { 14 | new(ds, vars):: 15 | panels.timeseries( 16 | title = 'Healthy Hosts', 17 | datasource = ds.cloudwatch, 18 | ) 19 | .configure(_configuration) 20 | 21 | .addTarget(targets.cloudwatch( 22 | datasource = ds.cloudwatch, 23 | metricQueryType = grafana.target.cloudwatch.queryTypes.Query, 24 | 25 | dimensions = { 26 | TargetGroup: vars.target_group 27 | }, 28 | metricName = 'HealthyHostCount', 29 | namespace = 'AWS/ApplicationELB', 30 | sql = { 31 | from: { 32 | property: { 33 | name: "AWS/ApplicationELB", 34 | type: "string" 35 | }, 36 | type: "property" 37 | }, 38 | select: { 39 | name: "MAX", 40 | parameters: [ 41 | { 42 | name: "HealthyHostCount", 43 | type: "functionParameter" 44 | } 45 | ], 46 | type: "function" 47 | }, 48 | where: { 49 | expressions: [ 50 | { 51 | operator: { 52 | name: "=", 53 | value: vars.load_balancer 54 | }, 55 | property: { 56 | name: "LoadBalancer", 57 | type: "string" 58 | }, 59 | type: "operator" 60 | } 61 | ], 62 | type: "and" 63 | } 64 | }, 65 | sqlExpression = "SELECT MAX(HealthyHostCount) FROM \"AWS/ApplicationELB\" WHERE LoadBalancer = '%s'" % [vars.load_balancer], 66 | statistic = 'Maximum', 67 | )) 68 | } 69 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | _legacy/ 2 | 3 | #--------------------------------------- 4 | # Migration 5 | .github/_legacy 6 | terraform/_legacy 7 | 8 | #--------------------------------------- 9 | # General 10 | 11 | .DS_Store 12 | .AppleDouble 13 | .LSOverride 14 | [Dd]esktop.ini 15 | 16 | #--------------------------------------- 17 | # Environment 18 | 19 | .direnv 20 | .envrc 21 | .actrc 22 | .env 23 | .env.terraform 24 | 25 | #--------------------------------------- 26 | # 
Editors 27 | 28 | # JetBrains 29 | .idea/ 30 | out/ 31 | .fleet 32 | *.iws 33 | 34 | # VSCode 35 | .vscode/ 36 | .history/ 37 | *.code-workspace 38 | 39 | #--------------------------------------- 40 | # Rust/Cargo 41 | 42 | # Generated by Cargo, will have compiled files and executables 43 | debug/ 44 | target/ 45 | 46 | # Backup files generated by rustfmt 47 | **/*.rs.bk 48 | 49 | # MSVC Windows builds of rustc generate these, which store debugging information 50 | *.pdb 51 | 52 | #--------------------------------------- 53 | # Terraform 54 | 55 | # Local .terraform directories 56 | **/.terraform/* 57 | 58 | # .tfstate files 59 | *.tfstate 60 | *.tfstate.* 61 | 62 | # Exclude all .tfvars files, which are likely to contain sensitive data, such as 63 | # password, private keys, and other secrets. These should not be part of version 64 | # control as they are data points which are potentially sensitive and subject 65 | # to change depending on the environment. 66 | *.tfvars 67 | *.tfvars.json 68 | 69 | # Ignore override files as they are usually used to override resources locally and so are not checked in 70 | override.tf 71 | override.tf.json 72 | *_override.tf 73 | *_override.tf.json 74 | 75 | # Include override files you do wish to add to version control using negated pattern 76 | # 77 | # !example_override.tf 78 | 79 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan 80 | *tfplan* 81 | 82 | # Ignore CLI configuration files 83 | .terraformrc 84 | terraform.rc 85 | 86 | #--------------------------------------- 87 | # Integration 88 | 89 | node_modules 90 | *.log 91 | 92 | # test script 93 | test.sh 94 | -------------------------------------------------------------------------------- /src/model/types/account_id/caip10.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::eip155::{validate_eip155, Eip155Error, NAMESPACE_EIP155}, 3 | once_cell::sync::Lazy, 4 | regex::Regex, 5 | 
thiserror::Error, 6 | }; 7 | 8 | #[derive(Debug, PartialEq, Eq, Error)] 9 | pub enum Caip10Error { 10 | #[error("Account ID is is not a valid CAIP-10 account ID")] 11 | Invalid, 12 | 13 | #[error("Account ID uses an unsupported namespace")] 14 | UnsupportedNamespace, 15 | 16 | #[error("Account ID is eip155 namespace but: {0}")] 17 | Eip155(#[from] Eip155Error), 18 | } 19 | 20 | // https://github.com/ChainAgnostic/CAIPs/blob/main/CAIPs/caip-10.md#syntax 21 | static PATTERN: Lazy = Lazy::new(|| { 22 | Regex::new(r"^([-a-z0-9]{3,8}):([-_a-zA-Z0-9]{1,32}):([-.%a-zA-Z0-9]{1,128})$") 23 | .expect("Safe unwrap: panics should be caught by test cases") 24 | }); 25 | 26 | pub fn validate_caip_10(s: &str) -> Result<(), Caip10Error> { 27 | if let Some(caps) = PATTERN.captures(s) { 28 | let (_, [namespace, reference, address]) = caps.extract(); 29 | 30 | match namespace { 31 | NAMESPACE_EIP155 => validate_eip155(reference, address).map_err(Into::into), 32 | _ => Err(Caip10Error::UnsupportedNamespace), 33 | } 34 | } else { 35 | Err(Caip10Error::Invalid) 36 | } 37 | } 38 | 39 | #[cfg(test)] 40 | mod tests { 41 | use super::*; 42 | 43 | #[test] 44 | fn test() { 45 | assert!(validate_caip_10("eip155:1:0x9AfEaC202C837df470b5A145e0EfD6a574B21029").is_ok()); 46 | assert_eq!(validate_caip_10("eip155:111111111111111111111111111111111:0x9AfEaC202C837df470b5A145e0EfD6a574B21029"), Err(Caip10Error::Invalid)); 47 | assert_eq!(validate_caip_10("junk"), Err(Caip10Error::Invalid)); 48 | } 49 | 50 | #[test] 51 | fn account_id_valid_namespaces() { 52 | assert!(validate_caip_10("eip155:1:0x9AfEaC202C837df470b5A145e0EfD6a574B21029").is_ok()); 53 | assert_eq!( 54 | validate_caip_10("junk:1:1"), 55 | Err(Caip10Error::UnsupportedNamespace) 56 | ); 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /src/model/types/mod.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::{ 3 | rpc::{decode_key, 
DecodeKeyError}, 4 | utils::get_client_id, 5 | }, 6 | chrono::{DateTime, Utc}, 7 | relay_rpc::{ 8 | auth::ed25519_dalek::VerifyingKey, 9 | domain::{DecodedClientId, ProjectId, Topic}, 10 | }, 11 | sqlx::FromRow, 12 | thiserror::Error, 13 | uuid::Uuid, 14 | }; 15 | 16 | // See /migrations/ERD.md 17 | 18 | mod account_id; 19 | pub use account_id::*; 20 | 21 | #[derive(Debug, FromRow)] 22 | pub struct Project { 23 | pub id: Uuid, 24 | #[sqlx(try_from = "String")] 25 | pub project_id: ProjectId, 26 | pub app_domain: String, 27 | #[sqlx(try_from = "String")] 28 | pub topic: Topic, 29 | pub authentication_public_key: String, 30 | pub authentication_private_key: String, 31 | pub subscribe_public_key: String, 32 | pub subscribe_private_key: String, 33 | } 34 | 35 | #[derive(Debug, Error)] 36 | pub enum GetAuthenticationClientIdError { 37 | #[error("Decode key: {0}")] 38 | DecodeKey(#[from] DecodeKeyError), 39 | 40 | #[error("Parse verifying key: {0}")] 41 | ParseVerifyingKey(#[from] k256::ecdsa::Error), 42 | } 43 | 44 | impl Project { 45 | pub fn get_authentication_client_id( 46 | &self, 47 | ) -> Result { 48 | Ok(get_client_id(&VerifyingKey::from_bytes(&decode_key( 49 | &self.authentication_public_key, 50 | )?)?)) 51 | } 52 | } 53 | 54 | #[derive(Debug, FromRow)] 55 | pub struct Subscriber { 56 | pub id: Uuid, 57 | pub project: Uuid, 58 | /// CAIP-10 account 59 | #[sqlx(try_from = "String")] 60 | pub account: AccountId, 61 | pub sym_key: String, 62 | #[sqlx(try_from = "String")] 63 | pub topic: Topic, 64 | pub expiry: DateTime, 65 | } 66 | 67 | #[derive(Debug)] 68 | pub struct SubscriptionWatcher { 69 | pub account: AccountId, 70 | /// Project the watcher is authorized for. None for all. 
71 | pub project: Option, 72 | pub did_key: String, 73 | pub sym_key: String, 74 | pub expiry: DateTime, 75 | } 76 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/app/relay_subscribe_failures.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'Relay Subscribe Errors', 11 | datasource = ds.prometheus, 12 | ) 13 | .configure(defaults.configuration.timeseries) 14 | 15 | .setAlert(vars.environment, grafana.alert.new( 16 | namespace = vars.namespace, 17 | name = '%(env)s - Failed to subscribe to relay topic' % { env: vars.environment }, 18 | message = '%(env)s - Failed to subscribe to relay topic' % { env: vars.environment }, 19 | notifications = vars.notifications, 20 | noDataState = 'no_data', 21 | period = '0m', 22 | conditions = [ 23 | grafana.alertCondition.new( 24 | evaluatorParams = [ 0 ], 25 | evaluatorType = 'gt', 26 | operatorType = 'or', 27 | queryRefId = 'RelaySubscribePermanentFailures', 28 | queryTimeStart = '5m', 29 | queryTimeEnd = 'now', 30 | reducerType = grafana.alert_reducers.Avg 31 | ), 32 | ], 33 | )) 34 | 35 | .addTarget(targets.prometheus( 36 | datasource = ds.prometheus, 37 | expr = 'sum by (aws_ecs_task_revision) (increase(relay_subscribe_failures_total{is_permanent="true"}[$__rate_interval]))', 38 | legendFormat = 'Permanent r{{aws_ecs_task_revision}}', 39 | exemplar = true, 40 | refId = 'RelaySubscribePermanentFailures', 41 | )) 42 | 43 | .addTarget(targets.prometheus( 44 | datasource = ds.prometheus, 45 | expr = 'sum by (aws_ecs_task_revision) (increase(relay_subscribe_failures_total{is_permanent="false"}[$__rate_interval]))', 46 | legendFormat = 'Temporary 
r{{aws_ecs_task_revision}}', 47 | exemplar = true, 48 | refId = 'RelaySubscribeTemporaryFailures', 49 | )) 50 | } 51 | -------------------------------------------------------------------------------- /src/services/public_http_server/handlers/get_welcome_notification.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::{ 3 | error::NotifyServerError, 4 | model::helpers::{get_project_by_project_id, get_welcome_notification}, 5 | rate_limit::{self, Clock, RateLimitError}, 6 | registry::{extractor::AuthedProjectId, storage::redis::Redis}, 7 | state::AppState, 8 | }, 9 | axum::{ 10 | extract::State, 11 | response::{IntoResponse, Response}, 12 | Json, 13 | }, 14 | relay_rpc::domain::ProjectId, 15 | std::sync::Arc, 16 | tracing::instrument, 17 | }; 18 | 19 | #[instrument(name = "get_welcome_notification", skip(state))] 20 | pub async fn handler( 21 | State(state): State>, 22 | AuthedProjectId(project_id, _): AuthedProjectId, 23 | ) -> Result { 24 | if let Some(redis) = state.redis.as_ref() { 25 | get_welcome_notification_rate_limit(redis, &project_id, &state.clock).await?; 26 | } 27 | 28 | let project = get_project_by_project_id(project_id, &state.postgres, state.metrics.as_ref()) 29 | .await 30 | .map_err(|e| match e { 31 | sqlx::Error::RowNotFound => { 32 | NotifyServerError::UnprocessableEntity("Project not found".into()) 33 | } 34 | e => e.into(), 35 | })?; 36 | 37 | // TODO this should lookup by project_id not project UUID, but database can't differentiate between a missing project and a missing welcome notification? 
38 | let welcome_notification = 39 | get_welcome_notification(project.id, &state.postgres, state.metrics.as_ref()).await?; 40 | 41 | Ok(Json(welcome_notification).into_response()) 42 | } 43 | 44 | pub async fn get_welcome_notification_rate_limit( 45 | redis: &Arc, 46 | project_id: &ProjectId, 47 | clock: &Clock, 48 | ) -> Result<(), RateLimitError> { 49 | rate_limit::token_bucket( 50 | redis, 51 | format!("get_welcome_notification-{project_id}"), 52 | 100, 53 | chrono::Duration::minutes(1), 54 | 1, 55 | clock, 56 | ) 57 | .await 58 | } 59 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/app/relay_incoming_message_server_errors.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'Msg In Server Errors', 11 | datasource = ds.prometheus, 12 | ) 13 | .configure(defaults.configuration.timeseries) 14 | 15 | .setAlert(vars.environment, grafana.alert.new( 16 | namespace = vars.namespace, 17 | name = '%(env)s - Failed to process incoming relay message' % { env: vars.environment }, 18 | message = '%(env)s - Failed to process incoming relay message' % { env: vars.environment }, 19 | notifications = vars.notifications, 20 | noDataState = 'no_data', 21 | period = '0m', 22 | conditions = [ 23 | grafana.alertCondition.new( 24 | evaluatorParams = [ 100 ], 25 | evaluatorType = 'gt', 26 | operatorType = 'or', 27 | queryRefId = 'RelayIncomingMessagesServerErrorsTotal', 28 | queryTimeStart = '5m', 29 | queryTimeEnd = 'now', 30 | reducerType = grafana.alert_reducers.Avg 31 | ), 32 | ], 33 | )) 34 | 35 | .addTarget(targets.prometheus( 36 | datasource = ds.prometheus, 37 | expr = 'sum by (aws_ecs_task_revision, 
tag) (increase(relay_incoming_messages_total{status="server_error"}[$__rate_interval]))', 38 | legendFormat = '{{tag}} r{{aws_ecs_task_revision}}', 39 | exemplar = true, 40 | refId = 'RelayIncomingMessagesServerErrors', 41 | )) 42 | 43 | .addTarget(targets.prometheus( 44 | datasource = ds.prometheus, 45 | expr = 'sum(increase(relay_incoming_messages_total{status="server_error"}[$__rate_interval]))', 46 | legendFormat = 'r{{aws_ecs_task_revision}}', 47 | exemplar = true, 48 | refId = 'RelayIncomingMessagesServerErrorsTotal', 49 | )) 50 | } 51 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/app/relay_incoming_message_rate.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'Msg In Rate', 11 | datasource = ds.prometheus, 12 | ) 13 | .configure( 14 | defaults.configuration.timeseries 15 | .withUnit('cps') 16 | ) 17 | 18 | .setAlert(vars.environment, grafana.alert.new( 19 | namespace = vars.namespace, 20 | name = '%(env)s - Not receiving any watch subscriptions requests' % { env: vars.environment }, 21 | message = '%(env)s - Not receiving any watch subscriptions requests' % { env: vars.environment }, 22 | notifications = vars.notifications, 23 | noDataState = 'no_data', 24 | period = '30m', 25 | conditions = [ 26 | grafana.alertCondition.new( 27 | evaluatorParams = [ 1 ], 28 | evaluatorType = 'lt', 29 | operatorType = 'or', 30 | queryRefId = 'RelayIncomingWatchSubscriptionsRate', 31 | queryTimeStart = '5m', 32 | queryTimeEnd = 'now', 33 | reducerType = grafana.alert_reducers.Avg 34 | ), 35 | ], 36 | )) 37 | 38 | .addTarget(targets.prometheus( 39 | datasource = ds.prometheus, 40 | expr = 'sum 
by (aws_ecs_task_revision, tag) (rate(relay_incoming_messages_total[$__rate_interval]))', 41 | legendFormat = '{{tag}} r{{aws_ecs_task_revision}}', 42 | exemplar = true, 43 | refId = 'RelayIncomingMessagesRate', 44 | )) 45 | 46 | .addTarget(targets.prometheus( 47 | datasource = ds.prometheus, 48 | expr = 'sum(increase(relay_incoming_messages_total{tag="4010"}[$__rate_interval]))', 49 | legendFormat = '{{tag}} r{{aws_ecs_task_revision}}', 50 | exemplar = true, 51 | refId = 'RelayIncomingWatchSubscriptionsRate', 52 | hide = true, 53 | )) 54 | } 55 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/app/relay_outgoing_message_failures.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'Msg Out Publish Errors', 11 | datasource = ds.prometheus, 12 | ) 13 | .configure(defaults.configuration.timeseries) 14 | 15 | .setAlert(vars.environment, grafana.alert.new( 16 | namespace = vars.namespace, 17 | name = '%(env)s - Failed to publish to relay' % { env: vars.environment }, 18 | message = '%(env)s - Failed to publish message to relay' % { env: vars.environment }, 19 | notifications = vars.notifications, 20 | noDataState = 'no_data', 21 | period = '0m', 22 | conditions = [ 23 | grafana.alertCondition.new( 24 | evaluatorParams = [ 0 ], 25 | evaluatorType = 'gt', 26 | operatorType = 'or', 27 | queryRefId = 'RelayOutgoingMessagePermanentFailures', 28 | queryTimeStart = '5m', 29 | queryTimeEnd = 'now', 30 | reducerType = grafana.alert_reducers.Avg 31 | ), 32 | ], 33 | )) 34 | 35 | .addTarget(targets.prometheus( 36 | datasource = ds.prometheus, 37 | expr = 'sum by (aws_ecs_task_revision) 
(increase(relay_outgoing_message_failures_total{is_permanent="true"}[$__rate_interval]))', 38 | legendFormat = 'Permanent r{{aws_ecs_task_revision}}', 39 | exemplar = true, 40 | refId = 'RelayOutgoingMessagePermanentFailures', 41 | )) 42 | 43 | .addTarget(targets.prometheus( 44 | datasource = ds.prometheus, 45 | expr = 'sum by (aws_ecs_task_revision) (increase(relay_outgoing_message_failures_total{is_permanent="false"}[$__rate_interval]))', 46 | legendFormat = 'Temporary r{{aws_ecs_task_revision}}', 47 | exemplar = true, 48 | refId = 'RelayOutgoingMessageTemporaryFailures', 49 | )) 50 | } 51 | -------------------------------------------------------------------------------- /terraform/monitoring/panels/app/relay_batch_subscribe_failures.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import '../../grafonnet-lib/grafana.libsonnet'; 2 | local defaults = import '../../grafonnet-lib/defaults.libsonnet'; 3 | 4 | local panels = grafana.panels; 5 | local targets = grafana.targets; 6 | 7 | { 8 | new(ds, vars):: 9 | panels.timeseries( 10 | title = 'Relay Batch Subscribe Errors', 11 | datasource = ds.prometheus, 12 | ) 13 | .configure(defaults.configuration.timeseries) 14 | 15 | .setAlert(vars.environment, grafana.alert.new( 16 | namespace = vars.namespace, 17 | name = '%(env)s - Failed to batch subscribe to relay topic' % { env: vars.environment }, 18 | message = '%(env)s - Failed to batch subscribe to relay topic' % { env: vars.environment }, 19 | notifications = vars.notifications, 20 | noDataState = 'no_data', 21 | period = '0m', 22 | conditions = [ 23 | grafana.alertCondition.new( 24 | evaluatorParams = [ 0 ], 25 | evaluatorType = 'gt', 26 | operatorType = 'or', 27 | queryRefId = 'RelayBatchSubscribePermanentFailures', 28 | queryTimeStart = '5m', 29 | queryTimeEnd = 'now', 30 | reducerType = grafana.alert_reducers.Avg 31 | ), 32 | ], 33 | )) 34 | 35 | .addTarget(targets.prometheus( 36 | datasource = 
ds.prometheus, 37 | expr = 'sum by (aws_ecs_task_revision) (increase(relay_batch_subscribe_failures_total{is_permanent="true"}[$__rate_interval]))', 38 | legendFormat = 'Permanent r{{aws_ecs_task_revision}}', 39 | exemplar = true, 40 | refId = 'RelayBatchSubscribePermanentFailures', 41 | )) 42 | 43 | .addTarget(targets.prometheus( 44 | datasource = ds.prometheus, 45 | expr = 'sum by (aws_ecs_task_revision) (increase(relay_batch_subscribe_failures_total{is_permanent="false"}[$__rate_interval]))', 46 | legendFormat = 'Temporary r{{aws_ecs_task_revision}}', 47 | exemplar = true, 48 | refId = 'RelayBatchSubscribeTemporaryFailures', 49 | )) 50 | } 51 | -------------------------------------------------------------------------------- /src/config/deployed/networking.rs: -------------------------------------------------------------------------------- 1 | use {ipnet::IpNet, std::net::IpAddr}; 2 | 3 | #[derive(thiserror::Error, Debug)] 4 | pub enum NetworkInterfaceError { 5 | #[error("machine has no public IP address")] 6 | PublicAddressNotFound, 7 | #[error("machine has multiple public IP addresses")] 8 | MultiplePublicAddresses, 9 | } 10 | 11 | /// Attempts to find the public IP address of this machine. 
pub fn find_public_ip_addr() -> Result<IpAddr, NetworkInterfaceError> {
description = "The password for the master DB user" 18 | type = string 19 | default = "" 20 | } 21 | 22 | #------------------------------------------------------------------------------- 23 | # Capacity 24 | 25 | variable "instances" { 26 | description = "The number of database instances to create" 27 | type = number 28 | default = 1 29 | } 30 | 31 | variable "min_capacity" { 32 | description = "The minimum capacity for the Aurora cluster (in Aurora Capacity Units)" 33 | type = number 34 | default = 2 35 | } 36 | 37 | variable "max_capacity" { 38 | description = "The maximum capacity for the Aurora cluster (in Aurora Capacity Units)" 39 | type = number 40 | default = 20 41 | } 42 | 43 | #------------------------------------------------------------------------------- 44 | # Logs 45 | 46 | variable "cloudwatch_logs_key_arn" { 47 | description = "The ARN of the KMS key to use for encrypting CloudWatch logs" 48 | type = string 49 | } 50 | 51 | variable "cloudwatch_retention_in_days" { 52 | description = "The number of days to retain CloudWatch logs for the DB instance" 53 | type = number 54 | default = 14 55 | } 56 | 57 | #------------------------------------------------------------------------------- 58 | # Networking 59 | 60 | variable "vpc_id" { 61 | description = "The VPC ID to create the security group in" 62 | type = string 63 | } 64 | 65 | variable "subnet_ids" { 66 | description = "The IDs of the subnets to deploy to" 67 | type = list(string) 68 | } 69 | 70 | variable "ingress_cidr_blocks" { 71 | description = "The CIDR blocks to allow ingress from" 72 | type = list(string) 73 | } 74 | -------------------------------------------------------------------------------- /src/notify_keys.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::{ 3 | error::NotifyServerError, 4 | utils::{get_client_id, topic_from_key}, 5 | }, 6 | rand_chacha::{ 7 | rand_core::{RngCore, SeedableRng}, 8 | ChaCha20Rng, 9 | }, 10 | 
relay_rpc::{ 11 | auth::ed25519_dalek::{SigningKey, VerifyingKey}, 12 | domain::{DecodedClientId, Topic}, 13 | }, 14 | url::Url, 15 | }; 16 | 17 | pub struct NotifyKeys { 18 | pub domain: String, 19 | pub key_agreement_secret: x25519_dalek::StaticSecret, 20 | pub key_agreement_public: x25519_dalek::PublicKey, 21 | pub key_agreement_topic: Topic, 22 | pub authentication_secret: SigningKey, 23 | pub authentication_public: VerifyingKey, 24 | pub authentication_client_id: DecodedClientId, 25 | } 26 | 27 | impl NotifyKeys { 28 | pub fn new(notify_url: &Url, keypair_seed: [u8; 32]) -> Result { 29 | let domain = notify_url 30 | .host_str() 31 | .ok_or(NotifyServerError::UrlMissingHost)? 32 | .to_owned(); 33 | 34 | // Use specific RNG instead of StdRng because StdRng can change implementations 35 | // between releases 36 | let get_rng = || ChaCha20Rng::from_seed(keypair_seed); 37 | 38 | let key_agreement_secret = x25519_dalek::StaticSecret::from({ 39 | let mut key_agreement_secret: [u8; 32] = [0; 32]; 40 | get_rng().fill_bytes(&mut key_agreement_secret); 41 | key_agreement_secret 42 | }); 43 | let key_agreement_public = x25519_dalek::PublicKey::from(&key_agreement_secret); 44 | 45 | let authentication_secret = SigningKey::generate(&mut get_rng()); 46 | let authentication_public = VerifyingKey::from(&authentication_secret); 47 | let authentication_client_id = get_client_id(&authentication_public); 48 | 49 | Ok(Self { 50 | domain, 51 | key_agreement_secret, 52 | key_agreement_public, 53 | key_agreement_topic: topic_from_key(key_agreement_public.as_bytes()), 54 | authentication_secret, 55 | authentication_public, 56 | authentication_client_id, 57 | }) 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /src/services/public_http_server/handlers/post_welcome_notification.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::{ 3 | error::NotifyServerError, 4 | model::helpers::{ 5 
| get_project_by_project_id, set_welcome_notification, WelcomeNotification, 6 | }, 7 | rate_limit::{self, Clock, RateLimitError}, 8 | registry::{extractor::AuthedProjectId, storage::redis::Redis}, 9 | state::AppState, 10 | }, 11 | axum::{ 12 | extract::State, 13 | http::StatusCode, 14 | response::{IntoResponse, Response}, 15 | Json, 16 | }, 17 | relay_rpc::domain::ProjectId, 18 | std::sync::Arc, 19 | tracing::instrument, 20 | }; 21 | 22 | #[instrument(name = "post_welcome_notification", skip(state))] 23 | pub async fn handler( 24 | State(state): State>, 25 | AuthedProjectId(project_id, _): AuthedProjectId, 26 | Json(welcome_notification): Json, 27 | ) -> Result { 28 | if let Some(redis) = state.redis.as_ref() { 29 | post_welcome_notification_rate_limit(redis, &project_id, &state.clock).await?; 30 | } 31 | 32 | // TODO combine queries for performance 33 | let project = get_project_by_project_id(project_id, &state.postgres, state.metrics.as_ref()) 34 | .await 35 | .map_err(|e| match e { 36 | sqlx::Error::RowNotFound => { 37 | NotifyServerError::UnprocessableEntity("Project not found".into()) 38 | } 39 | e => e.into(), 40 | })?; 41 | 42 | set_welcome_notification( 43 | project.id, 44 | welcome_notification, 45 | &state.postgres, 46 | state.metrics.as_ref(), 47 | ) 48 | .await?; 49 | 50 | Ok(StatusCode::NO_CONTENT.into_response()) 51 | } 52 | 53 | pub async fn post_welcome_notification_rate_limit( 54 | redis: &Arc, 55 | project_id: &ProjectId, 56 | clock: &Clock, 57 | ) -> Result<(), RateLimitError> { 58 | rate_limit::token_bucket( 59 | redis, 60 | format!("post_welcome_notification-{project_id}"), 61 | 100, 62 | chrono::Duration::minutes(1), 63 | 1, 64 | clock, 65 | ) 66 | .await 67 | } 68 | -------------------------------------------------------------------------------- /src/services/public_http_server/handlers/notify_v0.rs: -------------------------------------------------------------------------------- 1 | use { 2 | super::notify_v1, 3 | crate::{ 4 | 
error::NotifyServerError, model::types::AccountId, registry::extractor::AuthedProjectId, 5 | state::AppState, types::Notification, 6 | }, 7 | axum::{ 8 | extract::State, 9 | http::StatusCode, 10 | response::{IntoResponse, Response}, 11 | Json, 12 | }, 13 | serde::{Deserialize, Serialize}, 14 | std::{collections::HashSet, sync::Arc}, 15 | tracing::instrument, 16 | }; 17 | 18 | #[derive(Debug, Serialize, Deserialize, Clone)] 19 | pub struct NotifyBody { 20 | #[serde(default)] 21 | pub notification_id: Option, 22 | pub notification: Notification, 23 | pub accounts: Vec, 24 | } 25 | 26 | #[derive(Serialize, Deserialize, PartialEq, Eq, Hash, Debug)] 27 | pub struct SendFailure { 28 | pub account: AccountId, 29 | pub reason: String, 30 | } 31 | 32 | #[derive(Serialize, Deserialize, Debug)] 33 | pub struct ResponseBody { 34 | pub sent: HashSet, 35 | pub failed: HashSet, 36 | pub not_found: HashSet, 37 | } 38 | 39 | #[instrument(name = "notify_v0", skip_all)] 40 | pub async fn handler( 41 | state: State>, 42 | authed_project_id: AuthedProjectId, 43 | Json(notify_args): Json, 44 | ) -> Result { 45 | let response = notify_v1::handler_impl( 46 | state, 47 | authed_project_id, 48 | Json(vec![notify_v1::NotifyBodyNotification { 49 | notification_id: notify_args.notification_id, 50 | notification: notify_args.notification, 51 | accounts: notify_args.accounts, 52 | }]), 53 | ) 54 | .await?; 55 | 56 | let response = ResponseBody { 57 | sent: response.sent, 58 | failed: response 59 | .failed 60 | .into_iter() 61 | .map(|failure| SendFailure { 62 | account: failure.account, 63 | reason: failure.reason, 64 | }) 65 | .collect(), 66 | not_found: response.not_found, 67 | }; 68 | 69 | Ok((StatusCode::OK, Json(response)).into_response()) 70 | } 71 | -------------------------------------------------------------------------------- /src/notify_message.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::{ 3 | auth::{add_ttl, sign_jwt, 
DidWeb, SignJwtError}, 4 | model::types::AccountId, 5 | spec::{NOTIFY_MESSAGE_ACT, NOTIFY_MESSAGE_TTL}, 6 | }, 7 | chrono::Utc, 8 | relay_rpc::{auth::ed25519_dalek::SigningKey, domain::DecodedClientId}, 9 | serde::{Deserialize, Serialize}, 10 | sqlx::prelude::FromRow, 11 | std::sync::Arc, 12 | uuid::Uuid, 13 | }; 14 | 15 | pub struct ProjectSigningDetails { 16 | pub decoded_client_id: DecodedClientId, 17 | pub private_key: SigningKey, 18 | pub app: DidWeb, 19 | } 20 | 21 | pub fn sign_message( 22 | msg: Arc, 23 | account: AccountId, 24 | ProjectSigningDetails { 25 | decoded_client_id, 26 | private_key, 27 | app, 28 | }: &ProjectSigningDetails, 29 | ) -> Result { 30 | let now = Utc::now(); 31 | let message = NotifyMessage { 32 | iat: now.timestamp(), 33 | exp: add_ttl(now, NOTIFY_MESSAGE_TTL).timestamp(), 34 | iss: decoded_client_id.to_did_key(), 35 | // no `aud` because any client can receive this message 36 | act: NOTIFY_MESSAGE_ACT.to_owned(), 37 | sub: account.to_did_pkh(), 38 | app: app.clone(), 39 | msg, 40 | }; 41 | 42 | sign_jwt(message, private_key) 43 | } 44 | 45 | #[derive(Serialize, Deserialize, Debug)] 46 | pub struct NotifyMessage { 47 | pub iat: i64, // issued at 48 | pub exp: i64, // expiry 49 | // TODO: This was changed from notify pubkey, should be confirmed if we want to keep this 50 | pub iss: String, // dapps identity key 51 | pub act: String, // action intent (must be "notify_message") 52 | pub sub: String, // did:pkh of blockchain account 53 | pub app: DidWeb, // dapp domain url 54 | pub msg: Arc, // message 55 | } 56 | 57 | #[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone, FromRow)] 58 | pub struct JwtNotification { 59 | pub id: Uuid, 60 | pub sent_at: i64, 61 | pub r#type: Uuid, 62 | pub title: String, 63 | pub body: String, 64 | pub icon: String, 65 | pub url: String, 66 | pub is_read: bool, 67 | } 68 | -------------------------------------------------------------------------------- /src/config/mod.rs: 
-------------------------------------------------------------------------------- 1 | use { 2 | crate::{ 3 | error::NotifyServerError, rate_limit::Clock, registry::storage::redis::Addr as RedisAddr, 4 | }, 5 | relay_rpc::domain::ProjectId, 6 | std::{env, net::IpAddr}, 7 | url::Url, 8 | }; 9 | 10 | mod deployed; 11 | mod local; 12 | 13 | #[derive(Debug, Clone)] 14 | pub struct Configuration { 15 | pub public_ip: IpAddr, 16 | pub bind_ip: IpAddr, 17 | pub port: u16, 18 | pub log_level: String, 19 | pub postgres_url: String, 20 | pub postgres_max_connections: u32, 21 | pub keypair_seed: String, 22 | pub project_id: ProjectId, 23 | pub blockchain_api_endpoint: Option, 24 | /// Relay URL e.g. https://relay.walletconnect.com 25 | pub relay_url: Url, 26 | pub relay_public_key: String, 27 | /// General external URL for where the Notify Server is listening on 28 | pub notify_url: Url, 29 | /// External URL for relay to deliver webhooks too 30 | pub webhook_notify_url: Url, 31 | 32 | pub registry_url: Url, 33 | pub registry_auth_token: String, 34 | 35 | pub auth_redis_addr_read: Option, 36 | pub auth_redis_addr_write: Option, 37 | pub redis_pool_size: u32, 38 | 39 | // TELEMETRY 40 | pub telemetry_prometheus_port: Option, 41 | 42 | // AWS 43 | pub s3_endpoint: Option, 44 | 45 | // GeoIP 46 | pub geoip_db_bucket: Option, 47 | pub geoip_db_key: Option, 48 | 49 | // GeoBlocking 50 | pub blocked_countries: Vec, 51 | 52 | // Analytics 53 | pub analytics_export_bucket: Option, 54 | 55 | pub clock: Clock, 56 | } 57 | 58 | impl Configuration { 59 | pub fn auth_redis_addr(&self) -> Option { 60 | match (&self.auth_redis_addr_read, &self.auth_redis_addr_write) { 61 | (None, None) => None, 62 | (addr_read, addr_write) => Some(RedisAddr::from((addr_read, addr_write))), 63 | } 64 | } 65 | } 66 | 67 | pub async fn get_configuration() -> Result { 68 | if env::var("ENVIRONMENT") == Ok("DEPLOYED".to_owned()) { 69 | deployed::get_configuration() 70 | } else { 71 | 
local::get_configuration().await 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /terraform/ecs/cluster_autoscaling.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | autoscaling_min_capacity = module.this.stage == "prod" ? var.autoscaling_min_capacity : 1 3 | } 4 | 5 | resource "aws_appautoscaling_target" "ecs_target" { 6 | min_capacity = local.autoscaling_min_capacity 7 | max_capacity = var.autoscaling_max_capacity 8 | resource_id = "service/${aws_ecs_cluster.app_cluster.name}/${aws_ecs_service.app_service.name}" 9 | scalable_dimension = "ecs:service:DesiredCount" 10 | service_namespace = "ecs" 11 | } 12 | 13 | resource "aws_appautoscaling_policy" "ecs_target_cpu" { 14 | name = "${module.this.id}-scaling-policy-cpu" 15 | policy_type = "TargetTrackingScaling" 16 | resource_id = aws_appautoscaling_target.ecs_target.resource_id 17 | scalable_dimension = aws_appautoscaling_target.ecs_target.scalable_dimension 18 | service_namespace = aws_appautoscaling_target.ecs_target.service_namespace 19 | 20 | target_tracking_scaling_policy_configuration { 21 | predefined_metric_specification { 22 | predefined_metric_type = "ECSServiceAverageCPUUtilization" 23 | } 24 | target_value = var.autoscaling_cpu_target 25 | scale_in_cooldown = var.autoscaling_cpu_scale_in_cooldown 26 | scale_out_cooldown = var.autoscaling_cpu_scale_out_cooldown 27 | } 28 | depends_on = [aws_appautoscaling_target.ecs_target] 29 | } 30 | 31 | resource "aws_appautoscaling_policy" "ecs_target_memory" { 32 | name = "${module.this.id}-scaling-policy-memory" 33 | policy_type = "TargetTrackingScaling" 34 | resource_id = aws_appautoscaling_target.ecs_target.resource_id 35 | scalable_dimension = aws_appautoscaling_target.ecs_target.scalable_dimension 36 | service_namespace = aws_appautoscaling_target.ecs_target.service_namespace 37 | 38 | target_tracking_scaling_policy_configuration { 39 | 
predefined_metric_specification { 40 | predefined_metric_type = "ECSServiceAverageMemoryUtilization" 41 | } 42 | target_value = var.autoscaling_memory_target 43 | scale_in_cooldown = var.autoscaling_memory_scale_in_cooldown 44 | scale_out_cooldown = var.autoscaling_memory_scale_out_cooldown 45 | } 46 | depends_on = [aws_appautoscaling_target.ecs_target] 47 | } 48 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Notify Server 2 | 3 | 4 | [Notify Server Specs](https://docs.walletconnect.com/2.0/specs/servers/notify/notify-server-api) 5 | 6 | [Current documentation](https://docs.walletconnect.com/2.0/specs/servers/notify/notify-server-api) 7 | 8 | 9 | 10 | ## Development 11 | 12 | ### Dependencies 13 | 14 | - Rust 15 | - just 16 | - docker 17 | 18 | ### Devloop 19 | 20 | Runs all tests, integration tests, and deployment tests automatically. 21 | 22 | ```bash 23 | just devloop 24 | ``` 25 | 26 | ### Integration tests 27 | 28 | #### Login to ECR for IRN images 29 | 30 | - Get read-only AWS access tokens for Main AWS account. 
31 | - Run `./ops/ecr_login.sh` 32 | 33 | ```bash 34 | just run-storage-docker test-integration 35 | ``` 36 | 37 | Run a specific test: 38 | 39 | ```bash 40 | just test=test_one_project test-integration 41 | ``` 42 | 43 | ```bash 44 | just stop-storage-docker 45 | ``` 46 | 47 | ### Deployment tests 48 | 49 | ```bash 50 | cp .env.example .env 51 | nano .env 52 | ``` 53 | 54 | Note: `source .env` is unnecessary because justfile uses `set dotenv-load` 55 | 56 | ```bash 57 | just run-storage-docker unit run 58 | 59 | # With integration tests 60 | just unit run-storage-docker test-integration stop-storage-docker run-storage-docker run 61 | ``` 62 | 63 | ```bash 64 | just test-deployment 65 | ``` 66 | 67 | ```bash 68 | just stop-storage-docker 69 | ``` 70 | 71 | ## Terraform dev deployment 72 | 73 | Make sure you provide some secrets: 74 | 75 | ```bash 76 | cp .env.terraform.example .env.terraform 77 | nano .env.terraform 78 | ``` 79 | 80 | You may need to initialize submodules and Terraform: 81 | 82 | ```bash 83 | git submodule update --init --recursive 84 | terraform login 85 | terraform -chdir=terraform init 86 | ``` 87 | 88 | To deploy 89 | 90 | ```bash 91 | source .env.terraform 92 | ./terraform/deploy-dev.sh 93 | ``` 94 | 95 | If amd64 builds are too slow on your Mac (likely), consider using a remote builder on a linux/amd64 host: 96 | 97 | ```bash 98 | docker buildx create --name=remote-amd64 --driver=docker-container ssh:// 99 | BUILD_ARGS="--builder=remote-amd64 --load" ./terraform/deploy-dev.sh 100 | ``` 101 | -------------------------------------------------------------------------------- /terraform/docdb/network.tf: -------------------------------------------------------------------------------- 1 | resource "aws_docdb_subnet_group" "db_subnets" { 2 | name = module.this.id 3 | description = "Subnet group for the ${module.this.id} DocumentDB cluster" 4 | subnet_ids = var.subnet_ids 5 | } 6 | 7 | resource "aws_security_group" "db_security_group" { 8 | name = 
module.this.id 9 | description = "Security Group for the ${module.this.id} DocumentDB cluster" 10 | vpc_id = var.vpc_id 11 | } 12 | 13 | resource "aws_security_group_rule" "ingress_from_self" { 14 | count = var.allow_ingress_from_self ? 1 : 0 15 | security_group_id = aws_security_group.db_security_group.id 16 | description = "Allow traffic within the security group" 17 | type = "ingress" 18 | from_port = var.db_port 19 | to_port = var.db_port 20 | protocol = "TCP" 21 | self = true 22 | } 23 | 24 | resource "aws_security_group_rule" "ingress_security_groups" { 25 | count = length(var.allowed_security_groups) 26 | security_group_id = aws_security_group.db_security_group.id 27 | description = "Allow inbound traffic from existing Security Groups" 28 | type = "ingress" 29 | from_port = var.db_port 30 | to_port = var.db_port 31 | protocol = "TCP" 32 | source_security_group_id = element(var.allowed_security_groups, count.index) 33 | } 34 | 35 | resource "aws_security_group_rule" "ingress_cidr_blocks" { 36 | count = length(var.allowed_cidr_blocks) 37 | security_group_id = aws_security_group.db_security_group.id 38 | description = "Allow inbound traffic from CIDR blocks" 39 | type = "ingress" 40 | from_port = var.db_port 41 | to_port = var.db_port 42 | protocol = "TCP" 43 | cidr_blocks = var.allowed_cidr_blocks 44 | } 45 | 46 | resource "aws_security_group_rule" "egress" { 47 | security_group_id = aws_security_group.db_security_group.id 48 | description = "Allow outbound traffic from CIDR blocks" 49 | type = "egress" 50 | from_port = var.egress_from_port 51 | to_port = var.egress_to_port 52 | protocol = var.egress_protocol 53 | cidr_blocks = var.allowed_egress_cidr_blocks 54 | } 55 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug.yml: -------------------------------------------------------------------------------- 1 | name: Bug Report 2 | description: File a bug report 3 | title: "bug: " 4 | labels: 5 | - bug 6 | 
body: 7 | - type: markdown 8 | attributes: 9 | value: | 10 | Thanks for taking the time to fill out this bug report! 🐛 11 | - type: checkboxes 12 | attributes: 13 | label: Is there an existing issue for this? 14 | description: Please search to see if an issue already exists for the bug you encountered. 15 | options: 16 | - label: I have searched the existing issues 17 | required: true 18 | - type: textarea 19 | attributes: 20 | label: Current Behavior 21 | description: A concise description of what you're experiencing. 22 | validations: 23 | required: true 24 | - type: textarea 25 | attributes: 26 | label: Expected Behavior 27 | description: A concise description of what you expected to happen. 28 | validations: 29 | required: true 30 | - type: textarea 31 | attributes: 32 | label: Steps To Reproduce 33 | description: Steps to reproduce the behavior. 34 | placeholder: | 35 | 1. In this environment... 36 | 2. With this config... 37 | 3. Run '...' 38 | 4. See error... 39 | validations: 40 | required: true 41 | - type: textarea 42 | attributes: 43 | label: Environment 44 | description: | 45 | examples: 46 | - **OS**: MacOS Monterey 12.5 47 | - **rustc**: rustc 1.62.1 (e092d0b6b 2022-07-16) 48 | - **cargo**: cargo 1.62.1 (a748cf5a3 2022-06-08) 49 | 50 | > **Note** 51 | > If using docker image please provide docker version and the image's tag 52 | value: | 53 | - OS: 54 | - rustc: 55 | - cargo: 56 | render: markdown 57 | validations: 58 | required: false 59 | - type: textarea 60 | id: logs 61 | attributes: 62 | label: Relevant log output 63 | description: Please copy and paste any relevant log output. This will be automatically formatted into code, so no need for backticks. 64 | render: shell 65 | validations: 66 | required: false 67 | - type: textarea 68 | attributes: 69 | label: Anything else? 70 | description: | 71 | Links? References? Anything that will give us more context about the issue you are encountering! 
72 | 73 | Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in. 74 | validations: 75 | required: false 76 | -------------------------------------------------------------------------------- /migrations/ERD.md: -------------------------------------------------------------------------------- 1 | # ERD 2 | 3 | ```mermaid 4 | erDiagram 5 | project { 6 | uuid id PK 7 | string project_id 8 | string app_domain 9 | string topic 10 | string authentication_public_key 11 | string authentication_private_key 12 | string subscribe_public_key 13 | string subscribe_private_key 14 | } 15 | 16 | subscriber { 17 | uuid id PK 18 | uuid project FK 19 | string account 20 | string sym_key 21 | string topic 22 | timestamp expiry 23 | } 24 | subscriber }o--|| project : "subscribed to" 25 | 26 | subscriber_scope { 27 | uuid id PK 28 | uuid subscriber FK 29 | string name 30 | } 31 | subscriber ||--o{ subscriber_scope : "has scope" 32 | 33 | subscription_watcher { 34 | uuid id PK 35 | string account 36 | uuid project FK "NULL is all projects" 37 | string did_key 38 | string sym_key 39 | timestamp expiry 40 | } 41 | subscription_watcher }o--o| project : "watching" 42 | 43 | notification_states { 44 | enum queued 45 | enum processing 46 | enum published 47 | enum not-subscribed 48 | enum wrong-scope 49 | enum rate-limited 50 | } 51 | notification_states }|..|{ notification_status : "uses" 52 | 53 | notification { 54 | uuid id PK 55 | timestamp created_at 56 | string type 57 | string title 58 | string body 59 | string icon 60 | string url 61 | } 62 | notification }o--|| subscriber : "sent to" 63 | 64 | notification_status { 65 | timestamp created_at 66 | timestamp updated_at 67 | enum state notification_states 68 | uuid notification_id FK 69 | uuid subscriber_id FK 70 | } 71 | notification_status }o--|| notification : "for" 72 | 73 | webhook { 74 | uuid id PK 75 | uuid project FK 76 | string url 77 | } 78 | webhook }o--|| project : "watching" 79 
| 80 | webhook_type { 81 | uuid id PK 82 | uuid webhook FK 83 | enum type "subscribed, updated, unsubscribed" 84 | } 85 | webhook ||--|{ webhook_type : "has types" 86 | 87 | webhook_message { 88 | uuid id PK 89 | uuid webhook FK 90 | enum event "subscribed, updated, unsubscribed" 91 | string account 92 | timestamp created 93 | timestamp next_send 94 | } 95 | webhook_message }o--|| webhook : "send to" 96 | ``` 97 | -------------------------------------------------------------------------------- /src/analytics/relay_request.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::services::public_http_server::handlers::relay_webhook::RelayIncomingMessage, 3 | chrono::{DateTime, NaiveDateTime, Utc}, 4 | parquet_derive::ParquetRecordWriter, 5 | relay_rpc::domain::Topic, 6 | serde::Serialize, 7 | std::sync::Arc, 8 | }; 9 | 10 | pub struct RelayResponseParams { 11 | pub request: Arc, 12 | 13 | pub response_message_id: Arc, 14 | pub response_topic: Topic, 15 | pub response_tag: u32, 16 | pub response_initiated_at: DateTime, 17 | pub response_finished_at: DateTime, 18 | pub response_success: bool, 19 | } 20 | 21 | #[derive(Debug, Serialize, ParquetRecordWriter)] 22 | pub struct RelayRequest { 23 | /// Time at which the event was generated 24 | pub event_at: NaiveDateTime, 25 | 26 | /// Relay message ID of request 27 | pub request_message_id: Arc, 28 | /// Relay topic of request 29 | pub request_topic: Arc, 30 | /// Relay tag of request 31 | pub request_tag: u32, 32 | /// Time at which the request was received 33 | pub request_received_at: NaiveDateTime, 34 | 35 | /// Relay message ID of response 36 | pub response_message_id: Arc, 37 | /// Relay topic of response 38 | pub response_topic: Arc, 39 | /// Relay tag of response 40 | pub response_tag: u32, 41 | /// Time at which the publish request was initiated 42 | pub response_initiated_at: NaiveDateTime, 43 | /// Time at which the publish request stopped 44 | pub 
response_finished_at: NaiveDateTime, 45 | /// If the publish request was ultimatly successful or not 46 | pub response_success: bool, 47 | } 48 | 49 | impl From for RelayRequest { 50 | fn from(params: RelayResponseParams) -> Self { 51 | Self { 52 | event_at: wc::analytics::time::now(), 53 | 54 | request_message_id: params.request.message_id.clone(), 55 | request_topic: params.request.topic.value().clone(), 56 | request_tag: params.request.tag, 57 | request_received_at: params.request.received_at.naive_utc(), 58 | 59 | response_message_id: params.response_message_id.clone(), 60 | response_topic: params.response_topic.value().clone(), 61 | response_tag: params.response_tag, 62 | response_initiated_at: params.response_initiated_at.naive_utc(), 63 | response_finished_at: params.response_finished_at.naive_utc(), 64 | response_success: params.response_success, 65 | } 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /src/services/relay_renewal_job/mod.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::{error::NotifyServerError, metrics::Metrics}, 3 | chrono::Duration, 4 | relay_client::http::Client, 5 | relay_rpc::{auth::ed25519_dalek::SigningKey, domain::Topic}, 6 | sqlx::PgPool, 7 | std::{future::Future, sync::Arc}, 8 | tokio::{sync::Mutex, time}, 9 | tracing::{error, info, instrument}, 10 | url::Url, 11 | }; 12 | 13 | mod refresh_topic_subscriptions; 14 | mod register_webhook; 15 | 16 | pub async fn start( 17 | key_agreement_topic: Topic, 18 | webhook_notify_url: Url, 19 | keypair: SigningKey, 20 | relay_client: Arc, 21 | postgres: PgPool, 22 | metrics: Option, 23 | ) -> Result, NotifyServerError> { 24 | let period = Duration::days(1); 25 | 26 | let mut interval = time::interval(period.to_std().expect("Should be able to convert to STD")); 27 | 28 | let renew_all_topics_lock = Arc::new(Mutex::new(false)); 29 | 30 | // We must be able to run the job once on startup 
or we are non-functional 31 | // Call tick() now so that the first tick() inside the loop actually waits for the period 32 | interval.tick().await; 33 | job( 34 | key_agreement_topic.clone(), 35 | renew_all_topics_lock.clone(), 36 | &webhook_notify_url, 37 | &keypair, 38 | &relay_client, 39 | &postgres, 40 | metrics.as_ref(), 41 | ) 42 | .await?; 43 | 44 | Ok(async move { 45 | loop { 46 | interval.tick().await; 47 | info!("Running relay renewal job"); 48 | if let Err(e) = job( 49 | key_agreement_topic.clone(), 50 | renew_all_topics_lock.clone(), 51 | &webhook_notify_url, 52 | &keypair, 53 | &relay_client, 54 | &postgres, 55 | metrics.as_ref(), 56 | ) 57 | .await 58 | { 59 | error!("Error running relay renewal job: {e:?}"); 60 | // TODO metrics 61 | } 62 | } 63 | }) 64 | } 65 | 66 | #[instrument(skip_all)] 67 | async fn job( 68 | key_agreement_topic: Topic, 69 | renew_all_topics_lock: Arc>, 70 | webhook_notify_url: &Url, 71 | keypair: &SigningKey, 72 | relay_client: &Client, 73 | postgres: &PgPool, 74 | metrics: Option<&Metrics>, 75 | ) -> Result<(), NotifyServerError> { 76 | register_webhook::run(webhook_notify_url, keypair, relay_client).await?; 77 | refresh_topic_subscriptions::run( 78 | key_agreement_topic, 79 | renew_all_topics_lock, 80 | relay_client, 81 | postgres, 82 | metrics, 83 | ) 84 | .await?; 85 | Ok(()) 86 | } 87 | -------------------------------------------------------------------------------- /src/relay_client_helpers.rs: -------------------------------------------------------------------------------- 1 | use { 2 | crate::error::NotifyServerError, 3 | relay_client::{http::Client, ConnectionOptions}, 4 | relay_rpc::{ 5 | auth::{ed25519_dalek::SigningKey, AuthToken}, 6 | domain::ProjectId, 7 | user_agent::ValidUserAgent, 8 | }, 9 | std::time::Duration, 10 | url::Url, 11 | }; 12 | 13 | pub fn create_http_client( 14 | keypair: &SigningKey, 15 | relay_url: Url, 16 | notify_url: Url, 17 | project_id: ProjectId, 18 | ) -> Result { 19 | 
Ok(Client::new(&create_http_connect_options( 20 | keypair, relay_url, notify_url, project_id, 21 | )?)?) 22 | } 23 | 24 | pub fn create_http_connect_options( 25 | keypair: &SigningKey, 26 | mut relay_url: Url, 27 | notify_url: Url, 28 | project_id: ProjectId, 29 | ) -> Result { 30 | // TODO remove once switched to https 31 | relay_url 32 | .set_scheme(&relay_url.scheme().replace("ws", "http")) 33 | .map_err(|_| NotifyServerError::UrlSetScheme)?; 34 | 35 | let rpc_address = relay_url.join("/rpc")?; 36 | Ok( 37 | // HTTP client cannot currently use an expiring JWT because the same relay client is used for the entire duration of the process 38 | create_connect_options(keypair, &relay_url, notify_url, project_id, None)? 39 | .with_address(rpc_address), 40 | ) 41 | } 42 | 43 | fn create_connect_options( 44 | keypair: &SigningKey, 45 | relay_url: &Url, 46 | notify_url: Url, 47 | project_id: ProjectId, 48 | ttl: Option, 49 | ) -> Result { 50 | let auth = AuthToken::new(notify_url.clone()) 51 | .aud(relay_url.origin().ascii_serialization()) 52 | .ttl(ttl) 53 | .as_jwt(keypair)?; 54 | 55 | let user_agent = relay_rpc::user_agent::UserAgent::ValidUserAgent(ValidUserAgent { 56 | protocol: relay_rpc::user_agent::Protocol { 57 | kind: relay_rpc::user_agent::ProtocolKind::WalletConnect, 58 | version: 2, 59 | }, 60 | sdk: relay_rpc::user_agent::Sdk { 61 | language: relay_rpc::user_agent::SdkLanguage::Rust, 62 | version: env!("CARGO_PKG_VERSION").to_string(), 63 | }, 64 | os: relay_rpc::user_agent::OsInfo { 65 | os_family: "ECS".into(), 66 | ua_family: None, 67 | version: None, 68 | }, 69 | id: Some(relay_rpc::user_agent::Id { 70 | environment: relay_rpc::user_agent::Environment::Unknown("Notify Server".into()), 71 | host: Some(notify_url.to_string()), 72 | }), 73 | }); 74 | 75 | Ok(ConnectionOptions::new(project_id, auth).with_user_agent(user_agent)) 76 | } 77 | -------------------------------------------------------------------------------- 
/src/analytics/subscriber_notification.rs:
--------------------------------------------------------------------------------
// Analytics (parquet) record emitted when a notification is sent to a
// subscriber.
// NOTE(review): generic type parameters (presumably `Arc<str>` on the `Arc`
// fields and `From<SubscriberNotificationParams>` below) appear to have been
// stripped from this dump by the extraction tool; code tokens are left as-is
// — confirm against the original source.
use {
    crate::model::types::AccountId,
    parquet_derive::ParquetRecordWriter,
    relay_rpc::domain::{ProjectId, Topic},
    serde::Serialize,
    std::sync::Arc,
    uuid::Uuid,
};

// Raw identifiers for one sent notification; flattened into the
// `SubscriberNotification` analytics row by the `From` impl below.
pub struct SubscriberNotificationParams {
    pub project_pk: Uuid,
    pub project_id: ProjectId,
    pub subscriber_pk: Uuid,
    pub account: AccountId,
    pub subscriber_notification_pk: Uuid,
    pub notification_pk: Uuid,
    pub notification_type: Uuid,
    pub notify_topic: Topic,
    pub message_id: Arc,
}

#[derive(Debug, Serialize, ParquetRecordWriter)]
pub struct SubscriberNotification {
    /// Time at which the event was generated
    pub event_at: chrono::NaiveDateTime,
    /// Primary key of the project in the Notify Server database that the notification was sent from and the subscriber is subscribed to
    pub project_pk: String,
    /// Project ID of the project that the notification was sent from and the subscriber is subscribed to
    pub project_id: Arc,
    /// Primary key of the subscriber in the Notify Server database that the notification is being sent to
    pub subscriber_pk: String,
    /// The CAIP-10 account of the subscriber
    pub account: String,
    /// SHA-256 hash of the CAIP-10 account of the subscriber
    pub account_hash: String,
    /// The ID of the subscriber-specific notification
    pub subscriber_notification_pk: String,
    /// The ID of the notification
    pub notification_pk: String,
    /// The notification type ID
    pub notification_type: String,
    /// The topic that the notification was sent on
    pub notification_topic: Arc,
    /// Relay message ID of the notification
    pub message_id: Arc,
}

impl From for SubscriberNotification {
    fn from(params: SubscriberNotificationParams) -> Self {
        Self {
            // Timestamp assigned at conversion time, not at send time.
            event_at: wc::analytics::time::now(),
            project_pk: params.project_pk.to_string(),
            project_id: params.project_id.into_value(),
            subscriber_pk: params.subscriber_pk.to_string(),
            account: params.account.to_string(),
            account_hash: sha256::digest(params.account.as_ref()),
            subscriber_notification_pk: params.subscriber_notification_pk.to_string(),
            notification_pk: params.notification_pk.to_string(),
            notification_type: params.notification_type.to_string(),
            notification_topic: params.notify_topic.into_value(),
            message_id: params.message_id,
        }
    }
}
--------------------------------------------------------------------------------
/terraform/monitoring/panels/lb/error_5xx.libsonnet:
--------------------------------------------------------------------------------
local grafana = import '../../grafonnet-lib/grafana.libsonnet';
local defaults = import '../../grafonnet-lib/defaults.libsonnet';

local panels = grafana.panels;
local targets = grafana.targets;

local threshold = 100; // Reset to 0 after https://github.com/WalletConnect/notify-server/issues/374 is resolved

local _configuration = defaults.configuration.timeseries
  .withSoftLimit(
    axisSoftMin = 0,
    axisSoftMax = threshold * 1.2,
  )
  .withThresholdStyle(grafana.fieldConfig.thresholdStyle.Dashed)
  .addThreshold({
    color : defaults.values.colors.critical,
    value : threshold,
  });

// Alert fires when either the ELB-generated or target-generated 5xx count
// exceeds its evaluator threshold (50 for ELB, `threshold` for targets).
local _alert(namespace, env, notifications) = grafana.alert.new(
  namespace = namespace,
  name = "%(env)s - 5XX alert" % { env: grafana.utils.strings.capitalize(env) },
  message = '%(env)s - Notify - 5XX alert' % { env: grafana.utils.strings.capitalize(env) },
  notifications = notifications,
  noDataState = 'no_data',
  period = '0m',
  conditions = [
    grafana.alertCondition.new(
      evaluatorParams = [ 50 ],
      evaluatorType = 'gt',
      operatorType = 'or',
      queryRefId =
'ELB',
      queryTimeStart = '5m',
      queryTimeEnd = 'now',
      reducerType = grafana.alert_reducers.Avg
    ),
    grafana.alertCondition.new(
      evaluatorParams = [ threshold ],
      evaluatorType = 'gt',
      operatorType = 'or',
      queryRefId = 'Target',
      queryTimeStart = '5m',
      queryTimeEnd = 'now',
      reducerType = grafana.alert_reducers.Avg
    ),
  ],
);

{
  new(ds, vars)::
    panels.timeseries(
      title = 'HTTP 5xx Rate',
      datasource = ds.cloudwatch,
    )
    .configure(_configuration)
    .addPanelThreshold(
      op = 'gt',
      value = threshold,
    )

    .setAlert(
      vars.environment,
      _alert(vars.namespace, vars.environment, vars.notifications)
    )

    // 5xx responses generated by the load balancer itself.
    .addTarget(targets.cloudwatch(
      alias = 'ELB',
      datasource = ds.cloudwatch,
      namespace = 'AWS/ApplicationELB',
      metricName = 'HTTPCode_ELB_5XX_Count',
      dimensions = {
        LoadBalancer: vars.load_balancer
      },
      statistic = 'Sum',
      refId = 'ELB',
    ))
    // 5xx responses returned by the application targets.
    .addTarget(targets.cloudwatch(
      alias = 'Target',
      datasource = ds.cloudwatch,
      namespace = 'AWS/ApplicationELB',
      metricName = 'HTTPCode_Target_5XX_Count',
      dimensions = {
        LoadBalancer: vars.load_balancer
      },
      statistic = 'Sum',
      refId = 'Target',
    ))
}
--------------------------------------------------------------------------------
/migrations/20231018121518_init.sql:
--------------------------------------------------------------------------------
-- Initial Notify Server schema.

-- Projects (dapps) registered with the Notify Server, with their key pairs.
CREATE TABLE project (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    inserted_at TIMESTAMPTZ NOT NULL DEFAULT now(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT now(),

    project_id VARCHAR(255) NOT NULL UNIQUE,
    app_domain VARCHAR(255) NOT NULL UNIQUE,
    topic VARCHAR(255) NOT NULL UNIQUE,

    authentication_public_key VARCHAR(255) NOT NULL UNIQUE,
    authentication_private_key VARCHAR(255) NOT NULL UNIQUE,
    subscribe_public_key VARCHAR(255) NOT NULL UNIQUE,
    subscribe_private_key VARCHAR(255) NOT NULL UNIQUE
);
CREATE INDEX projects_project_id_idx ON project (project_id);
CREATE INDEX projects_app_domain_idx ON project (app_domain);
CREATE INDEX projects_topic_idx ON project (topic);

-- Accounts subscribed to a project; one row per (project, account) pair.
-- Rows are removed when the owning project is deleted (ON DELETE CASCADE).
CREATE TABLE subscriber (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    inserted_at TIMESTAMPTZ NOT NULL DEFAULT now(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT now(),

    project UUID NOT NULL REFERENCES project (id) ON DELETE CASCADE,
    account VARCHAR(255) NOT NULL,
    sym_key VARCHAR(255) NOT NULL UNIQUE,
    topic VARCHAR(255) NOT NULL UNIQUE,
    expiry TIMESTAMPTZ NOT NULL,

    UNIQUE (project, account)
);
CREATE INDEX subscribers_project_idx ON subscriber (project);
CREATE INDEX subscribers_account_idx ON subscriber (account);
CREATE INDEX subscribers_topic_idx ON subscriber (topic);
CREATE INDEX subscribers_expiry_idx ON subscriber (expiry);

-- Notification type names enabled per subscriber.
CREATE TABLE subscriber_scope (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    inserted_at TIMESTAMPTZ NOT NULL DEFAULT now(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT now(),

    subscriber UUID NOT NULL REFERENCES subscriber (id) ON DELETE CASCADE,
    name VARCHAR(255) NOT NULL,

    UNIQUE (subscriber, name)
);
CREATE INDEX subscriber_scope_subscriber_idx ON subscriber_scope (subscriber);
CREATE INDEX subscriber_scope_name_idx ON subscriber_scope (name);

-- Watchers of subscription state for an account; `project` is nullable
-- (unlike subscriber.project), so a watcher may span all projects.
CREATE TABLE subscription_watcher (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    inserted_at TIMESTAMPTZ NOT NULL DEFAULT now(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT now(),

    account VARCHAR(255) NOT NULL,
    project UUID REFERENCES project (id) ON DELETE CASCADE,
    did_key VARCHAR(255) NOT NULL UNIQUE,
    sym_key VARCHAR(255) NOT NULL,
    expiry TIMESTAMPTZ NOT NULL
);
CREATE INDEX subscription_watcher_account_idx ON subscription_watcher (account);
CREATE INDEX subscription_watcher_project_idx ON subscription_watcher (project);
CREATE INDEX subscription_watcher_did_key_idx ON subscription_watcher (did_key);
CREATE INDEX subscription_watcher_expiry_idx ON subscription_watcher (expiry);
--------------------------------------------------------------------------------
/src/model/types/account_id/mod.rs:
--------------------------------------------------------------------------------
// NOTE(review): several generic parameters in this file (e.g. on `Arc`,
// `TryFrom`, `Serialize`) appear to have been stripped by the extraction
// tool; code tokens are left as-is — confirm against the original source.
use {
    self::caip10::{validate_caip_10, Caip10Error},
    relay_rpc::auth::did::{combine_did_data, extract_did_data, DidError},
    serde::{Deserialize, Serialize},
    std::sync::Arc,
};

pub mod caip10;
pub mod eip155;
pub mod erc55;

#[derive(
    Debug,
    Hash,
    Clone,
    PartialEq,
    Eq,
    ::derive_more::Display,
    ::derive_more::From,
    ::derive_more::AsRef,
)]
#[doc = "A CAIP-10 account ID."]
#[as_ref(forward)]
pub struct AccountId(Arc);

impl AccountId {
    // Borrow the inner shared string.
    pub fn value(&self) -> &Arc {
        &self.0
    }

    // Consume self, returning the inner shared string.
    pub fn into_value(self) -> Arc {
        self.0
    }
}

impl TryFrom for AccountId {
    type Error = Caip10Error;

    fn try_from(s: String) -> Result {
        Self::try_from(s.as_ref())
    }
}

impl TryFrom<&str> for AccountId {
    type Error = Caip10Error;

    fn try_from(s: &str) -> Result {
        // Reject anything that is not a valid CAIP-10 identifier.
        validate_caip_10(s)?;
        Ok(Self(Arc::from(s)))
    }
}

impl Serialize for AccountId {
    fn serialize(&self, serializer: S) -> Result {
        serializer.serialize_str(self.as_ref())
    }
}

impl<'a> Deserialize<'a> for AccountId {
    fn deserialize>(deserializer: D) -> Result {
        let s = String::deserialize(deserializer)?;
        // Deserialization goes through TryFrom, so invalid CAIP-10 input
        // becomes a serde error rather than an invalid AccountId.
        Self::try_from(s).map_err(serde::de::Error::custom)
    }
}

#[derive(Debug, thiserror::Error)]
pub enum
AccountIdParseError {
    #[error(transparent)]
    Caip10(#[from] Caip10Error),

    #[error("DID error: {0}")]
    Did(#[from] DidError),
}

// did:pkh method name used when (de)serializing accounts as DIDs.
const DID_METHOD_PKH: &str = "pkh";

impl AccountId {
    // Parses a `did:pkh:<caip-10>` string, validating the CAIP-10 part via
    // the TryFrom impl above.
    pub fn from_did_pkh(did: &str) -> Result {
        extract_did_data(did, DID_METHOD_PKH)
            .map_err(AccountIdParseError::Did)?
            .try_into()
            .map_err(AccountIdParseError::Caip10)
    }

    // Formats this account as a `did:pkh:...` string.
    pub fn to_did_pkh(&self) -> String {
        combine_did_data(DID_METHOD_PKH, self.as_ref())
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn to_did_pkh() {
        let address = "eip155:1:0x9AfEaC202C837df470b5A145e0EfD6a574B21029";
        let account_id = AccountId::try_from(address).unwrap();
        assert_eq!(account_id.to_did_pkh(), format!("did:pkh:{address}"));
    }

    #[test]
    fn from_did_pkh() {
        let address = "eip155:1:0x9AfEaC202C837df470b5A145e0EfD6a574B21029";
        let account_id = AccountId::from_did_pkh(&format!("did:pkh:{address}")).unwrap();
        assert_eq!(account_id.as_ref(), address);
    }
}
--------------------------------------------------------------------------------
/src/registry/extractor.rs:
--------------------------------------------------------------------------------
// NOTE(review): generic parameters on `FromRequestParts`, `Path`, `Arc` and
// `TypedHeader` appear stripped by the extraction tool; tokens left as-is.
use {
    crate::state::AppState,
    async_trait::async_trait,
    axum::{
        extract::{FromRequestParts, Path},
        http::request::Parts,
    },
    axum_extra::{
        headers::{authorization::Bearer, Authorization},
        TypedHeader,
    },
    hyper::StatusCode,
    relay_rpc::domain::ProjectId,
    serde_json::json,
    std::{collections::HashMap, sync::Arc},
    tracing::warn,
};

/// Extracts project_id from uri and project_secret from Authorization header.
/// Verifies their correctness against registry and returns AuthedProjectId
/// struct.
pub struct AuthedProjectId(pub ProjectId, pub String);

#[async_trait]
impl FromRequestParts> for AuthedProjectId {
    type Rejection = (StatusCode, String);

    async fn from_request_parts(
        parts: &mut Parts,
        state: &Arc,
    ) -> Result {
        // Pull the path parameters; 400 if they cannot be extracted at all.
        let Path(path_args) = Path::>::from_request_parts(parts, state)
            .await
            .map_err(|_| {
                (
                    StatusCode::BAD_REQUEST,
                    json!({
                        "reason": "Invalid project_id. Please make sure to include project_id in uri. "
                    }).to_string(),
                )
            })?;

        // The Bearer token in the Authorization header is the project secret;
        // 401 if the header is missing or malformed.
        let TypedHeader(project_secret) = TypedHeader::>::from_request_parts(parts, state).await.map_err(|_| {
            (
                StatusCode::UNAUTHORIZED,
                json!({
                    "reason": "Unauthorized. Please make sure to include project secret in Authorization header. "
                }).to_string(),
            )
        })?;

        // 400 if the route matched without a `project_id` path parameter.
        let project_id: ProjectId = path_args
            .get("project_id")
            .ok_or((
                StatusCode::BAD_REQUEST,
                json!({"reason": "Missing project_id parameter".to_string()}).to_string(),
            ))?
            .to_owned()
            .into();

        // Verify the (project_id, secret) pair against the registry service;
        // a registry lookup failure is reported as 400, not 401.
        let authenticated = state
            .registry
            .is_authenticated(project_id.clone(), project_secret.token())
            .await
            .map_err(|e| {
                warn!(?e, "Failed to authenticate project");
                (
                    StatusCode::BAD_REQUEST,
                    "Invalid data for authentication".to_string(),
                )
            })?;

        if !authenticated {
            return Err((
                StatusCode::UNAUTHORIZED,
                json!({
                    "reason": "Invalid project_secret. Please make sure to include proper project secret in Authorization header."
                })
                .to_string(),
            ));
        };

        Ok(AuthedProjectId(
            project_id,
            project_secret.token().to_string(),
        ))
    }
}
--------------------------------------------------------------------------------
/.github/workflows/dispatch_deploy.yml:
--------------------------------------------------------------------------------
name: ⚙️ Deploy
run-name: "Deploy: ${{ github.sha }} ➠ ${{ inputs.version-type }}:${{ inputs.version-tag }}${{ (!inputs.deploy-infra && !inputs.deploy-app) && ' 👀 deploy nothing' || ''}}${{ inputs.deploy-infra && ' ❱❱  infra' || '' }}${{ inputs.deploy-app && ' ❱❱  app' || '' }}"

on:
  workflow_dispatch:
    inputs:
      deploy-infra:
        description: "Deploy Infra"
        default: true
        required: true
        type: boolean
      deploy-app:
        description: "Deploy App"
        default: true
        required: true
        type: boolean
      stage:
        description: 'Target Environment'
        type: choice
        options:
          - staging
          - prod
        default: staging
        required: true
      version-type:
        description: "Release Version"
        type: choice
        options:
          - latest
          - current
          - manual
        default: 'latest'
        required: true
      version-tag:
        description: "Release Version Tag (for manual version)"
        type: string
        default: ''

# Only one deploy may run at a time.
concurrency: deploy

permissions:
  contents: write
  checks: write
  id-token: write
  packages: write

jobs:
  # Looks up the currently-deployed version; only runs for version-type=current.
  get_deployed_version:
    name: Lookup Version
    if: ${{ inputs.version-type == 'current' }}
    secrets: inherit
    uses: WalletConnect/ci_workflows/.github/workflows/release-get_deployed_version.yml@0.2.9
    with:
      task-name: ${{ vars.IMAGE_NAME }}
      aws-region: ${{ vars.AWS_REGION }}
      aws-role-arn: ${{vars.AWS_ROLE_PROD}}

  select_version:
    name: Select Version
    needs: [ get_deployed_version ]
    # Runs even when get_deployed_version is skipped (latest/manual).
    if: ${{ always() && !cancelled() && !failure() }}
runs-on: ubuntu-latest 63 | steps: 64 | - name: Checkout repository 65 | uses: actions/checkout@v4 66 | with: 67 | fetch-depth: 15 68 | fetch-tags: true 69 | - name: Select target version 70 | id: select_version 71 | run: | 72 | if [ "${{ inputs.version-type }}" == "current" ]; then 73 | echo "version=${{ needs.get_deployed_version.outputs.version }}" >> "$GITHUB_OUTPUT" 74 | elif [ "${{ inputs.version-type }}" == "latest" ]; then 75 | echo "version=$(git tag | sort --version-sort | tail -n1)" >> "$GITHUB_OUTPUT" 76 | else 77 | echo "version=${{ inputs.version-tag }}" >> "$GITHUB_OUTPUT" 78 | fi 79 | outputs: 80 | version: ${{ steps.select_version.outputs.version }} 81 | 82 | cd: 83 | name: CD 84 | uses: ./.github/workflows/sub-cd.yml 85 | needs: [ select_version ] 86 | if: ${{ always() && !cancelled() && !failure() }} 87 | secrets: inherit 88 | with: 89 | deploy-infra: ${{ inputs.deploy-infra }} 90 | deploy-app: ${{ inputs.deploy-app }} 91 | deploy-prod: ${{ inputs.stage == 'prod' }} 92 | version: ${{ needs.select_version .outputs.version }} 93 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # 3 | # Build args 4 | # 5 | ################################################################################ 6 | ARG base="rust:buster" 7 | ARG runtime="debian:buster-slim" 8 | ARG bin="notify-server" 9 | ARG version="unknown" 10 | ARG sha="unknown" 11 | ARG maintainer="WalletConnect" 12 | ARG release="" 13 | 14 | ################################################################################ 15 | # 16 | # Install cargo-chef 17 | # 18 | ################################################################################ 19 | FROM ${base} AS chef 20 | 21 | WORKDIR /app 22 | RUN cargo install cargo-chef 23 | 24 | 
################################################################################
#
# Generate recipe file
#
################################################################################
FROM chef AS plan

WORKDIR /app
COPY Cargo.lock Cargo.toml ./
COPY src ./src
# Produce the dependency "recipe" so the build stage can cache compiled deps.
RUN cargo chef prepare --recipe-path recipe.json

################################################################################
#
# Build the binary
#
################################################################################
FROM chef AS build

# Empty unless the `release` build-arg is set; then expands to `--release`.
ARG release
ENV RELEASE=${release:+--release}

WORKDIR /app
# Cache dependencies
COPY --from=plan /app/recipe.json recipe.json
RUN cargo chef cook --recipe-path recipe.json ${RELEASE}
# Build the local binary
COPY . .
RUN cargo build --bin notify-server ${RELEASE}
# Certificate file required to use TLS with AWS DocumentDB.
# NOTE(review): rds-combined-ca-bundle.pem is AWS's legacy combined bundle;
# AWS now distributes global-bundle.pem — confirm before switching.
RUN wget https://s3.amazonaws.com/rds-downloads/rds-combined-ca-bundle.pem
################################################################################
#
# Runtime image
#
################################################################################
FROM ${runtime} AS runtime

ARG bin
ARG version
ARG sha
ARG maintainer
ARG release
# `release` builds land in target/release; otherwise target/debug (see COPY).
ARG binpath=${release:+release}

LABEL version=${version}
LABEL sha=${sha}
LABEL maintainer=${maintainer}

WORKDIR /app
COPY --from=build /app/target/${binpath:-debug}/notify-server /usr/local/bin/notify-server
COPY --from=build /app/rds-combined-ca-bundle.pem /app/rds-combined-ca-bundle.pem
RUN apt-get update \
    && apt-get install -y --no-install-recommends ca-certificates libssl-dev \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# Run as a non-root user.
USER 1001:1001
ENTRYPOINT ["/usr/local/bin/notify-server"]
--------------------------------------------------------------------------------
/terraform/monitoring/panels/lb/error_4xx.libsonnet:
--------------------------------------------------------------------------------
local grafana = import '../../grafonnet-lib/grafana.libsonnet';
local defaults = import '../../grafonnet-lib/defaults.libsonnet';

local panels = grafana.panels;
local targets = grafana.targets;

// Panel threshold for the Percent4xx math target below.
local threshold = 80;

local _configuration = defaults.configuration.timeseries
  .withSoftLimit(
    axisSoftMin = 0,
    axisSoftMax = 200,
  );

# Currently unused: legacy alerts cannot evaluate the Percent4xx math target,
# so the .setAlert call below is commented out.
local _alert(namespace, env, notifications) = grafana.alert.new(
  namespace = namespace,
  name = "%(env)s - 4XX alert" % { env: grafana.utils.strings.capitalize(env) },
  message = '%(env)s - Too many 4XX' % { env: grafana.utils.strings.capitalize(env) },
  notifications = notifications,
  noDataState = 'no_data',
  conditions = [
    grafana.alertCondition.new(
      evaluatorParams = [ threshold ],
      evaluatorType = 'gt',
      operatorType = 'or',
      queryRefId = 'Percent4xx',
      queryTimeStart = '30m',
      queryTimeEnd = 'now',
      reducerType = grafana.alert_reducers.Avg
    ),
  ],
);

{
  new(ds, vars)::
    panels.timeseries(
      title = '4XX',
      datasource = ds.cloudwatch,
    )
    .configure(_configuration)
    .addPanelThreshold(
      op = 'gt',
      value = threshold,
    )

    // Cannot alert based on math expressions with legacy alerts:
    // https://grafana.com/docs/grafana/latest/panels-visualizations/query-transform-data/expression-queries/
    // .setAlert(
    //   vars.environment,
    //   _alert(vars.namespace, vars.environment, vars.notifications)
    // )

    // 4xx responses generated by the load balancer itself.
    .addTarget(targets.cloudwatch(
      alias = 'ELB',
      datasource = ds.cloudwatch,
      namespace = 'AWS/ApplicationELB',
      metricName = 'HTTPCode_ELB_4XX_Count',
      dimensions = {
        LoadBalancer: vars.load_balancer
      },
      matchExact = true,
      statistic = 'Sum',
      refId = 'ELB',
    ))
    // 4xx responses returned by the application targets.
    .addTarget(targets.cloudwatch(
      alias = 'Target',
      datasource = ds.cloudwatch,
      namespace = 'AWS/ApplicationELB',
      metricName = 'HTTPCode_Target_4XX_Count',
      dimensions = {
        LoadBalancer: vars.load_balancer
      },
      matchExact = true,
      statistic = 'Sum',
      refId = 'Target',
    ))
    // Hidden helper series used only by the Percent4xx math target.
    .addTarget(targets.cloudwatch(
      alias = 'Target2xx',
      datasource = ds.cloudwatch,
      namespace = 'AWS/ApplicationELB',
      metricName = 'HTTPCode_Target_2XX_Count',
      dimensions = {
        LoadBalancer: vars.load_balancer
      },
      matchExact = true,
      statistic = 'Sum',
      refId = 'Target2xx',
      hide = true,
    ))
    // Hidden math target: 4xx as a percentage of (2xx + 4xx) target traffic.
    .addTarget(targets.math(
      expr = '$Target / ($Target2xx + $Target) * 100',
      refId = "Percent4xx",
      hide = true,
    ))
}
--------------------------------------------------------------------------------
/terraform/monitoring/panels/app/http_request_latency.libsonnet:
--------------------------------------------------------------------------------
local grafana = import '../../grafonnet-lib/grafana.libsonnet';
local defaults = import '../../grafonnet-lib/defaults.libsonnet';

local panels = grafana.panels;
local targets = grafana.targets;

{
  new(ds, vars)::
    panels.timeseries(
      title = 'HTTP Req Latency',
      datasource = ds.prometheus,
    )
    .configure(
      defaults.configuration.timeseries
      .withUnit('ms')
    )

    // Alert on average latency across all endpoints.
    .setAlert(vars.environment, grafana.alert.new(
      namespace = vars.namespace,
      name = '%(env)s - HTTP request latency too high' % { env: vars.environment },
      message = '%(env)s - HTTP request latency too high' % { env: vars.environment },
      notifications = vars.notifications,
      noDataState = 'no_data',
      conditions = [
        grafana.alertCondition.new(
          evaluatorParams = [ 10000 ],
evaluatorType = 'gt', 28 | operatorType = 'or', 29 | queryRefId = 'HttpRequestLatency', 30 | queryTimeStart = '5m', 31 | queryTimeEnd = 'now', 32 | reducerType = grafana.alert_reducers.Avg 33 | ), 34 | ], 35 | )) 36 | 37 | .setAlert(vars.environment, grafana.alert.new( 38 | namespace = vars.namespace, 39 | name = '%(env)s - HTTP (filtered) request latency too high' % { env: vars.environment }, 40 | message = '%(env)s - HTTP (filtered) request latency too high' % { env: vars.environment }, 41 | notifications = vars.notifications, 42 | noDataState = 'no_data', 43 | conditions = [ 44 | grafana.alertCondition.new( 45 | evaluatorParams = [ 2000 ], 46 | evaluatorType = 'gt', 47 | operatorType = 'or', 48 | queryRefId = 'FilteredHttpRequestLatency', 49 | queryTimeStart = '5m', 50 | queryTimeEnd = 'now', 51 | reducerType = grafana.alert_reducers.Avg 52 | ), 53 | ], 54 | )) 55 | 56 | .addTarget(targets.prometheus( 57 | datasource = ds.prometheus, 58 | expr = 'sum by (aws_ecs_task_revision, method, endpoint) (rate(http_request_latency_sum[$__rate_interval])) / sum by (aws_ecs_task_revision, method, endpoint) (rate(http_request_latency_count[$__rate_interval]))', 59 | legendFormat = '{{method}} {{endpoint}} r{{aws_ecs_task_revision}}', 60 | exemplar = false, 61 | refId = 'HttpRequestLatency', 62 | )) 63 | 64 | .addTarget(targets.prometheus( 65 | datasource = ds.prometheus, 66 | expr = 'sum by (aws_ecs_task_revision, method, endpoint) (rate(http_request_latency_sum{endpoint!~"^(/:project_id/subscribers|/v1/relay-webhook)"}[$__rate_interval])) / sum by (aws_ecs_task_revision, method, endpoint) (rate(http_request_latency_count{endpoint!="/:project_id/subscribers"}[$__rate_interval]))', 67 | legendFormat = '{{method}} {{endpoint}} r{{aws_ecs_task_revision}}', 68 | exemplar = false, 69 | refId = 'FilteredHttpRequestLatency', 70 | hide = true, 71 | )) 72 | } 73 | --------------------------------------------------------------------------------