├── .dockerignore ├── .env.example ├── .github ├── ISSUE_TEMPLATE │ ├── bug.yml │ ├── docs.yml │ └── feature.yml ├── RELEASE.md ├── actions │ └── prepare │ │ └── action.yml ├── codecov.yml ├── dependabot.yml ├── pr-title-checker-config.json ├── pull_request_template.md ├── release-please │ ├── .config.json │ └── manifest.json └── workflows │ ├── ci.yaml │ ├── cla.yml │ ├── pr-title.yaml │ ├── rc.yml │ ├── release-docker.yml │ ├── release-docs.yml │ ├── release-please.yml │ ├── release-sbom.yml │ ├── rust-docs-url.yml │ ├── scorecard.yml │ └── update-lock.yaml ├── .gitignore ├── .pre-commit-config.yaml ├── .pre-commit-hooks.yaml ├── .yamlfix.toml ├── Brewfile.netlify ├── CHANGELOG.md ├── CODEOWNERS ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── Cargo.lock ├── Cargo.toml ├── DOCKER_README.md ├── Dockerfile.development ├── Dockerfile.production ├── LICENSE ├── Makefile.toml ├── README.md ├── SECURITY.md ├── _typos.toml ├── cmd └── prometheus │ ├── dashboards │ ├── dashboard.yml │ └── relayer_app_metrics_dashboard.json │ ├── datasources │ └── prometheus.yml │ ├── grafana.ini │ └── prometheus.yml ├── committed.toml ├── config ├── config.example.json └── networks │ ├── arbitrum.json │ ├── bsc.json │ ├── ethereum.json │ ├── linea.json │ ├── mantle.json │ ├── optimism.json │ ├── other-l1s.json │ ├── polygon.json │ ├── scroll.json │ ├── solana.json │ ├── stellar.json │ └── zksync.json ├── docker-compose.yaml ├── docs ├── README.md ├── antora.yml ├── modules │ └── ROOT │ │ ├── nav.adoc │ │ └── pages │ │ ├── api_reference.adoc │ │ ├── index.adoc │ │ ├── quickstart.adoc │ │ ├── roadmap.adoc │ │ ├── solana.adoc │ │ └── structure.adoc ├── package-lock.json └── package.json ├── examples ├── basic-example-logging │ ├── .env.example │ ├── README.md │ ├── config │ │ └── config.json │ └── docker-compose.yaml ├── basic-example-metrics │ ├── .env.example │ ├── README.md │ ├── config │ │ └── config.json │ └── docker-compose.yaml ├── basic-example │ ├── .env.example │ ├── README.md │ ├── 
config │ │ └── config.json │ └── docker-compose.yaml ├── evm-turnkey-signer │ ├── README.md │ ├── config │ │ └── config.json │ └── docker-compose.yaml ├── solana-google-cloud-kms-signer │ ├── .env.example │ ├── README.md │ ├── config │ │ └── config.json │ └── docker-compose.yaml ├── solana-turnkey-signer │ ├── .env.example │ ├── README.md │ ├── config │ │ └── config.json │ └── docker-compose.yaml ├── vault-secret-signer │ ├── .env.example │ ├── README.md │ ├── config │ │ └── config.json │ └── docker-compose.yaml └── vault-transit-signer │ ├── .env.example │ ├── README.md │ ├── config │ └── config.json │ └── docker-compose.yaml ├── helpers ├── create_key.rs ├── generate_openapi.rs ├── generate_uuid.rs └── test_tx.rs ├── netlify.toml ├── plugins ├── example.ts ├── package.json ├── pnpm-lock.yaml └── tsconfig.json ├── rust-toolchain.toml ├── rustfmt.toml ├── scripts ├── docker_compose.sh └── rust_antora.sh ├── src ├── api │ ├── controllers │ │ ├── mod.rs │ │ └── relayer.rs │ ├── mod.rs │ └── routes │ │ ├── docs │ │ ├── mod.rs │ │ └── relayer_docs.rs │ │ ├── health.rs │ │ ├── metrics.rs │ │ ├── mod.rs │ │ └── relayer.rs ├── bootstrap │ ├── config_processor.rs │ ├── initialize_app_state.rs │ ├── initialize_relayers.rs │ ├── initialize_workers.rs │ └── mod.rs ├── config │ ├── config_file │ │ ├── mod.rs │ │ ├── network │ │ │ ├── collection.rs │ │ │ ├── common.rs │ │ │ ├── evm.rs │ │ │ ├── file_loading.rs │ │ │ ├── inheritance.rs │ │ │ ├── mod.rs │ │ │ ├── solana.rs │ │ │ ├── stellar.rs │ │ │ └── test_utils.rs │ │ ├── notification.rs │ │ ├── plugin.rs │ │ ├── relayer.rs │ │ └── signer │ │ │ ├── google_cloud_kms.rs │ │ │ ├── local.rs │ │ │ ├── mod.rs │ │ │ ├── turnkey.rs │ │ │ ├── vault.rs │ │ │ ├── vault_cloud.rs │ │ │ └── vault_transit.rs │ ├── error.rs │ ├── mod.rs │ ├── rate_limit.rs │ └── server_config.rs ├── constants │ ├── authorization.rs │ ├── evm_transaction.rs │ ├── mod.rs │ ├── oracles.rs │ ├── public_endpoints.rs │ ├── relayer.rs │ ├── retry.rs │ ├── 
stellar_transaction.rs │ ├── token.rs │ ├── validation.rs │ └── worker.rs ├── domain │ ├── mod.rs │ ├── relayer │ │ ├── evm │ │ │ ├── evm_relayer.rs │ │ │ ├── mod.rs │ │ │ └── validations.rs │ │ ├── mod.rs │ │ ├── solana │ │ │ ├── dex │ │ │ │ ├── jupiter_swap.rs │ │ │ │ ├── jupiter_ultra.rs │ │ │ │ └── mod.rs │ │ │ ├── mod.rs │ │ │ ├── rpc │ │ │ │ ├── handler.rs │ │ │ │ ├── methods │ │ │ │ │ ├── fee_estimate.rs │ │ │ │ │ ├── get_features_enabled.rs │ │ │ │ │ ├── get_supported_tokens.rs │ │ │ │ │ ├── mod.rs │ │ │ │ │ ├── prepare_transaction.rs │ │ │ │ │ ├── sign_and_send_transaction.rs │ │ │ │ │ ├── sign_transaction.rs │ │ │ │ │ ├── test_setup.rs │ │ │ │ │ ├── transfer_transaction.rs │ │ │ │ │ ├── utils.rs │ │ │ │ │ └── validations.rs │ │ │ │ └── mod.rs │ │ │ ├── solana_relayer.rs │ │ │ └── token.rs │ │ ├── stellar │ │ │ ├── mod.rs │ │ │ └── stellar_relayer.rs │ │ └── util.rs │ └── transaction │ │ ├── evm │ │ ├── evm_transaction.rs │ │ ├── mod.rs │ │ ├── price_calculator.rs │ │ ├── status.rs │ │ └── utils.rs │ │ ├── mod.rs │ │ ├── solana │ │ ├── mod.rs │ │ └── solana_transaction.rs │ │ ├── stellar │ │ ├── lane_gate.rs │ │ ├── mod.rs │ │ ├── prepare.rs │ │ ├── status.rs │ │ ├── stellar_transaction.rs │ │ ├── submit.rs │ │ ├── test_helpers.rs │ │ └── utils.rs │ │ └── util.rs ├── jobs │ ├── handlers │ │ ├── mod.rs │ │ ├── notification_handler.rs │ │ ├── solana_swap_request_handler.rs │ │ ├── transaction_request_handler.rs │ │ ├── transaction_status_handler.rs │ │ └── transaction_submission_handler.rs │ ├── job.rs │ ├── job_producer.rs │ ├── mod.rs │ ├── queue.rs │ └── retry_backoff.rs ├── lib.rs ├── logging │ └── mod.rs ├── main.rs ├── metrics │ ├── README.md │ ├── middleware.rs │ └── mod.rs ├── models │ ├── address.rs │ ├── api_response.rs │ ├── app_state.rs │ ├── error │ │ ├── address.rs │ │ ├── api.rs │ │ ├── mod.rs │ │ ├── network.rs │ │ ├── provider.rs │ │ ├── relayer.rs │ │ ├── repository.rs │ │ ├── signer.rs │ │ └── transaction.rs │ ├── mod.rs │ ├── network │ │ 
├── evm │ │ │ ├── mod.rs │ │ │ └── network.rs │ │ ├── mod.rs │ │ ├── repository.rs │ │ ├── solana │ │ │ ├── mod.rs │ │ │ └── network.rs │ │ └── stellar │ │ │ ├── mod.rs │ │ │ └── network.rs │ ├── notification │ │ ├── mod.rs │ │ ├── repository.rs │ │ └── webhook_notification.rs │ ├── pagination.rs │ ├── plain_or_env_value.rs │ ├── plugin.rs │ ├── relayer │ │ ├── mod.rs │ │ ├── repository.rs │ │ ├── response.rs │ │ └── rpc_config.rs │ ├── rpc │ │ ├── evm │ │ │ └── mod.rs │ │ ├── mod.rs │ │ ├── solana │ │ │ └── mod.rs │ │ └── stellar │ │ │ └── mod.rs │ ├── secret_string.rs │ ├── signer │ │ ├── mod.rs │ │ └── repository.rs │ ├── transaction │ │ ├── mod.rs │ │ ├── repository.rs │ │ ├── request │ │ │ ├── evm.rs │ │ │ ├── mod.rs │ │ │ ├── solana.rs │ │ │ └── stellar.rs │ │ ├── response.rs │ │ └── stellar_types.rs │ └── types.rs ├── openapi.rs ├── repositories │ ├── mod.rs │ ├── network.rs │ ├── notification.rs │ ├── plugin.rs │ ├── relayer.rs │ ├── signer.rs │ ├── transaction.rs │ └── transaction_counter.rs ├── services │ ├── gas │ │ ├── evm_gas_price.rs │ │ ├── mod.rs │ │ ├── network_extra_fee.rs │ │ └── optimism_extra_fee.rs │ ├── google_cloud_kms │ │ └── mod.rs │ ├── jupiter │ │ └── mod.rs │ ├── mod.rs │ ├── notification │ │ └── mod.rs │ ├── plugins │ │ └── mod.rs │ ├── provider │ │ ├── evm │ │ │ └── mod.rs │ │ ├── mod.rs │ │ ├── retry.rs │ │ ├── rpc_selector.rs │ │ ├── solana │ │ │ └── mod.rs │ │ └── stellar │ │ │ └── mod.rs │ ├── signer │ │ ├── evm │ │ │ ├── local_signer.rs │ │ │ ├── mod.rs │ │ │ └── turnkey_signer.rs │ │ ├── mod.rs │ │ ├── solana │ │ │ ├── google_cloud_kms_signer.rs │ │ │ ├── local_signer.rs │ │ │ ├── mod.rs │ │ │ ├── turnkey_signer.rs │ │ │ └── vault_transit_signer.rs │ │ └── stellar │ │ │ ├── local_signer.rs │ │ │ └── mod.rs │ ├── transaction_counter │ │ └── mod.rs │ ├── turnkey │ │ └── mod.rs │ └── vault │ │ └── mod.rs └── utils │ ├── auth.rs │ ├── base64.rs │ ├── key.rs │ ├── mod.rs │ ├── serde │ ├── field_as_string.rs │ ├── mod.rs │ ├── 
u128_deserializer.rs │ └── u64_deserializer.rs │ ├── time.rs │ └── transaction.rs ├── tests ├── integration.rs ├── integration │ ├── logging.rs │ └── metrics.rs ├── properties.rs ├── properties │ └── logging.rs └── utils │ └── test_keys │ └── unit-test-local-signer.json └── typos.toml /.dockerignore: -------------------------------------------------------------------------------- 1 | # ignore all .git files and directories 2 | .git* 3 | target/ 4 | logs/ 5 | docs/ 6 | 7 | # plugins dependencies 8 | plugins/**/node_modules 9 | -------------------------------------------------------------------------------- /.env.example: -------------------------------------------------------------------------------- 1 | # For defaults see: ./docker-compose.yml 2 | # Create a .env file in the root of the project and add your environment variables there 3 | LOG_LEVEL=debug 4 | CONFIG_DIR=./config 5 | CONFIG_FILE_NAME=config.json 6 | WEBHOOK_SIGNING_KEY= 7 | API_KEY= 8 | RATE_LIMIT_RPS=100 9 | RATE_LIMIT_BURST_SIZE=300 10 | METRICS_ENABLED=false 11 | REDIS_URL=redis://localhost:6379 12 | REDIS_CONNECTION_TIMEOUT_MS=10000 13 | RPC_TIMEOUT_MS=10000 14 | ENABLE_SWAGGER=false 15 | KEYSTORE_PASSPHRASE= # This is the passphrase for the keystore file 16 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug Report 3 | description: Create a bug report 4 | labels: [T-bug, S-needs-triage] 5 | body: 6 | - type: markdown 7 | attributes: 8 | value: | 9 | Thanks for taking the time to fill out this bug report! Please provide as much detail as possible. 10 | 11 | If you believe you have found a vulnerability, please provide details [here](mailto:security@openzeppelin.com) instead. 
12 | - type: textarea 13 | id: what-happened 14 | attributes: 15 | label: Describe the bug 16 | description: | 17 | A clear and concise description of what the bug is. 18 | 19 | If the bug is in a crate you are using (i.e. you are not running the standard `openzeppelin-relayer` binary) please mention that as well. 20 | validations: 21 | required: true 22 | - type: textarea 23 | id: reproduction-steps 24 | attributes: 25 | label: Steps to reproduce 26 | description: Please provide any steps you think might be relevant to reproduce 27 | the bug. 28 | placeholder: | 29 | Steps to reproduce: 30 | 31 | 1. Start '...' 32 | 2. Then '...' 33 | 3. Check '...' 34 | 4. See error 35 | validations: 36 | required: true 37 | - type: textarea 38 | id: logs 39 | attributes: 40 | label: Application logs 41 | description: | 42 | Please provide the relevant application logs leading up to the bug. 43 | render: text 44 | validations: 45 | required: false 46 | - type: dropdown 47 | id: platform 48 | attributes: 49 | label: Platform(s) 50 | description: What platform(s) did this occur on? 51 | multiple: true 52 | options: 53 | - Linux (x86) 54 | - Linux (ARM) 55 | - Mac (Intel) 56 | - Mac (Apple Silicon) 57 | - Windows (x86) 58 | - Windows (ARM) 59 | - type: dropdown 60 | id: deployment 61 | attributes: 62 | label: Deployment Type 63 | description: How are you running openzeppelin-relayer? 64 | multiple: false 65 | options: 66 | - Binary from releases 67 | - Built from source 68 | - Docker container 69 | - Other 70 | validations: 71 | required: true 72 | - type: textarea 73 | id: version 74 | attributes: 75 | label: Version Information 76 | description: Run `openzeppelin-relayer --version` and paste the output 77 | validations: 78 | required: true 79 | - type: textarea 80 | id: relayer-config 81 | attributes: 82 | label: Monitor Configuration 83 | description: | 84 | Please provide the relevant relayer configuration file(s) from your config directory. 
85 | Make sure to remove any sensitive information like private keys or API tokens. 86 | render: json 87 | validations: 88 | required: false 89 | - type: dropdown 90 | id: network-type 91 | attributes: 92 | label: Network Type 93 | description: Which blockchain network(s) are you relaying transactions on? 94 | multiple: true 95 | options: 96 | - EVM 97 | - Stellar 98 | - Solana 99 | - Other 100 | validations: 101 | required: true 102 | - type: input 103 | id: build-command 104 | attributes: 105 | label: Build Command 106 | description: If you built from source, what command did you use? 107 | placeholder: cargo build --release 108 | validations: 109 | required: false 110 | - type: checkboxes 111 | id: terms 112 | attributes: 113 | label: Code of Conduct 114 | description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/openzeppelin/openzeppelin-relayer/blob/main/CONTRIBUTING.md#code-of-conduct) 115 | options: 116 | - label: I agree to follow the Code of Conduct 117 | required: true 118 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/docs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Documentation 3 | description: Suggest a change to our documentation 4 | labels: [T-documentation, S-needs-triage] 5 | body: 6 | - type: markdown 7 | attributes: 8 | value: | 9 | If you are unsure if the docs are relevant or needed, please open up a discussion first. 10 | - type: textarea 11 | attributes: 12 | label: Describe the change 13 | description: | 14 | Please describe the documentation you want to change or add, and if it is for end-users or contributors. 
15 | validations: 16 | required: true 17 | - type: textarea 18 | attributes: 19 | label: Additional context 20 | description: Add any other context to the feature (like screenshots, resources) 21 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | description: Suggest a feature 4 | labels: [T-feature, S-needs-triage] 5 | body: 6 | - type: markdown 7 | attributes: 8 | value: | 9 | Please ensure that the feature has not already been requested in the issue tracker. 10 | - type: textarea 11 | attributes: 12 | label: Describe the feature 13 | description: | 14 | Please describe the feature and what it is aiming to solve, if relevant. 15 | 16 | If the feature is for a crate, please include a proposed API surface. 17 | validations: 18 | required: true 19 | - type: textarea 20 | attributes: 21 | label: Additional context 22 | description: Add any other context to the feature (like screenshots, resources) 23 | -------------------------------------------------------------------------------- /.github/RELEASE.md: -------------------------------------------------------------------------------- 1 | # CI/CD Release Workflow 2 | 3 | --- 4 | 5 | ## Workflows 6 | 7 | - To trigger a release, use [rc.yml](workflows/rc.yml) workflow. 8 | - It will need specific commit SHA in long format to start the workflow. 9 | - All the commits until that specific commit sha will be included in the release. 10 | - Checks version from `Cargo.toml` and validates if it needs to creates a new release branch. If there is a release branch that already exists for the same version in `Cargo.toml` the workflow will fail. 11 | - Release branch is created in this pattern `release-v`. 12 | 13 | - Second workflow [release-please.yml](workflows/release-please.yml) will get triggered on push to release branch automatically. 
14 | - This workflow checks if there is any "higher versioned" branches than the current one since this workflow will be triggered for any pushes ( eg. hotfixes ). 15 | - We use [release-please](https://github.com/googleapis/release-please) for managing releases. If there are no "higher versioned" branches release-please step will be triggered. 16 | - Release please automatically creates a PR with Changelog notes to release branch which keeps track of all commits in that release branch and adds a label `autorelease: pending`. It uses [config](release-please/.config.json) & [manifest](release-please/manifest.json) files to generate changelog and track versions. If there are any changes to `Cargo.lock` that commit is pushed to the PR. 17 | - Once approved merge the PR. On merging `release-please` automatically creates a github release with changelog notes & tags the release with that version. 18 | - Workflow has a step to unlock conversation in the now closed PR so that release-please can post a comment and update the label `autorelease: tagged`. 19 | - SBOM generation & Docker build and push jobs are triggered. 20 | 21 | - If everything looks good post release, raise a PR and merge the `release-v` branch to main (manual step for now). 22 | -------------------------------------------------------------------------------- /.github/actions/prepare/action.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Cache 3 | description: Caches cargo dependencies 4 | inputs: 5 | components: 6 | description: Additional Rust components to install (comma separated). rustfmt and clippy are always included. 
7 | required: false 8 | default: '' 9 | outputs: 10 | cache-hit: 11 | description: Cache Hit 12 | value: ${{ steps.cache.outputs.cache-hit }} 13 | runs: 14 | using: composite 15 | steps: 16 | - name: setup rust tool chain 17 | uses: dtolnay/rust-toolchain@1.86.0 # v1.86.0 18 | with: 19 | components: ${{ (inputs.components != '') && format('{0}, rustfmt, clippy', inputs.components) || 'rustfmt, clippy' }} 20 | - name: Install libsodium 21 | run: sudo apt-get update && sudo apt-get install -y libsodium-dev 22 | shell: bash 23 | - name: Restore cargo dependencies from cache 24 | uses: Swatinem/rust-cache@f0deed1e0edfc6a9be95417288c0e1099b1eeec3 # v2.7.7 25 | id: cache 26 | -------------------------------------------------------------------------------- /.github/codecov.yml: -------------------------------------------------------------------------------- 1 | --- 2 | coverage: 3 | range: 90..100 4 | round: down 5 | precision: 1 6 | status: 7 | project: 8 | default: 9 | target: 80% 10 | threshold: 1% 11 | flags: 12 | - integration 13 | - properties 14 | - unittests 15 | patch: 16 | default: 17 | target: 90% 18 | threshold: 1% 19 | 20 | ignore: 21 | - tests/**/* 22 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: 2 3 | # opt in to updates for ecosystems that are not yet GA. 
4 | enable-beta-ecosystems: true 5 | updates: 6 | # Maintain dependencies for GitHub Actions 7 | - package-ecosystem: github-actions 8 | directory: / 9 | schedule: 10 | interval: monthly 11 | commit-message: 12 | # Prefix all commit messages with "chore(deps): " 13 | prefix: 'chore(deps): ' 14 | 15 | # Maintain dependencies for cargo 16 | - package-ecosystem: cargo 17 | directory: / 18 | schedule: 19 | interval: monthly 20 | ignore: 21 | - dependency-name: '*' 22 | update-types: 23 | - version-update:semver-major 24 | commit-message: 25 | # Prefix all commit messages 26 | prefix: 'chore(deps): ' 27 | labels: 28 | - dependabot 29 | - dependencies 30 | # Allow up to 10 open pull requests for testing 31 | open-pull-requests-limit: 5 32 | -------------------------------------------------------------------------------- /.github/pr-title-checker-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "LABEL": { 3 | "name": "PR title checker needs attention", 4 | "color": "EEEEEE" 5 | }, 6 | "CHECKS": { 7 | "regexp": "^(fix|feat|docs|chore|refactor|style|ci|revert|test)!?(\\(.*\\))?!?:.*" 8 | }, 9 | "MESSAGES": { 10 | "success": "PR title is valid", 11 | "failure": "PR title is invalid", 12 | "notice": "Title needs to pass regex '^(fix|feat|docs|chore|refactor|style|ci|revert|test)!?(\\(.*\\))?!?:.*" 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | # Summary 2 | 3 | ## Testing Process 4 | 5 | ## Checklist 6 | 7 | - [ ] Add a reference to related issues in the PR description. 8 | - [ ] Add unit tests if applicable. 
9 | -------------------------------------------------------------------------------- /.github/release-please/.config.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://raw.githubusercontent.com/googleapis/release-please/main/schemas/config.json", 3 | "packages": { 4 | ".": {} 5 | }, 6 | "release-type": "rust", 7 | "include-component-in-tag": false, 8 | "pull-request-title-pattern": "chore: Release version ${version}", 9 | "pull-request-header": "🤖 Created a Release ⚡ ⚡", 10 | "pull-request-footer": "Merge this pull request to trigger the next release.", 11 | "changelog-sections": [ 12 | { 13 | "type": "feat", 14 | "section": "🚀 Features" 15 | }, 16 | { 17 | "type": "fix", 18 | "section": "🐛 Bug Fixes" 19 | }, 20 | { 21 | "type": "revert", 22 | "section": "◀️ Reverts" 23 | }, 24 | { 25 | "type": "chore", 26 | "section": "⚙️ Miscellaneous Chores", 27 | "hidden": true 28 | }, 29 | { 30 | "type": "docs", 31 | "section": "📚 Documentation", 32 | "hidden": true 33 | }, 34 | { 35 | "type": "style", 36 | "section": "🎨 Styles", 37 | "hidden": true 38 | }, 39 | { 40 | "type": "refactor", 41 | "section": "🚜 Code Refactoring", 42 | "hidden": true 43 | }, 44 | { 45 | "type": "test", 46 | "section": "🧪 Tests", 47 | "hidden": true 48 | }, 49 | { 50 | "type": "build", 51 | "section": "🛠️ Build System", 52 | "hidden": true 53 | }, 54 | { 55 | "type": "ci", 56 | "section": "🥏 Continuous Integration", 57 | "hidden": true 58 | } 59 | ], 60 | "extra-files": [ 61 | { 62 | "type": "toml", 63 | "path": "Cargo.toml", 64 | "jsonpath": "package.version" 65 | } 66 | ] 67 | } 68 | -------------------------------------------------------------------------------- /.github/release-please/manifest.json: -------------------------------------------------------------------------------- 1 | {".":"0.2.0"} 2 | -------------------------------------------------------------------------------- /.github/workflows/pr-title.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | name: PR Title 3 | 4 | on: 5 | pull_request: 6 | branches: 7 | - main 8 | types: [opened, edited, reopened, synchronize] 9 | 10 | jobs: 11 | validate: 12 | runs-on: ubuntu-latest 13 | steps: 14 | - uses: thehanimo/pr-title-checker@7fbfe05602bdd86f926d3fb3bccb6f3aed43bc70 # v1.4.3 15 | with: 16 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 17 | configuration_path: .github/pr-title-checker-config.json 18 | -------------------------------------------------------------------------------- /.github/workflows/rc.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: RC for Major/Minor Releases 3 | on: 4 | workflow_dispatch: 5 | inputs: 6 | commit_sha: 7 | description: Long form commit SHA to create release branch from 8 | required: true 9 | type: string 10 | permissions: 11 | contents: write 12 | pull-requests: write 13 | # run concurrency group for the workflow 14 | concurrency: 15 | group: ${{ github.workflow }}-${{ github.ref }} 16 | cancel-in-progress: false 17 | jobs: 18 | create-release-branch: 19 | runs-on: ubuntu-latest 20 | steps: 21 | - uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2.0.6 22 | id: gh-app-token 23 | with: 24 | app-id: ${{ vars.GH_APP_ID }} 25 | private-key: ${{ secrets.GH_APP_PRIVATE_KEY }} 26 | - name: Validate Commit SHA 27 | run: | 28 | if [[ ! "$INPUT_COMMIT_SHA" =~ ^[0-9a-f]{40}$ ]]; then 29 | echo "Invalid commit SHA: $INPUT_COMMIT_SHA. Please provide the full 40-character SHA." 
30 | echo "Provided SHA: $INPUT_COMMIT_SHA" 31 | echo "Length: ${#$INPUT_COMMIT_SHA}" 32 | exit 1 33 | fi 34 | echo "Valid commit SHA: $INPUT_COMMIT_SHA" 35 | env: 36 | INPUT_COMMIT_SHA: ${{ github.event.inputs.commit_sha }} 37 | - name: Checkout repository at commit SHA 38 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 39 | with: 40 | ref: ${{ github.event.inputs.commit_sha }} 41 | fetch-depth: 0 42 | token: ${{ steps.gh-app-token.outputs.token }} 43 | - name: Get version from Cargo.toml 44 | id: get_version 45 | run: | 46 | # Extract the version from manifest.json 47 | version=$(jq -r '.[ "."]' .github/release-please/manifest.json) 48 | if [ -z "$version" ]; then 49 | echo "Error: Version not found in manifest.json" 50 | exit 1 51 | fi 52 | echo "Version found: $version" 53 | # Get current version 54 | IFS='.' read -r major minor patch <<< "$version" 55 | new_minor=$((minor + 1)) 56 | new_version="${major}.${new_minor}.0" 57 | echo "New version: $new_version" 58 | echo "version=$new_version" >> $GITHUB_OUTPUT 59 | - name: Set release branch name 60 | id: set_branch 61 | run: | 62 | branch="release-v${{ steps.get_version.outputs.version }}" 63 | echo "release_branch=$branch" >> $GITHUB_ENV 64 | echo "Release branch will be: $branch" 65 | - name: Check if release branch exists 66 | id: check_branch 67 | run: | 68 | branch="release-v${{ steps.get_version.outputs.version }}" 69 | if git ls-remote --exit-code --heads origin "$branch" > /dev/null 2>&1; then 70 | echo "exists=true" >> $GITHUB_OUTPUT 71 | else 72 | echo "exists=false" >> $GITHUB_OUTPUT 73 | fi 74 | - name: Create release branch 75 | id: update_branch 76 | shell: bash 77 | run: |- 78 | branch="release-v${{ steps.get_version.outputs.version }}" 79 | commit_sha="${{ github.event.inputs.commit_sha }}" 80 | echo "branch=$branch" >> $GITHUB_OUTPUT 81 | if [ "${{ steps.check_branch.outputs.exists }}" == "true" ]; then 82 | echo "Branch '$branch' already exists. Exiting with error." 
83 | exit 1 84 | else 85 | echo "Branch '$branch' does not exist. Creating new branch from commit $commit_sha." 86 | git checkout -b $branch $commit_sha 87 | git push -f origin $branch 88 | fi 89 | -------------------------------------------------------------------------------- /.github/workflows/release-sbom.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Generate SBOM 3 | on: 4 | workflow_call: 5 | inputs: 6 | tag: 7 | type: string 8 | description: The tag to use for generating SBOM. 9 | required: true 10 | jobs: 11 | sbom: 12 | name: Generate SBOM 13 | runs-on: ubuntu-latest 14 | environment: release 15 | env: 16 | SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} 17 | SLACK_CHANNEL: '#oss-releases' 18 | steps: 19 | - name: Get github app token 20 | uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2.0.6 21 | id: gh-app-token 22 | with: 23 | app-id: ${{ vars.GH_APP_ID }} 24 | private-key: ${{ secrets.GH_APP_PRIVATE_KEY }} 25 | - name: Checkout tag 26 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 27 | with: 28 | ref: ${{ inputs.tag }} 29 | token: ${{ steps.gh-app-token.outputs.token }} 30 | - name: Slack notification 31 | uses: act10ns/slack@44541246747a30eb3102d87f7a4cc5471b0ffb7d # v2.1.0 32 | with: 33 | status: starting 34 | steps: ${{ toJson(steps) }} 35 | channel: ${{ env.SLACK_CHANNEL }} 36 | message: Starting generating sbom for ${{ github.repository }} with tag ${{ inputs.tag }}...... 
37 | if: always() 38 | - name: Run SBOM 39 | uses: anchore/sbom-action@e11c554f704a0b820cbf8c51673f6945e0731532 # v0.20.0 40 | with: 41 | upload-artifact-retention: 7 42 | upload-release-assets: false 43 | github-token: ${{ steps.gh-app-token.outputs.token }} 44 | output-file: openzeppelin-relayer-${{ inputs.tag }}-spdx.json 45 | artifact-name: openzeppelin-relayer-${{ inputs.tag }}-spdx.json 46 | - name: Upload Release Artifact 47 | env: 48 | GH_TOKEN: ${{ steps.gh-app-token.outputs.token }} 49 | run: gh release upload openzeppelin-relayer-${{ inputs.tag }}-spdx.json 50 | - name: SBOM attestation 51 | uses: actions/attest-build-provenance@db473fddc028af60658334401dc6fa3ffd8669fd # main 52 | with: 53 | subject-path: ./openzeppelin-relayer-${{ inputs.tag }}-spdx.json 54 | github-token: ${{ steps.gh-app-token.outputs.token }} 55 | - name: Slack notification 56 | uses: act10ns/slack@44541246747a30eb3102d87f7a4cc5471b0ffb7d # v2.1.0 57 | with: 58 | status: ${{ job.status }} 59 | steps: ${{ toJson(steps) }} 60 | channel: ${{ env.SLACK_CHANNEL }} 61 | message: Generating sbom ${{ job.status }} 62 | if: always() 63 | -------------------------------------------------------------------------------- /.github/workflows/rust-docs-url.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Update Technical Docs Link 3 | on: 4 | workflow_dispatch: {} 5 | push: 6 | branches: 7 | - docs-v* 8 | workflow_call: 9 | inputs: 10 | branch: 11 | type: string 12 | description: The branch to update the technical docs link. 
13 | required: true 14 | permissions: 15 | actions: read 16 | contents: write 17 | pull-requests: write 18 | packages: write 19 | id-token: write 20 | jobs: 21 | update-docs-link: 22 | runs-on: ubuntu-latest 23 | environment: release 24 | steps: 25 | - uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2.0.6 26 | id: gh-app-token 27 | with: 28 | app-id: ${{ vars.GH_APP_ID }} 29 | private-key: ${{ secrets.GH_APP_PRIVATE_KEY }} 30 | - name: Checkout repository 31 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 32 | with: 33 | token: ${{ steps.gh-app-token.outputs.token }} 34 | - name: Use branch input 35 | id: get_branch 36 | run: | 37 | if [ -n "${{ inputs.branch }}" ]; then 38 | echo "Using provided branch input: ${{ inputs.branch }}" 39 | echo "branch=${{ inputs.branch }}" >> $GITHUB_OUTPUT 40 | else 41 | echo "No branch input provided, deriving from GITHUB_REF" 42 | branch="${GITHUB_REF#refs/heads/}" 43 | echo "Derived branch from ref: $branch" 44 | echo "branch=$branch" >> $GITHUB_OUTPUT 45 | fi 46 | - name: Update the technical docs link in nav.adoc 47 | id: update-docs 48 | run: | 49 | branch="${{ steps.get_branch.outputs.branch }}" 50 | # Netlify uses `-` instead of `.` in branch names for constructing the URL 51 | slug="${branch//./-}" 52 | target="https://${slug}%2D%2Dopenzeppelin-relayer.netlify.app" 53 | file="docs/modules/ROOT/nav.adoc" 54 | if grep -q "${target}" "${file}"; then 55 | echo "nav.adoc is already using ${target}" 56 | echo "changed=false" >> $GITHUB_OUTPUT 57 | exit 0 58 | fi 59 | echo "Updating to branch URL: ${target}" 60 | sed -i -E "s|(https://)[^/]*openzeppelin-relayer.netlify.app|${target}|g" "${file}" 61 | echo "Updated nav.adoc to use ${target}" 62 | echo "changed=true" >> $GITHUB_OUTPUT 63 | - name: Create Pull Request to update the technical docs version 64 | if: ${{ steps.get_branch.outputs.branch != '' && steps.update-docs.outputs.changed == 'true' }} 65 | uses: 
peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8 66 | with: 67 | token: ${{ steps.gh-app-token.outputs.token }} 68 | title: 'docs: Update technical docs version in the nav.adoc file' 69 | body: Automatically generated PR to update technical docs version in the nav.adoc file. 70 | branch-suffix: short-commit-hash 71 | sign-commits: true 72 | commit-message: 'docs: update technical docs version in the nav.adoc file' 73 | -------------------------------------------------------------------------------- /.github/workflows/scorecard.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This workflow uses actions that are not certified by GitHub. They are provided 3 | # by a third-party and are governed by separate terms of service, privacy 4 | # policy, and support documentation. 5 | name: Scorecard supply-chain security 6 | on: 7 | # For Branch-Protection check. Only the default branch is supported. See 8 | # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection 9 | branch_protection_rule: 10 | # To guarantee Maintained check is occasionally updated. See 11 | # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained 12 | schedule: 13 | - cron: 35 1 * * 6 14 | push: 15 | branches: 16 | - main 17 | # Declare default permissions as read only. 18 | permissions: read-all 19 | jobs: 20 | analysis: 21 | name: Scorecard analysis 22 | runs-on: ubuntu-latest 23 | permissions: 24 | # Needed to upload the results to code-scanning dashboard. 25 | security-events: write 26 | # Needed to publish results and get a badge (see publish_results below). 27 | id-token: write 28 | # Uncomment the permissions below if installing in a private repository. 
29 | # contents: read 30 | # actions: read 31 | steps: 32 | - name: Harden Runner 33 | uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 34 | with: 35 | egress-policy: audit 36 | - name: Checkout code 37 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 38 | with: 39 | persist-credentials: false 40 | - name: Run analysis 41 | uses: ossf/scorecard-action@05b42c624433fc40578a4040d5cf5e36ddca8cde # v2.4.2 42 | with: 43 | results_file: results.sarif 44 | results_format: sarif 45 | publish_results: true 46 | 47 | # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF 48 | # format to the repository Actions tab. 49 | - name: Upload artifact 50 | uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 51 | with: 52 | name: SARIF file 53 | path: results.sarif 54 | retention-days: 5 55 | - name: Upload SARIF to GitHub Code Scanning 56 | uses: github/codeql-action/upload-sarif@fca7ace96b7d713c7035871441bd52efbe39e27e # v3.28.19 57 | with: 58 | sarif_file: results.sarif 59 | -------------------------------------------------------------------------------- /.github/workflows/update-lock.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Nightly Cargo.lock update 3 | on: 4 | schedule: 5 | - cron: 0 12 * * * 6 | workflow_dispatch: {} 7 | permissions: 8 | actions: read 9 | contents: write 10 | pull-requests: write 11 | id-token: write 12 | jobs: 13 | update-lock: 14 | runs-on: ubuntu-latest 15 | steps: 16 | - name: Checkout 17 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 18 | with: 19 | fetch-depth: 0 20 | token: ${{ secrets.GITHUB_TOKEN }} 21 | ref: main 22 | - name: Prepare 23 | id: init 24 | uses: ./.github/actions/prepare 25 | 26 | # Get the output of the prepare composite action 27 | - name: Get cache-hit output 28 | run: 'echo "Cache hit >>>>>: ${{ 
steps.init.outputs.cache-hit }}"' 29 | - name: Cargo Update 30 | id: lock-file-commit 31 | run: |- 32 | cargo update 33 | git add Cargo.lock 34 | if ! git diff --cached --quiet Cargo.lock; then 35 | echo "changes=true" >> $GITHUB_OUTPUT 36 | else 37 | echo "Cargo.lock has no changes, skipping commit and push." 38 | exit 0 39 | fi 40 | - name: Create or update pull request 41 | uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8 42 | with: 43 | token: ${{ secrets.GITHUB_TOKEN }} 44 | title: 'chore: Updating lock file' 45 | sign-commits: true 46 | branch: update-cargo-lock 47 | commit-message: 'chore: Updating lock file' 48 | body: |- 49 | This PR is generated automatically by GitHub Actions. 50 | It contains all dependency updates since the last run. 51 | base: main 52 | labels: dependencies, automation 53 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | debug/ 4 | target/ 5 | 6 | 7 | # These are backup files generated by rustfmt 8 | **/*.rs.bk 9 | 10 | # MSVC Windows builds of rustc generate these, which store debugging information 11 | *.pdb 12 | 13 | # RustRover 14 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 15 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 16 | # and can be added to the global gitignore or merged into this file. For a more nuclear 17 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
18 | #.idea/ 19 | .DS_Store 20 | 21 | config.json 22 | local-signer.json 23 | 24 | !examples/**/*.json 25 | 26 | 27 | # docs 28 | docs/build/ 29 | rust_docs 30 | 31 | # logs 32 | logs/ 33 | 34 | # env 35 | .env 36 | 37 | # Ignore node_modules 38 | node_modules 39 | .qodo 40 | 41 | # Ignore local signer keystore files everywhere 42 | **/local-signer.json 43 | 44 | openapi.json 45 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | minimum_pre_commit_version: 3.5.0 3 | default_install_hook_types: 4 | - commit-msg 5 | - pre-commit 6 | - pre-push 7 | default_stages: 8 | - pre-commit 9 | - pre-push 10 | ci: 11 | autofix_commit_msg: 'chore(pre-commit): autofix run' 12 | autoupdate_commit_msg: 'chore(pre-commit): autoupdate hooks' 13 | repos: 14 | - repo: local 15 | hooks: 16 | - id: pre-commit 17 | name: Update pre-commit 18 | entry: pre-commit install --install-hooks -t pre-commit -t pre-push 19 | pass_filenames: false 20 | language: system 21 | files: ^\.pre-commit-config.yaml$ 22 | - id: rustfmt 23 | name: rustfmt 24 | entry: cargo fmt 25 | pass_filenames: false 26 | language: system 27 | types: 28 | - rust 29 | - id: clippy 30 | name: clippy 31 | entry: cargo clippy --all-targets --all-features -- -D warnings 32 | pass_filenames: false 33 | language: system 34 | - repo: https://github.com/pre-commit/pre-commit-hooks 35 | rev: v5.0.0 36 | hooks: 37 | - id: check-json 38 | exclude: ^plugins/tsconfig\.json$ 39 | - id: check-toml 40 | - id: check-merge-conflict 41 | - id: check-case-conflict 42 | - id: detect-private-key 43 | - id: trailing-whitespace 44 | - id: end-of-file-fixer 45 | - repo: https://github.com/lyz-code/yamlfix/ 46 | rev: 1.17.0 47 | hooks: 48 | - id: yamlfix 49 | args: 50 | - -c 51 | - .yamlfix.toml 52 | - repo: https://github.com/crate-ci/committed 53 | rev: v1.1.5 54 | hooks: 55 | - id: committed 56 | 
stages: 57 | - commit-msg 58 | - repo: https://github.com/crate-ci/typos 59 | rev: v1.29.4 60 | hooks: 61 | - id: typos 62 | - repo: https://github.com/compilerla/conventional-pre-commit 63 | rev: v3.4.0 64 | hooks: 65 | - id: conventional-pre-commit 66 | stages: 67 | - commit-msg 68 | args: 69 | - --strict 70 | - build 71 | - chore 72 | - ci 73 | - docs 74 | - feat 75 | - fix 76 | - perf 77 | - refactor 78 | - revert 79 | - style 80 | - test 81 | -------------------------------------------------------------------------------- /.pre-commit-hooks.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - id: conventional-pre-commit 4 | name: Conventional Commit 5 | entry: conventional-pre-commit 6 | language: python 7 | description: Checks commit message for Conventional Commits formatting 8 | always_run: true 9 | stages: [commit-msg] 10 | -------------------------------------------------------------------------------- /.yamlfix.toml: -------------------------------------------------------------------------------- 1 | allow_duplicate_keys = false 2 | line_length = 280 3 | sequence_style = "block_style" 4 | -------------------------------------------------------------------------------- /Brewfile.netlify: -------------------------------------------------------------------------------- 1 | brew 'libsodium' 2 | -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @OpenZeppelin/defender-sre @OpenZeppelin/defender-dev 2 | 3 | SECURITY.md @OpenZeppelin/product-security @OpenZeppelin/defender-sre @OpenZeppelin/defender-dev 4 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "openzeppelin-relayer" 3 | version = "0.2.0" 4 | edition = "2021" 5 | 
rust-version = "1.85" #MSRV 6 | 7 | [profile.release] 8 | opt-level = 0 9 | overflow-checks = false 10 | panic = "unwind" 11 | 12 | [profile.test] 13 | debug = true 14 | opt-level = 0 15 | overflow-checks = true 16 | panic = "unwind" 17 | 18 | [dependencies] 19 | actix-web = "4" 20 | log = "0.4" 21 | simplelog = "0.12" 22 | prometheus = "0.14" 23 | lazy_static = "1.5" 24 | dotenvy = "0.15" 25 | thiserror = "2" 26 | async-trait = "0.1" 27 | actix-rt = "2.0.0" 28 | alloy = { version = "0.9", features = ["full"] } 29 | serde_json = "1" 30 | strum = { version = "0.27", default-features = false, features = ["derive"] } 31 | strum_macros = "0.27" 32 | serde = { version = "1.0", features = ["derive", "alloc"] } 33 | num_enum = { version = "0.7", default-features = false } 34 | once_cell = "1.17" 35 | regex = "1" 36 | futures = "0.3" 37 | uuid = { version = "1.11", features = ["v4"] } 38 | chrono = "0.4" 39 | eyre = "0.6" 40 | color-eyre = "0.6" 41 | apalis = { version = "0.7", features = ["limit", "retry", "catch-panic", "timeout"] } 42 | apalis-redis = { version = "0.7" } 43 | apalis-cron = { version = "0.7" } 44 | redis = { version = "0.31" } 45 | tokio = { version = "1.43", features = ["sync", "io-util", "time"] } 46 | rand = "0.9" 47 | parking_lot = "0.12" 48 | tower = "0.5" 49 | oz-keystore = { version = "0.1.4"} 50 | hex = { version = "0.4"} 51 | bytes = { version = "1.9" } 52 | reqwest = { version = "0.12", features = ["json"] } 53 | base64 = { version = "0.22" } 54 | hmac = { version = "0.12" } 55 | sha2 = { version = "0.10" } 56 | dashmap = { version = "6.1" } 57 | actix-governor = "0.8" 58 | solana-sdk = { version = "2.2" } 59 | solana-client = { version = "2.2" } 60 | spl-token = { version = "8" } 61 | spl-token-2022 = { version = "8" } 62 | mpl-token-metadata = { version = "5.1" } 63 | sysinfo = "0.35" 64 | bincode = { version = "1.3" } 65 | bs58 = "0.5" 66 | spl-associated-token-account = "6.0.0" 67 | itertools = "0.14.0" 68 | validator = { version = "0.20", 
features = ["derive"] } 69 | vaultrs = { version = "0.7.4" } 70 | utoipa = { version = "5.3", features = ["actix_extras"] } 71 | secrets = { version = "1.2"} 72 | libsodium-sys = "0.2.7" 73 | zeroize = "1.8" 74 | subtle = "2.6" 75 | ed25519-dalek = "2.1" 76 | stellar-strkey = "0.0.13" 77 | soroban-rs = "0.2.4" 78 | p256 = { version = "0.13.2" } 79 | google-cloud-auth = "0.20.0" 80 | http = { version = "1.3.1" } 81 | pem = { version = "3" } 82 | simple_asn1 = { version = "0.6" } 83 | k256 = { version = "0.13" } 84 | 85 | [dev-dependencies] 86 | cargo-llvm-cov = "0.6" 87 | mockall = { version = "0.13" } 88 | mockito = "1.6.1" 89 | proptest = "1.6.0" 90 | rand = "0.9.0" 91 | tempfile = "3.2" 92 | serial_test = "3.2" 93 | clap = { version = "4.4", features = ["derive"] } 94 | wiremock = "0.6" 95 | 96 | [[bin]] 97 | name = "openzeppelin-relayer" 98 | path = "src/main.rs" 99 | doc = true 100 | doctest = true 101 | 102 | [[example]] 103 | name = "test_tx" 104 | path = "helpers/test_tx.rs" 105 | 106 | [[example]] 107 | name = "create_key" 108 | path = "helpers/create_key.rs" 109 | 110 | [[example]] 111 | name = "generate_uuid" 112 | path = "helpers/generate_uuid.rs" 113 | 114 | [[example]] 115 | name = "generate_openapi" 116 | path = "helpers/generate_openapi.rs" 117 | 118 | [lib] 119 | path = "src/lib.rs" 120 | -------------------------------------------------------------------------------- /Dockerfile.development: -------------------------------------------------------------------------------- 1 | # Base image 2 | FROM --platform=${BUILDPLATFORM} cgr.dev/chainguard/rust:latest-dev@sha256:faf49718aaa95c798ed1dfdf3e4edee2cdbc3790c8994705ca6ef35972128459 AS base 3 | 4 | USER root 5 | RUN apk update && apk --no-cache add \ 6 | openssl-dev \ 7 | perl \ 8 | libsodium-dev 9 | 10 | ENV PKG_CONFIG_PATH=/usr/lib/pkgconfig 11 | 12 | WORKDIR /usr/app 13 | 14 | # Copy 15 | COPY . . 
16 | 17 | RUN --mount=type=cache,target=/usr/local/cargo/registry \ 18 | --mount=type=cache,target=/app/target \ 19 | cargo install --root /usr/app --path . --debug --locked 20 | 21 | # Setting up build directories 22 | FROM --platform=${BUILDPLATFORM} cgr.dev/chainguard/wolfi-base 23 | 24 | WORKDIR /app 25 | 26 | COPY --from=base --chown=nonroot:nonroot /usr/app/bin/openzeppelin-relayer /app/openzeppelin-relayer 27 | 28 | # Install plugin dependencies 29 | ARG TARGETARCH 30 | ARG NODE_VERSION=20.19 31 | 32 | # Install Node.js 33 | USER root 34 | RUN apk add --no-cache nodejs=~${NODE_VERSION} npm 35 | 36 | ENV PATH="/usr/local/bin:$PATH" 37 | 38 | # Install pnpm and ts-node 39 | RUN npm install -g pnpm ts-node typescript 40 | 41 | # removes apk and unneeded wolfi-base tools. 42 | RUN apk del wolfi-base apk-tools 43 | 44 | # Copy plugins folder and install dependencies 45 | COPY --chown=nonroot:nonroot ./plugins /app/plugins 46 | 47 | USER nonroot 48 | WORKDIR /app/plugins 49 | RUN pnpm install --frozen-lockfile 50 | 51 | # Return to app root 52 | WORKDIR /app 53 | 54 | ENV APP_PORT=8080 55 | ENV METRICS_PORT=8081 56 | 57 | EXPOSE ${APP_PORT}/tcp ${METRICS_PORT}/tcp 58 | 59 | # starting up 60 | ENTRYPOINT ["/app/openzeppelin-relayer"] 61 | -------------------------------------------------------------------------------- /Dockerfile.production: -------------------------------------------------------------------------------- 1 | # Base image 2 | FROM --platform=${BUILDPLATFORM} cgr.dev/chainguard/rust:latest-dev@sha256:faf49718aaa95c798ed1dfdf3e4edee2cdbc3790c8994705ca6ef35972128459 AS base 3 | 4 | USER root 5 | RUN apk update && apk --no-cache add \ 6 | openssl-dev \ 7 | perl \ 8 | libsodium-dev 9 | 10 | WORKDIR /usr/app 11 | 12 | COPY . . 13 | RUN --mount=type=cache,target=/usr/local/cargo/registry \ 14 | --mount=type=cache,target=/app/target \ 15 | cargo install --root /usr/app --path . 
--debug --locked 16 | 17 | # Setting up build directories 18 | FROM --platform=${BUILDPLATFORM} cgr.dev/chainguard/wolfi-base 19 | 20 | WORKDIR /app 21 | COPY --from=base --chown=nonroot:nonroot /usr/app/bin/openzeppelin-relayer /app/openzeppelin-relayer 22 | COPY --from=base /usr/lib/libssl.so.3 /usr/lib/libssl.so.3 23 | COPY --from=base /usr/lib/libcrypto.so.3 /usr/lib/libcrypto.so.3 24 | 25 | # Install plugin dependencies 26 | ARG TARGETARCH 27 | ARG NODE_VERSION=20.19 28 | 29 | # Install Node.js 30 | USER root 31 | RUN apk add --no-cache nodejs=~${NODE_VERSION} npm 32 | ENV PATH="/usr/local/bin:$PATH" 33 | 34 | RUN npm install -g pnpm ts-node typescript 35 | 36 | # removes apk and unneeded wolfi-base tools. 37 | RUN apk del wolfi-base apk-tools 38 | 39 | # Copy plugins folder and install dependencies 40 | COPY --chown=nonroot:nonroot ./plugins /app/plugins 41 | 42 | USER nonroot 43 | WORKDIR /app/plugins 44 | RUN pnpm install --frozen-lockfile 45 | 46 | # Return to app root 47 | WORKDIR /app 48 | 49 | ENV APP_PORT=8080 50 | ENV METRICS_PORT=8081 51 | 52 | EXPOSE ${APP_PORT}/tcp ${METRICS_PORT}/tcp 53 | 54 | # starting up 55 | ENTRYPOINT ["/app/openzeppelin-relayer"] 56 | -------------------------------------------------------------------------------- /Makefile.toml: -------------------------------------------------------------------------------- 1 | [tasks.rust-antora] 2 | description = "Build Antora site and copy rust_docs" 3 | script = [ 4 | "cargo doc --target-dir docs/rust_docs --release --no-deps --quiet --locked", 5 | "cargo run --example generate_openapi", 6 | "npx -y @redocly/cli@latest build-docs openapi.json --output docs/api_docs.html", 7 | "bash scripts/rust_antora.sh" 8 | ] 9 | [tasks.docker-compose-up] 10 | description = "Run docker compose up according to the user defined settings" 11 | script = [ 12 | "chmod +x ./scripts/docker_compose.sh", 13 | "./scripts/docker_compose.sh up" 14 | ] 15 | [tasks.docker-compose-down] 16 | description = "Run 
docker compose down according to the user defined settings" 17 | script = [ 18 | "chmod +x ./scripts/docker_compose.sh", 19 | "./scripts/docker_compose.sh down" 20 | ] 21 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | Security vulnerabilities should be [disclosed](#reporting-a-vulnerability) to the [project maintainers](./CODEOWNERS), or alternatively by email to security@openzeppelin.com. 4 | 5 | ## Supported Versions 6 | 7 | The following versions are currently supported and receive security updates. Alpha, Beta and Release candidates will not receive security updates. 8 | 9 | Security patches will be released for the latest minor of a given major release. For example, if an issue is found in versions >=1.13.0 and the latest is 1.14.0, the patch will be released only in version 1.14.1. 10 | 11 | Only critical severity bug fixes will be backported to past major releases. 12 | 13 | | Version | Supported | 14 | | --------- | ------------------ | 15 | | >= 0.1.x | :white_check_mark: | 16 | | <= 0.0.9 | :x: | 17 | 18 | ## Reporting a Vulnerability 19 | 20 | We're extremely grateful for security researchers and users that report vulnerabilities to us. 21 | All reports are thoroughly investigated by the project's security team. 22 | 23 | Vulnerabilities are reported privately via GitHub's [Security Advisories](https://docs.github.com/en/code-security/security-advisories) feature. 
24 | Please use the following link to submit your vulnerability: [Report a vulnerability](https://github.com/openzeppelin/openzeppelin-relayer/security/advisories/new) 25 | 26 | Please see 27 | [Privately reporting a security vulnerability](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability) 28 | for more information on how to submit a vulnerability using GitHub's interface. 29 | 30 | ## Legal 31 | 32 | OpenZeppelin Relayer is made available under the GNU AGPL 3.0 License, which disclaims all warranties in relation to the project and which limits the liability of those that contribute and maintain the project, including OpenZeppelin. Your use of the project is also governed by the terms found at www.openzeppelin.com/tos (the "Terms"). As set out in the Terms, you are solely responsible for any use of OpenZeppelin Relayer and you assume all risks associated with any such use. This Security Policy in no way evidences or represents an on-going duty by any contributor, including OpenZeppelin, to correct any flaws or alert you to all or any of the potential risks of utilizing the project. 33 | -------------------------------------------------------------------------------- /_typos.toml: -------------------------------------------------------------------------------- 1 | [default.extend-words] 2 | # Technical term for "no operation" transactions 3 | NOOP = "NOOP" 4 | noop = "noop" 5 | -------------------------------------------------------------------------------- /cmd/prometheus/dashboards/dashboard.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: 1 3 | 4 | providers: 5 | # a unique provider name 6 | - name: OpenZeppelin Relayer 7 | # org id. will default to orgId 1 if not specified 8 | orgId: 1 9 | # name of the dashboard folder. Required 10 | folder: '' 11 | # folder UID. 
will be automatically generated if not specified 12 | folderUid: '' 13 | # provider type. Required 14 | type: file 15 | # disable dashboard deletion 16 | disableDeletion: false 17 | # enable dashboard editing 18 | editable: true 19 | # how often Grafana will scan for changed dashboards 20 | updateIntervalSeconds: 10 21 | # allow updating provisioned dashboards from the UI 22 | allowUiUpdates: true 23 | options: 24 | # path to dashboard files on disk. Required 25 | path: /etc/grafana/provisioning/dashboards 26 | foldersFromFilesStructure: true 27 | -------------------------------------------------------------------------------- /cmd/prometheus/datasources/prometheus.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # config file version 3 | apiVersion: 1 4 | 5 | # list of datasources that should be deleted from the database 6 | deleteDatasources: 7 | - name: Prometheus 8 | orgId: 1 9 | 10 | # list of datasources to insert/update depending 11 | # whats available in the database 12 | datasources: 13 | # name of the datasource. Required 14 | - name: Prometheus 15 | # datasource type. Required 16 | type: prometheus 17 | # access mode. direct or proxy. Required 18 | access: proxy 19 | # org id. will default to orgId 1 if not specified 20 | orgId: 1 21 | # url 22 | url: http://prometheus:9090 23 | # database password, if used 24 | password: 25 | # database user, if used 26 | user: 27 | # database name, if used 28 | database: 29 | # enable/disable basic auth 30 | basicAuth: false 31 | # basic auth username, if used 32 | basicAuthUser: 33 | # basic auth password, if used 34 | basicAuthPassword: 35 | # enable/disable with credentials headers 36 | withCredentials: 37 | # mark as default datasource. 
Max one per org 38 | isDefault: true 39 | # fields that will be converted to json and stored in json_data 40 | jsonData: 41 | graphiteVersion: '1.1' 42 | tlsAuth: false 43 | tlsAuthWithCACert: false 44 | # json object of data that will be encrypted. 45 | secureJsonData: 46 | tlsCACert: '...' 47 | tlsClientCert: '...' 48 | tlsClientKey: '...' 49 | version: 1 50 | # allow users to edit datasources from the UI. 51 | editable: true 52 | -------------------------------------------------------------------------------- /cmd/prometheus/prometheus.yml: -------------------------------------------------------------------------------- 1 | --- 2 | global: 3 | scrape_interval: 10s 4 | scrape_timeout: 3s 5 | evaluation_interval: 5s 6 | 7 | scrape_configs: 8 | - job_name: relayer 9 | # Prometheus uses this api path to scrape metrics from the relayer container 10 | metrics_path: /debug/metrics/scrape 11 | scheme: http 12 | static_configs: 13 | - targets: 14 | - relayer:8081 15 | - job_name: redis_exporter 16 | static_configs: 17 | - targets: 18 | - redis-exporter:9121 19 | -------------------------------------------------------------------------------- /committed.toml: -------------------------------------------------------------------------------- 1 | style="conventional" 2 | ignore_author_re="(dependabot|renovate)" 3 | merge_commit = false 4 | -------------------------------------------------------------------------------- /config/config.example.json: -------------------------------------------------------------------------------- 1 | { 2 | "relayers": [ 3 | { 4 | "id": "sepolia-example", 5 | "name": "Sepolia Example", 6 | "network": "sepolia", 7 | "paused": false, 8 | "notification_id": "notification-example", 9 | "signer_id": "local-signer", 10 | "network_type": "evm", 11 | "policies": { 12 | "min_balance": 0 13 | } 14 | }, 15 | { 16 | "id": "solana-example", 17 | "name": "Solana Example", 18 | "network": "devnet", 19 | "paused": false, 20 | "notification_id": 
"notification-example", 21 | "signer_id": "local-signer", 22 | "network_type": "solana", 23 | "policies": { 24 | "fee_payment_strategy": "user", 25 | "min_balance": 0, 26 | "swap_config": { 27 | "strategy": "jupiter-swap", 28 | "cron_schedule": "0 0 * * *", 29 | "min_balance_threshold": 0 30 | }, 31 | "allowed_programs": [ 32 | "11111111111111111111111111111111", 33 | "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA" 34 | ], 35 | "allowed_tokens": [ 36 | { 37 | "mint": "Gh9ZwEmdLJ8DscKNTkTqPbNwLNNBjuSzaG9Vp2KGtKJr", 38 | "max_allowed_fee": 100000000, 39 | "swap_config": { 40 | "min_amount": 0, 41 | "max_amount": 0, 42 | "retain_min_amount": 0 43 | } 44 | }, 45 | { 46 | "mint": "So11111111111111111111111111111111111111112" 47 | } 48 | ] 49 | } 50 | }, 51 | { 52 | "id": "solana-mainnet-example", 53 | "name": "Solana Mainnet Example", 54 | "network": "mainnet-beta", 55 | "paused": false, 56 | "notification_id": "notification-example", 57 | "signer_id": "local-signer", 58 | "network_type": "solana", 59 | "policies": { 60 | "min_balance": 0, 61 | "allowed_tokens": [ 62 | { 63 | "mint": "EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v", 64 | "max_allowed_fee": 100000000 65 | }, 66 | { 67 | "mint": "So11111111111111111111111111111111111111112" 68 | } 69 | ] 70 | } 71 | } 72 | ], 73 | "notifications": [ 74 | { 75 | "id": "notification-example", 76 | "type": "webhook", 77 | "url": "", 78 | "signing_key": { 79 | "type": "env", 80 | "value": "WEBHOOK_SIGNING_KEY" 81 | } 82 | } 83 | ], 84 | "signers": [ 85 | { 86 | "id": "local-signer", 87 | "type": "local", 88 | "config": { 89 | "path": "config/keys/local-signer.json", 90 | "passphrase": { 91 | "type": "env", 92 | "value": "KEYSTORE_PASSPHRASE" 93 | } 94 | } 95 | } 96 | ], 97 | "networks": "./config/networks" 98 | } 99 | -------------------------------------------------------------------------------- /config/networks/arbitrum.json: -------------------------------------------------------------------------------- 1 | { 2 | "networks": 
[ 3 | { 4 | "average_blocktime_ms": 260, 5 | "chain_id": 42161, 6 | "explorer_urls": [ 7 | "https://api.arbiscan.io/api", 8 | "https://arbiscan.io" 9 | ], 10 | "features": [ 11 | "eip1559" 12 | ], 13 | "is_testnet": false, 14 | "network": "arbitrum-one", 15 | "required_confirmations": 1, 16 | "rpc_urls": [ 17 | "https://arb1.arbitrum.io/rpc", 18 | "https://arbitrum.drpc.org", 19 | "https://1rpc.io/arb", 20 | "https://arbitrum-one-rpc.publicnode.com", 21 | "https://arbitrum-one-public.nodies.app" 22 | ], 23 | "symbol": "ETH", 24 | "tags": [ 25 | "rollup" 26 | ], 27 | "type": "evm" 28 | }, 29 | { 30 | "average_blocktime_ms": 260, 31 | "chain_id": 421614, 32 | "explorer_urls": [ 33 | "https://api-sepolia.arbiscan.io/api", 34 | "https://sepolia.arbiscan.io" 35 | ], 36 | "features": [ 37 | "eip1559" 38 | ], 39 | "is_testnet": true, 40 | "network": "arbitrum-sepolia", 41 | "required_confirmations": 1, 42 | "rpc_urls": [ 43 | "https://sepolia-rollup.arbitrum.io/rpc", 44 | "https://arbitrum-sepolia.drpc.org", 45 | "https://arbitrum-sepolia-rpc.publicnode.com" 46 | ], 47 | "symbol": "ETH", 48 | "tags": [ 49 | "deprecated", 50 | "rollup" 51 | ], 52 | "type": "evm" 53 | }, 54 | { 55 | "average_blocktime_ms": 260, 56 | "chain_id": 42170, 57 | "explorer_urls": [ 58 | "https://api-nova.arbiscan.io/api", 59 | "https://nova.arbiscan.io" 60 | ], 61 | "features": [ 62 | "eip1559" 63 | ], 64 | "is_testnet": false, 65 | "network": "arbitrum-nova", 66 | "required_confirmations": 1, 67 | "rpc_urls": [ 68 | "https://nova.arbitrum.io/rpc", 69 | "https://arbitrum-nova.drpc.org", 70 | "https://arbitrum-nova-rpc.publicnode.com" 71 | ], 72 | "symbol": "ETH", 73 | "tags": [ 74 | "rollup" 75 | ], 76 | "type": "evm" 77 | } 78 | ] 79 | } 80 | -------------------------------------------------------------------------------- /config/networks/bsc.json: -------------------------------------------------------------------------------- 1 | { 2 | "networks": [ 3 | { 4 | "average_blocktime_ms": 3000, 5 | 
"chain_id": 56, 6 | "explorer_urls": [ 7 | "https://api.bscscan.com/api", 8 | "https://bscscan.com" 9 | ], 10 | "is_testnet": false, 11 | "network": "binance-smart-chain", 12 | "required_confirmations": 1, 13 | "rpc_urls": [ 14 | "https://bsc-dataseed.bnbchain.org", 15 | "https://bsc-dataseed.nariox.org", 16 | "https://bsc-dataseed.defibit.io", 17 | "https://bsc-dataseed.ninicoin.io", 18 | "https://bsc.nodereal.io", 19 | "https://bsc-dataseed-public.bnbchain.org", 20 | "https://bnb.rpc.subquery.network/public", 21 | "https://bsc.drpc.org", 22 | "https://1rpc.io/bnb", 23 | "https://bsc-rpc.publicnode.com", 24 | "https://binance-smart-chain-public.nodies.app" 25 | ], 26 | "symbol": "BNB", 27 | "type": "evm" 28 | }, 29 | { 30 | "average_blocktime_ms": 3000, 31 | "chain_id": 97, 32 | "explorer_urls": [ 33 | "https://api-testnet.bscscan.com/api", 34 | "https://testnet.bscscan.com" 35 | ], 36 | "is_testnet": true, 37 | "network": "binance-smart-chain-testnet", 38 | "required_confirmations": 1, 39 | "rpc_urls": [ 40 | "https://bsc-testnet-dataseed.bnbchain.org", 41 | "https://bsc-testnet.bnbchain.org", 42 | "https://bsc-prebsc-dataseed.bnbchain.org", 43 | "https://bsc-testnet.drpc.org", 44 | "https://bsc-testnet-rpc.publicnode.com" 45 | ], 46 | "symbol": "BNB", 47 | "tags": [ 48 | "deprecated" 49 | ], 50 | "type": "evm" 51 | } 52 | ] 53 | } 54 | -------------------------------------------------------------------------------- /config/networks/ethereum.json: -------------------------------------------------------------------------------- 1 | { 2 | "networks": [ 3 | { 4 | "average_blocktime_ms": 12000, 5 | "chain_id": 1, 6 | "explorer_urls": [ 7 | "https://api.etherscan.io/api", 8 | "https://etherscan.io" 9 | ], 10 | "features": [ 11 | "eip1559" 12 | ], 13 | "is_testnet": false, 14 | "network": "mainnet", 15 | "required_confirmations": 12, 16 | "rpc_urls": [ 17 | "https://eth.drpc.org", 18 | "https://1rpc.io/eth", 19 | "https://ethereum-rpc.publicnode.com", 20 | 
"https://ethereum-public.nodies.app" 21 | ], 22 | "symbol": "ETH", 23 | "type": "evm" 24 | }, 25 | { 26 | "average_blocktime_ms": 12000, 27 | "chain_id": 11155111, 28 | "explorer_urls": [ 29 | "https://api-sepolia.etherscan.io/api", 30 | "https://sepolia.etherscan.io" 31 | ], 32 | "features": [ 33 | "eip1559" 34 | ], 35 | "is_testnet": true, 36 | "network": "sepolia", 37 | "required_confirmations": 6, 38 | "rpc_urls": [ 39 | "https://sepolia.drpc.org", 40 | "https://1rpc.io/sepolia", 41 | "https://ethereum-sepolia-rpc.publicnode.com", 42 | "https://ethereum-sepolia-public.nodies.app" 43 | ], 44 | "symbol": "ETH", 45 | "tags": [ 46 | "deprecated" 47 | ], 48 | "type": "evm" 49 | }, 50 | { 51 | "average_blocktime_ms": 12000, 52 | "chain_id": 17000, 53 | "explorer_urls": [ 54 | "https://api-holesky.etherscan.io/api", 55 | "https://holesky.etherscan.io" 56 | ], 57 | "features": [ 58 | "eip1559" 59 | ], 60 | "is_testnet": true, 61 | "network": "holesky", 62 | "required_confirmations": 6, 63 | "rpc_urls": [ 64 | "https://holesky.drpc.org", 65 | "https://1rpc.io/holesky", 66 | "https://ethereum-holesky-rpc.publicnode.com" 67 | ], 68 | "symbol": "ETH", 69 | "tags": [ 70 | "deprecated" 71 | ], 72 | "type": "evm" 73 | } 74 | ] 75 | } 76 | -------------------------------------------------------------------------------- /config/networks/linea.json: -------------------------------------------------------------------------------- 1 | { 2 | "networks": [ 3 | { 4 | "average_blocktime_ms": 12000, 5 | "chain_id": 59144, 6 | "explorer_urls": [ 7 | "https://api.lineascan.build/api", 8 | "https://lineascan.build" 9 | ], 10 | "features": [ 11 | "eip1559" 12 | ], 13 | "is_testnet": false, 14 | "network": "linea", 15 | "required_confirmations": 1, 16 | "rpc_urls": [ 17 | "https://rpc.linea.build", 18 | "https://linea.drpc.org", 19 | "https://1rpc.io/linea", 20 | "https://linea-rpc.publicnode.com" 21 | ], 22 | "symbol": "ETH", 23 | "tags": [ 24 | "rollup" 25 | ], 26 | "type": "evm" 27 | }, 
28 | { 29 | "average_blocktime_ms": 12000, 30 | "chain_id": 59141, 31 | "explorer_urls": [ 32 | "https://api-sepolia.lineascan.build/api", 33 | "https://sepolia.lineascan.build" 34 | ], 35 | "features": [ 36 | "eip1559" 37 | ], 38 | "is_testnet": true, 39 | "network": "linea-sepolia", 40 | "required_confirmations": 1, 41 | "rpc_urls": [ 42 | "https://rpc.sepolia.linea.build", 43 | "https://linea-sepolia.drpc.org", 44 | "https://linea-sepolia-rpc.publicnode.com" 45 | ], 46 | "symbol": "ETH", 47 | "tags": [ 48 | "deprecated", 49 | "rollup" 50 | ], 51 | "type": "evm" 52 | } 53 | ] 54 | } 55 | -------------------------------------------------------------------------------- /config/networks/mantle.json: -------------------------------------------------------------------------------- 1 | { 2 | "networks": [ 3 | { 4 | "average_blocktime_ms": 2000, 5 | "chain_id": 5000, 6 | "explorer_urls": [ 7 | "https://explorer.mantle.xyz/api", 8 | "https://explorer.mantle.xyz" 9 | ], 10 | "features": [ 11 | "eip1559" 12 | ], 13 | "is_testnet": false, 14 | "network": "mantle", 15 | "required_confirmations": 1, 16 | "rpc_urls": [ 17 | "https://rpc.mantle.xyz", 18 | "https://mantle.drpc.org", 19 | "https://1rpc.io/mantle", 20 | "https://mantle-rpc.publicnode.com", 21 | "https://mantle-public.nodies.app" 22 | ], 23 | "symbol": "MNT", 24 | "tags": [ 25 | "rollup" 26 | ], 27 | "type": "evm" 28 | }, 29 | { 30 | "average_blocktime_ms": 2000, 31 | "chain_id": 5003, 32 | "explorer_urls": [ 33 | "https://explorer.sepolia.mantle.xyz/api", 34 | "https://explorer.sepolia.mantle.xyz" 35 | ], 36 | "features": [ 37 | "eip1559" 38 | ], 39 | "is_testnet": true, 40 | "network": "mantle-sepolia", 41 | "required_confirmations": 1, 42 | "rpc_urls": [ 43 | "https://rpc.sepolia.mantle.xyz", 44 | "https://mantle-sepolia.drpc.org" 45 | ], 46 | "symbol": "MNT", 47 | "tags": [ 48 | "deprecated", 49 | "rollup" 50 | ], 51 | "type": "evm" 52 | } 53 | ] 54 | } 55 | 
-------------------------------------------------------------------------------- /config/networks/polygon.json: -------------------------------------------------------------------------------- 1 | { 2 | "networks": [ 3 | { 4 | "average_blocktime_ms": 2100, 5 | "chain_id": 137, 6 | "explorer_urls": [ 7 | "https://api.polygonscan.com/api", 8 | "https://polygonscan.com" 9 | ], 10 | "features": [ 11 | "eip1559" 12 | ], 13 | "is_testnet": false, 14 | "network": "polygon", 15 | "required_confirmations": 1, 16 | "rpc_urls": [ 17 | "https://polygon-rpc.com", 18 | "https://polygon.drpc.org", 19 | "https://1rpc.io/matic", 20 | "https://polygon-bor-rpc.publicnode.com", 21 | "https://polygon-public.nodies.app", 22 | "https://polygon.meowrpc.com" 23 | ], 24 | "symbol": "POL", 25 | "type": "evm" 26 | }, 27 | { 28 | "average_blocktime_ms": 2100, 29 | "chain_id": 80002, 30 | "explorer_urls": [ 31 | "https://api-amoy.polygonscan.com/api", 32 | "https://amoy.polygonscan.com" 33 | ], 34 | "features": [ 35 | "eip1559" 36 | ], 37 | "is_testnet": true, 38 | "network": "polygon-amoy", 39 | "required_confirmations": 1, 40 | "rpc_urls": [ 41 | "https://rpc-amoy.polygon.technology", 42 | "https://polygon-amoy.drpc.org", 43 | "https://polygon-amoy-bor-rpc.publicnode.com" 44 | ], 45 | "symbol": "POL", 46 | "tags": [ 47 | "deprecated" 48 | ], 49 | "type": "evm" 50 | }, 51 | { 52 | "average_blocktime_ms": 5000, 53 | "chain_id": 1101, 54 | "explorer_urls": [ 55 | "https://api-zkevm.polygonscan.com/api", 56 | "https://zkevm.polygonscan.com" 57 | ], 58 | "is_testnet": false, 59 | "network": "polygon-zkevm", 60 | "required_confirmations": 1, 61 | "rpc_urls": [ 62 | "https://zkevm-rpc.com", 63 | "https://polygon-zkevm.drpc.org", 64 | "https://1rpc.io/polygon/zkevm", 65 | "https://polygon-zkevm-public.nodies.app" 66 | ], 67 | "symbol": "ETH", 68 | "type": "evm" 69 | }, 70 | { 71 | "average_blocktime_ms": 5000, 72 | "chain_id": 1442, 73 | "explorer_urls": [ 74 | 
"https://api-testnet-zkevm.polygonscan.com/api", 75 | "https://testnet-zkevm.polygonscan.com" 76 | ], 77 | "is_testnet": true, 78 | "network": "polygon-zkevm-testnet", 79 | "required_confirmations": 1, 80 | "rpc_urls": [ 81 | "https://rpc.cardona.zkevm-rpc.com", 82 | "https://polygon-zkevm-cardona.drpc.org" 83 | ], 84 | "symbol": "ETH", 85 | "tags": [ 86 | "deprecated" 87 | ], 88 | "type": "evm" 89 | } 90 | ] 91 | } 92 | -------------------------------------------------------------------------------- /config/networks/scroll.json: -------------------------------------------------------------------------------- 1 | { 2 | "networks": [ 3 | { 4 | "average_blocktime_ms": 3000, 5 | "chain_id": 534352, 6 | "explorer_urls": [ 7 | "https://api.scrollscan.com/api", 8 | "https://scrollscan.com" 9 | ], 10 | "features": [ 11 | "eip1559" 12 | ], 13 | "is_testnet": false, 14 | "network": "scroll", 15 | "required_confirmations": 1, 16 | "rpc_urls": [ 17 | "https://rpc.scroll.io/", 18 | "https://scroll.drpc.org", 19 | "https://1rpc.io/scroll", 20 | "https://scroll-rpc.publicnode.com", 21 | "https://scroll-public.nodies.app" 22 | ], 23 | "symbol": "ETH", 24 | "type": "evm" 25 | }, 26 | { 27 | "average_blocktime_ms": 3000, 28 | "chain_id": 534351, 29 | "explorer_urls": [ 30 | "https://api-sepolia.scrollscan.com/api", 31 | "https://sepolia.scrollscan.com" 32 | ], 33 | "features": [ 34 | "eip1559" 35 | ], 36 | "is_testnet": true, 37 | "network": "scroll-sepolia", 38 | "required_confirmations": 1, 39 | "rpc_urls": [ 40 | "https://sepolia-rpc.scroll.io/", 41 | "https://scroll-sepolia.drpc.org", 42 | "https://scroll-sepolia-rpc.publicnode.com", 43 | "https://scroll-sepolia-public.nodies.app" 44 | ], 45 | "symbol": "ETH", 46 | "tags": [ 47 | "deprecated" 48 | ], 49 | "type": "evm" 50 | } 51 | ] 52 | } 53 | -------------------------------------------------------------------------------- /config/networks/solana.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "networks": [ 3 | { 4 | "type": "solana", 5 | "network": "mainnet-beta", 6 | "rpc_urls": ["https://api.mainnet-beta.solana.com"], 7 | "explorer_urls": ["https://explorer.solana.com"], 8 | "average_blocktime_ms": 400, 9 | "is_testnet": false 10 | }, 11 | { 12 | "type": "solana", 13 | "network": "testnet", 14 | "rpc_urls": ["https://api.testnet.solana.com"], 15 | "explorer_urls": ["https://explorer.solana.com?cluster=testnet"], 16 | "average_blocktime_ms": 400, 17 | "is_testnet": true 18 | }, 19 | { 20 | "type": "solana", 21 | "network": "devnet", 22 | "rpc_urls": ["https://api.devnet.solana.com"], 23 | "explorer_urls": ["https://explorer.solana.com?cluster=devnet"], 24 | "average_blocktime_ms": 400, 25 | "is_testnet": true 26 | } 27 | ] 28 | } 29 | -------------------------------------------------------------------------------- /config/networks/stellar.json: -------------------------------------------------------------------------------- 1 | { 2 | "networks": [ 3 | { 4 | "type": "stellar", 5 | "network": "mainnet", 6 | "rpc_urls": ["https://horizon.stellar.org"], 7 | "explorer_urls": ["https://stellar.expert/explorer/public"], 8 | "average_blocktime_ms": 5000, 9 | "is_testnet": false, 10 | "passphrase": "Public Global Stellar Network ; September 2015" 11 | }, 12 | { 13 | "type": "stellar", 14 | "network": "testnet", 15 | "rpc_urls": ["https://soroban-testnet.stellar.org"], 16 | "explorer_urls": ["https://stellar.expert/explorer/testnet"], 17 | "average_blocktime_ms": 5000, 18 | "is_testnet": true, 19 | "passphrase": "Test SDF Network ; September 2015" 20 | } 21 | ] 22 | } 23 | -------------------------------------------------------------------------------- /config/networks/zksync.json: -------------------------------------------------------------------------------- 1 | { 2 | "networks": [ 3 | { 4 | "average_blocktime_ms": 5000, 5 | "chain_id": 324, 6 | "explorer_urls": [ 7 | 
"https://api-era.zksync.network/api", 8 | "https://era.zksync.network" 9 | ], 10 | "is_testnet": false, 11 | "network": "zk-sync", 12 | "required_confirmations": 1, 13 | "rpc_urls": [ 14 | "https://mainnet.era.zksync.io", 15 | "https://zksync.drpc.org", 16 | "https://1rpc.io/zksync2-era" 17 | ], 18 | "symbol": "ETH", 19 | "tags": [ 20 | "rollup" 21 | ], 22 | "type": "evm" 23 | }, 24 | { 25 | "average_blocktime_ms": 5000, 26 | "chain_id": 300, 27 | "explorer_urls": [ 28 | "https://api-sepolia-era.zksync.network/api", 29 | "https://sepolia-era.zksync.network" 30 | ], 31 | "is_testnet": true, 32 | "network": "zk-sync-testnet", 33 | "required_confirmations": 1, 34 | "rpc_urls": [ 35 | "https://sepolia.era.zksync.dev", 36 | "https://zksync-sepolia.drpc.org" 37 | ], 38 | "symbol": "ETH", 39 | "tags": [ 40 | "deprecated", 41 | "rollup" 42 | ], 43 | "type": "evm" 44 | } 45 | ] 46 | } 47 | -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | # Antora Documentation 2 | 3 | ## Generate Documentation 4 | 5 | - To generate documentation locally, run the following command: 6 | 7 | ```sh 8 | yarn docs:watch 9 | ``` 10 | 11 | - In a separate terminal, from the root of the repo, run: 12 | 13 | ```sh 14 | cargo make rust-antora 15 | ``` 16 | 17 | - You can view the site at `localhost:8080` (or another port if 8080 is in use).
18 | -------------------------------------------------------------------------------- /docs/antora.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: relayer 3 | title: Relayer 4 | version: 0.2.x 5 | nav: 6 | - modules/ROOT/nav.adoc 7 | -------------------------------------------------------------------------------- /docs/modules/ROOT/nav.adoc: -------------------------------------------------------------------------------- 1 | * xref:index.adoc[User Documentation] 2 | * xref:quickstart.adoc[Quickstart] 3 | * link:https://openzeppelin-relayer.netlify.app/api_docs.html[API Reference^] 4 | * xref:structure.adoc[Project Structure] 5 | * xref:roadmap.adoc[Project Roadmap] 6 | * xref:solana.adoc[Solana Integration] 7 | * link:https://release-v0-2-0%2D%2Dopenzeppelin-relayer.netlify.app/openzeppelin_relayer/[Technical Rust Documentation^] 8 | -------------------------------------------------------------------------------- /docs/modules/ROOT/pages/structure.adoc: -------------------------------------------------------------------------------- 1 | = Project Structure 2 | :description: Detailed information about the OpenZeppelin Relayer project structure 3 | 4 | This document provides detailed information about each directory in the OpenZeppelin Relayer project. 
5 | 6 | == Source Code Organization 7 | 8 | === `src/` Directory 9 | The main source code directory contains the core implementation files organized into several modules: 10 | 11 | * `api/`: Route and controllers logic 12 | ** Manages HTTP routing and delegates incoming requests to controllers 13 | 14 | * `bootstrap/`: Service initialization 15 | ** Bootstraps and initializes application services 16 | 17 | * `config/`: Configuration management 18 | ** Handles system configuration and environment settings 19 | 20 | * `constants/`: Global constants 21 | ** Provides static values used across the application 22 | 23 | * `domain/`: Business domain logic 24 | ** Encapsulates core business rules and domain-specific functionality 25 | 26 | * `jobs/`: Asynchronous job processing 27 | ** Manages background task queueing and execution 28 | 29 | * `logging/`: Logging and file rotation 30 | ** Implements logging functionalities and log file management 31 | 32 | * `metrics/`: Metrics collection 33 | ** Collects and reports application performance and usage metrics 34 | 35 | * `models/`: Core data models and types 36 | ** Defines data structures and type definitions for the system 37 | 38 | * `repositories/`: Configuration storage 39 | ** Provides interfaces for storing and retrieving configuration data 40 | 41 | * `services/`: Business service logic 42 | ** Implements core business functionalities and service operations 43 | 44 | * `utils/`: Utility functions 45 | ** Offers helper functions and common utilities for the application 46 | 47 | 48 | == Documentation 49 | 50 | === `docs/` Directory 51 | Project documentation: 52 | 53 | * User guides 54 | * API documentation 55 | * Configuration examples 56 | * Architecture diagrams 57 | 58 | 59 | == Configuration 60 | 61 | === `config/` Directory 62 | 63 | Houses system configuration file and keys: 64 | 65 | * `config.json` configuration file 66 | * keystore files referenced from config.json file 67 | 68 | == Tests 69 | 70 | === 
`test/` Directory 71 | 72 | Includes comprehensive testing suites to ensure system reliability: 73 | 74 | * End-to-end tests that simulate real-world user scenarios 75 | 76 | 77 | == Scripts 78 | 79 | === `scripts/` Directory 80 | 81 | Utility scripts. 82 | 83 | 84 | == Examples 85 | 86 | === `examples/` Directory 87 | 88 | Provides practical examples and sample configurations to help users get started: 89 | 90 | * Demonstrates typical service configurations for various environments 91 | * Acts as a quick-start guide for customizing and deploying the relayer 92 | * Serves as a reference for best practices in configuration and deployment 93 | 94 | == Development Tools 95 | 96 | === Pre-commit Hooks 97 | Located in the project root: 98 | 99 | * Code formatting checks 100 | * Linting rules 101 | * Commit message validation 102 | 103 | === Build Configuration 104 | Core build files: 105 | 106 | * `Cargo.toml`: Project dependencies and metadata 107 | * `rustfmt.toml`: Code formatting rules 108 | * `rust-toolchain.toml`: Rust version and components 109 | 110 | == Docker Support 111 | 112 | The project includes Docker configurations for different environments: 113 | 114 | * `Dockerfile.development`: Development container setup 115 | * `Dockerfile.production`: Production-ready container 116 | 117 | [TIP] 118 | ==== 119 | For detailed information about running the relayers in containers, see the Docker deployment section in the main documentation. 
120 | ==== 121 | -------------------------------------------------------------------------------- /docs/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "docs", 3 | "version": "0.1.0", 4 | "scripts": { 5 | "docs": "oz-docs -c .", 6 | "docs:watch": "npm run docs watch" 7 | }, 8 | "keywords": [], 9 | "author": "", 10 | "devDependencies": { 11 | "@openzeppelin/docs-utils": "^0.1.3" 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /examples/basic-example-logging/.env.example: -------------------------------------------------------------------------------- 1 | API_KEY= 2 | WEBHOOK_SIGNING_KEY= 3 | KEYSTORE_PASSPHRASE= 4 | LOG_LEVEL=info 5 | LOG_MODE=file 6 | LOG_DATA_DIR=./logs 7 | LOG_MAX_SIZE=1073741824 8 | -------------------------------------------------------------------------------- /examples/basic-example-logging/config/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "relayers": [ 3 | { 4 | "id": "sepolia-example", 5 | "name": "Sepolia Example", 6 | "network": "sepolia", 7 | "paused": false, 8 | "notification_id": "notification-example", 9 | "signer_id": "local-signer", 10 | "network_type": "evm", 11 | "policies": { 12 | "min_balance": 0 13 | } 14 | } 15 | ], 16 | "notifications": [ 17 | { 18 | "id": "notification-example", 19 | "type": "webhook", 20 | "url": "", 21 | "signing_key": { 22 | "type": "env", 23 | "value": "WEBHOOK_SIGNING_KEY" 24 | } 25 | } 26 | ], 27 | "signers": [ 28 | { 29 | "id": "local-signer", 30 | "type": "local", 31 | "config": { 32 | "path": "config/keys/local-signer.json", 33 | "passphrase": { 34 | "type": "env", 35 | "value": "KEYSTORE_PASSPHRASE" 36 | } 37 | } 38 | } 39 | ] 40 | } 41 | -------------------------------------------------------------------------------- /examples/basic-example-logging/docker-compose.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | services: 3 | relayer: 4 | build: 5 | context: ../../ 6 | dockerfile: Dockerfile.development 7 | secrets: 8 | - api_key 9 | - webhook_signing_key 10 | - keystore_passphrase 11 | ports: 12 | - 8080:8080/tcp 13 | environment: 14 | REDIS_URL: redis://redis:6379 15 | WEBHOOK_SIGNING_KEY: ${WEBHOOK_SIGNING_KEY} 16 | API_KEY: ${API_KEY} 17 | KEYSTORE_PASSPHRASE: ${KEYSTORE_PASSPHRASE} 18 | # Options: trace, debug, info, warn, error 19 | # Default: info 20 | LOG_LEVEL: ${LOG_LEVEL} 21 | # Options: stdout, file 22 | # Default: stdout 23 | LOG_MODE: ${LOG_MODE} 24 | # Set max log file size to rotate to new log file 25 | # Only used if LOG_MODE is file 26 | # Default: 1GB (1073741824 bytes) 27 | LOG_MAX_SIZE: ${LOG_MAX_SIZE} 28 | security_opt: 29 | - no-new-privileges 30 | networks: 31 | - relayer-network 32 | - metrics-network 33 | volumes: 34 | - ${CONFIG_DIR:-./config}:/app/config/:ro 35 | # Mount logs folder to persist logs 36 | - ${LOG_DATA_DIR:-./logs}:/app/logs 37 | depends_on: 38 | - redis 39 | restart: on-failure:5 40 | 41 | 42 | redis: 43 | image: redis:bookworm 44 | ports: 45 | - 6379:6379/tcp 46 | security_opt: 47 | - no-new-privileges 48 | volumes: 49 | - redis_data:/data 50 | command: [redis-server, --appendonly, yes, --save, '60', '1'] 51 | networks: 52 | - relayer-network 53 | - metrics-network 54 | restart: on-failure:5 55 | 56 | 57 | networks: 58 | metrics-network: 59 | internal: true 60 | relayer-network: 61 | driver: bridge 62 | 63 | volumes: 64 | redis_data: 65 | driver: local 66 | 67 | secrets: 68 | api_key: 69 | environment: API_KEY 70 | webhook_signing_key: 71 | environment: WEBHOOK_SIGNING_KEY 72 | keystore_passphrase: 73 | environment: KEYSTORE_PASSPHRASE 74 | -------------------------------------------------------------------------------- /examples/basic-example-metrics/.env.example: 
-------------------------------------------------------------------------------- 1 | REDIS_URL=redis://localhost:6379 2 | API_KEY= 3 | WEBHOOK_SIGNING_KEY= 4 | KEYSTORE_PASSPHRASE= 5 | -------------------------------------------------------------------------------- /examples/basic-example-metrics/config/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "relayers": [ 3 | { 4 | "id": "sepolia-example", 5 | "name": "Sepolia Example", 6 | "network": "sepolia", 7 | "paused": false, 8 | "notification_id": "notification-example", 9 | "signer_id": "local-signer", 10 | "network_type": "evm", 11 | "policies": { 12 | "min_balance": 0 13 | } 14 | } 15 | ], 16 | "notifications": [ 17 | { 18 | "id": "notification-example", 19 | "type": "webhook", 20 | "url": "", 21 | "signing_key": { 22 | "type": "env", 23 | "value": "WEBHOOK_SIGNING_KEY" 24 | } 25 | } 26 | ], 27 | "signers": [ 28 | { 29 | "id": "local-signer", 30 | "type": "local", 31 | "config": { 32 | "path": "config/keys/local-signer.json", 33 | "passphrase": { 34 | "type": "env", 35 | "value": "KEYSTORE_PASSPHRASE" 36 | } 37 | } 38 | } 39 | ] 40 | } 41 | -------------------------------------------------------------------------------- /examples/basic-example-metrics/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Ports `8080` for relayer server, `6379` for redis server. 
3 | 4 | services: 5 | relayer: 6 | build: 7 | context: ../../ 8 | dockerfile: Dockerfile.development 9 | ports: 10 | - 8080:8080/tcp 11 | secrets: 12 | - api_key 13 | - webhook_signing_key 14 | - keystore_passphrase 15 | environment: 16 | METRICS_PORT: 8081 17 | REDIS_URL: ${REDIS_URL} 18 | RATE_LIMIT_REQUESTS_PER_SECOND: 10 19 | RATE_LIMIT_BURST: 30 20 | METRICS_ENABLED: true 21 | WEBHOOK_SIGNING_KEY: ${WEBHOOK_SIGNING_KEY} 22 | API_KEY: ${API_KEY} 23 | KEYSTORE_PASSPHRASE: ${KEYSTORE_PASSPHRASE} 24 | depends_on: 25 | - redis 26 | restart: on-failure:5 27 | mem_swappiness: 0 28 | security_opt: 29 | - no-new-privileges 30 | volumes: 31 | - ./config:/app/config/ 32 | # Default: writes to stdout/console 33 | - ${LOGS_DATA_DIR:-./logs}:/app/logs 34 | networks: 35 | - relayer-network 36 | - metrics-network 37 | 38 | redis: 39 | image: redis:bookworm 40 | ports: 41 | - 6379:6379/tcp 42 | security_opt: 43 | - no-new-privileges 44 | volumes: 45 | - redis_data:/data 46 | command: [redis-server, --appendonly, yes, --save, '60', '1'] 47 | networks: 48 | - relayer-network 49 | - metrics-network 50 | restart: on-failure:5 51 | 52 | redis-exporter: 53 | image: oliver006/redis_exporter:v1.67.0 54 | environment: 55 | - REDIS_ADDR=redis://redis:6379 56 | security_opt: 57 | - no-new-privileges 58 | profiles: 59 | - metrics 60 | depends_on: 61 | - redis 62 | networks: 63 | - metrics-network 64 | restart: on-failure:5 65 | 66 | prometheus: 67 | container_name: openzeppelin-relayer-prometheus 68 | image: prom/prometheus:v3.1.0 69 | security_opt: 70 | - no-new-privileges 71 | command: --log.level=warn --config.file=/etc/prometheus/prometheus.yml --storage.tsdb.path=/prometheus 72 | --storage.tsdb.retention.time=30d --web.console.libraries=/usr/share/prometheus/console_libraries 73 | --web.console.templates=/usr/share/prometheus/consoles 74 | ports: 75 | - 9090:9090/tcp 76 | networks: 77 | - metrics-network 78 | - relayer-network 79 | volumes: 80 | - 
./cmd/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml 81 | restart: on-failure:5 82 | profiles: 83 | - metrics 84 | 85 | grafana: 86 | image: grafana/grafana:11.5.1 87 | security_opt: 88 | - no-new-privileges 89 | ports: [3000:3000/tcp] 90 | networks: 91 | - metrics-network 92 | - relayer-network 93 | volumes: 94 | - ./cmd/prometheus/grafana.ini:/etc/grafana/grafana.ini 95 | - ./cmd/prometheus/datasources:/etc/grafana/provisioning/datasources 96 | - ./cmd/prometheus/dashboards:/etc/grafana/provisioning/dashboards 97 | restart: on-failure:5 98 | profiles: 99 | - metrics 100 | 101 | networks: 102 | metrics-network: 103 | internal: true 104 | relayer-network: 105 | driver: bridge 106 | 107 | volumes: 108 | redis_data: 109 | driver: local 110 | 111 | secrets: 112 | api_key: 113 | environment: API_KEY 114 | webhook_signing_key: 115 | environment: WEBHOOK_SIGNING_KEY 116 | keystore_passphrase: 117 | environment: KEYSTORE_PASSPHRASE 118 | -------------------------------------------------------------------------------- /examples/basic-example/.env.example: -------------------------------------------------------------------------------- 1 | API_KEY= 2 | WEBHOOK_SIGNING_KEY= 3 | KEYSTORE_PASSPHRASE= 4 | -------------------------------------------------------------------------------- /examples/basic-example/README.md: -------------------------------------------------------------------------------- 1 | # OpenZeppelin Relayer Basic Example 2 | 3 | This guide demonstrates how to configure and use the OpenZeppelin Relayer service with a basic setup. In this example, we configure and utilize an Ethereum Sepolia Relayer. 
4 | 5 | 6 | ## Getting Started 7 | 8 | ### Prerequisites 9 | 10 | - [Docker](https://docs.docker.com/get-docker/) 11 | - [Docker Compose](https://docs.docker.com/compose/install/) 12 | - Rust (for key generation tools) 13 | 14 | ### Step 1: Clone the Repository 15 | 16 | Clone this repository to your local machine: 17 | 18 | ```bash 19 | git clone https://github.com/OpenZeppelin/openzeppelin-relayer 20 | cd openzeppelin-relayer 21 | ``` 22 | 23 | ### Step 2: Create a Signer 24 | 25 | Create a new signer keystore using the provided key generation tool: 26 | 27 | ```sh 28 | cargo run --example create_key -- \ 29 | --password DEFINE_YOUR_PASSWORD \ 30 | --output-dir examples/basic-example/config/keys \ 31 | --filename local-signer.json 32 | ``` 33 | 34 | Note: Replace `DEFINE_YOUR_PASSWORD` with a strong password for the keystore. 35 | 36 | Create `examples/basic-example/.env` file from `examples/basic-example/.env.example`. 37 | 38 | ```bash 39 | cp examples/basic-example/.env.example examples/basic-example/.env 40 | ``` 41 | 42 | Then, update the `KEYSTORE_PASSPHRASE` field in the `examples/basic-example/.env` file with the password you used. 43 | 44 | ### Step 3: Configure Notifications 45 | 46 | #### Configure Webhook URL 47 | 48 | `examples/basic-example/config/config.json` file is partially pre-configured. You need to specify the webhook URL that will receive updates from the relayer service. 49 | 50 | For simplicity, visit [Webhook.site](https://webhook.site), copy your unique URL, and then update the notifications[0].url field in `examples/basic-example/config/config.json` with this value. 51 | 52 | 53 | #### Configure Webhook Signing Key 54 | 55 | To sign webhook notification payloads, populate the `WEBHOOK_SIGNING_KEY` entry in the `examples/basic-example/.env` file. 56 | 57 | For development purposes, you can generate the signing key using: 58 | 59 | ```bash 60 | cargo run --example generate_uuid 61 | ``` 62 | > Note: Alternatively, you can use any online UUID generator. 
63 | 64 | 65 | Copy the generated UUID and update the `WEBHOOK_SIGNING_KEY` entry in the `examples/basic-example/.env` file. 66 | 67 | 68 | ### Step 4: Configure API Key 69 | 70 | Generate an API key signing key for development purposes using: 71 | 72 | ```bash 73 | cargo run --example generate_uuid 74 | ``` 75 | > Note: Alternatively, you can use any online UUID generator. 76 | 77 | 78 | Copy the generated UUID and update the `API_KEY` entry in the `examples/basic-example/.env` file. 79 | 80 | 81 | ### Step 5: Run the Service 82 | 83 | Start the service with Docker Compose: 84 | 85 | ```bash 86 | docker compose -f examples/basic-example/docker-compose.yaml up 87 | ``` 88 | 89 | ### Step 6: Test the Relayer 90 | 91 | The service is available at `http://localhost:8080/api/v1` 92 | 93 | ```bash 94 | curl -X GET http://localhost:8080/api/v1/relayers \ 95 | -H "Content-Type: application/json" \ 96 | -H "AUTHORIZATION: Bearer YOUR_API_KEY" 97 | ``` 98 | -------------------------------------------------------------------------------- /examples/basic-example/config/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "relayers": [ 3 | { 4 | "id": "sepolia-example", 5 | "name": "Sepolia Example", 6 | "network": "sepolia", 7 | "paused": false, 8 | "notification_id": "notification-example", 9 | "signer_id": "local-signer", 10 | "network_type": "evm", 11 | "policies": { 12 | "min_balance": 0 13 | } 14 | } 15 | ], 16 | "notifications": [ 17 | { 18 | "id": "notification-example", 19 | "type": "webhook", 20 | "url": "", 21 | "signing_key": { 22 | "type": "env", 23 | "value": "WEBHOOK_SIGNING_KEY" 24 | } 25 | } 26 | ], 27 | "signers": [ 28 | { 29 | "id": "local-signer", 30 | "type": "local", 31 | "config": { 32 | "path": "config/keys/local-signer.json", 33 | "passphrase": { 34 | "type": "env", 35 | "value": "KEYSTORE_PASSPHRASE" 36 | } 37 | } 38 | } 39 | ] 40 | } 41 | 
-------------------------------------------------------------------------------- /examples/basic-example/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | version: '3.9' 3 | 4 | services: 5 | relayer: 6 | build: 7 | context: ../../ 8 | dockerfile: Dockerfile.development 9 | ports: 10 | - 8080:8080/tcp 11 | secrets: 12 | - api_key 13 | - webhook_signing_key 14 | - keystore_passphrase 15 | environment: 16 | REDIS_URL: redis://redis:6379 17 | RATE_LIMIT_REQUESTS_PER_SECOND: 10 18 | RATE_LIMIT_BURST: 50 19 | WEBHOOK_SIGNING_KEY: ${WEBHOOK_SIGNING_KEY} 20 | API_KEY: ${API_KEY} 21 | KEYSTORE_PASSPHRASE: ${KEYSTORE_PASSPHRASE} 22 | security_opt: 23 | - no-new-privileges 24 | networks: 25 | - relayer-network 26 | - metrics-network 27 | volumes: 28 | - ./config:/app/config/ 29 | depends_on: 30 | - redis 31 | restart: on-failure:5 32 | 33 | 34 | redis: 35 | image: redis:bookworm 36 | ports: 37 | - 6379:6379/tcp 38 | security_opt: 39 | - no-new-privileges 40 | volumes: 41 | - redis_data:/data 42 | command: [redis-server, --appendonly, yes, --save, '60', '1'] 43 | networks: 44 | - relayer-network 45 | - metrics-network 46 | restart: on-failure:5 47 | 48 | 49 | networks: 50 | metrics-network: 51 | internal: true 52 | relayer-network: 53 | driver: bridge 54 | 55 | volumes: 56 | redis_data: 57 | driver: local 58 | 59 | secrets: 60 | api_key: 61 | environment: API_KEY 62 | webhook_signing_key: 63 | environment: WEBHOOK_SIGNING_KEY 64 | keystore_passphrase: 65 | environment: KEYSTORE_PASSPHRASE 66 | -------------------------------------------------------------------------------- /examples/evm-turnkey-signer/config/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "relayers": [ 3 | { 4 | "id": "sepolia-example", 5 | "name": "Sepolia Example", 6 | "network": "sepolia", 7 | "paused": false, 8 | "notification_id": "notification-example", 9 | "signer_id": 
"turnkey-signer-evm", 10 | "network_type": "evm", 11 | "policies": { 12 | "min_balance": 0 13 | } 14 | } 15 | ], 16 | "notifications": [ 17 | { 18 | "id": "notification-example", 19 | "type": "webhook", 20 | "url": "", 21 | "signing_key": { 22 | "type": "env", 23 | "value": "WEBHOOK_SIGNING_KEY" 24 | } 25 | } 26 | ], 27 | "signers": [ 28 | { 29 | "id": "turnkey-signer-evm", 30 | "type": "turnkey", 31 | "config": { 32 | "api_public_key": "", 33 | "api_private_key": { 34 | "type": "env", 35 | "value": "TURNKEY_API_PRIVATE_KEY" 36 | }, 37 | "organization_id": "", 38 | "private_key_id": "", 39 | "public_key": "" 40 | } 41 | } 42 | ] 43 | } 44 | -------------------------------------------------------------------------------- /examples/evm-turnkey-signer/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | services: 4 | relayer: 5 | build: 6 | context: ../../ 7 | dockerfile: Dockerfile.development 8 | ports: 9 | - 8080:8080/tcp 10 | secrets: 11 | - api_key 12 | - webhook_signing_key 13 | - keystore_passphrase 14 | environment: 15 | REDIS_URL: ${REDIS_URL} 16 | RATE_LIMIT_REQUESTS_PER_SECOND: 10 17 | RATE_LIMIT_BURST: 50 18 | WEBHOOK_SIGNING_KEY: ${WEBHOOK_SIGNING_KEY} 19 | API_KEY: ${API_KEY} 20 | TURNKEY_API_PRIVATE_KEY: ${TURNKEY_API_PRIVATE_KEY} 21 | security_opt: 22 | - no-new-privileges 23 | networks: 24 | - relayer-network 25 | - metrics-network 26 | volumes: 27 | - ./config:/app/config/ 28 | depends_on: 29 | - redis 30 | restart: on-failure:5 31 | 32 | 33 | redis: 34 | image: redis:bookworm 35 | ports: 36 | - 6379:6379/tcp 37 | security_opt: 38 | - no-new-privileges 39 | volumes: 40 | - redis_data:/data 41 | command: [redis-server, --appendonly, yes, --save, '60', '1'] 42 | networks: 43 | - relayer-network 44 | - metrics-network 45 | restart: on-failure:5 46 | 47 | networks: 48 | metrics-network: 49 | internal: true 50 | relayer-network: 51 | driver: bridge 52 | 53 | volumes: 54 | redis_data: 55 | 
driver: local 56 | vault-data: 57 | driver: local 58 | 59 | 60 | secrets: 61 | api_key: 62 | environment: API_KEY 63 | webhook_signing_key: 64 | environment: WEBHOOK_SIGNING_KEY 65 | keystore_passphrase: 66 | environment: KEYSTORE_PASSPHRASE 67 | turnkey_api_private_key: 68 | environment: TURNKEY_API_PRIVATE_KEY 69 | -------------------------------------------------------------------------------- /examples/solana-google-cloud-kms-signer/.env.example: -------------------------------------------------------------------------------- 1 | REDIS_URL=redis://localhost:6379 2 | API_KEY= 3 | WEBHOOK_SIGNING_KEY= 4 | GOOGLE_CLOUD_KMS_PRIVATE_KEY_ID= 5 | GOOGLE_CLOUD_KMS_PRIVATE_KEY= 6 | GOOGLE_CLOUD_KMS_CLIENT_EMAIL= 7 | -------------------------------------------------------------------------------- /examples/solana-google-cloud-kms-signer/config/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "relayers": [ 3 | { 4 | "id": "solana-example", 5 | "name": "Solana Example", 6 | "network": "devnet", 7 | "paused": false, 8 | "signer_id": "google-cloud-kms-signer-solana", 9 | "network_type": "solana", 10 | "policies": { 11 | "fee_payment_strategy": "user", 12 | "min_balance": 0, 13 | "allowed_tokens": [ 14 | { 15 | "mint": "So11111111111111111111111111111111111111112" 16 | } 17 | ] 18 | } 19 | } 20 | ], 21 | "notifications": [ 22 | { 23 | "id": "notification-example", 24 | "type": "webhook", 25 | "url": "", 26 | "signing_key": { 27 | "type": "env", 28 | "value": "WEBHOOK_SIGNING_KEY" 29 | } 30 | } 31 | ], 32 | "signers": [ 33 | { 34 | "id": "google-cloud-kms-signer-solana", 35 | "type": "google_cloud_kms", 36 | "config": { 37 | "service_account": { 38 | "project_id": "", 39 | "private_key_id": { 40 | "type": "env", 41 | "value": "GOOGLE_CLOUD_KMS_PRIVATE_KEY_ID" 42 | }, 43 | "private_key": { 44 | "type": "env", 45 | "value": "GOOGLE_CLOUD_KMS_PRIVATE_KEY" 46 | }, 47 | "client_email": { 48 | "type": "env", 49 | "value": 
"GOOGLE_CLOUD_KMS_CLIENT_EMAIL" 50 | }, 51 | "client_id": "" 52 | }, 53 | "key": { 54 | "key_ring_id": "", 55 | "key_id": "" 56 | } 57 | } 58 | } 59 | ] 60 | } 61 | -------------------------------------------------------------------------------- /examples/solana-google-cloud-kms-signer/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | services: 3 | relayer: 4 | build: 5 | context: ../../ 6 | dockerfile: Dockerfile.development 7 | ports: 8 | - 8080:8080/tcp 9 | secrets: 10 | - api_key 11 | - webhook_signing_key 12 | - google_cloud_kms_private_key_id 13 | - google_cloud_kms_private_key 14 | - google_cloud_kms_client_email 15 | environment: 16 | REDIS_URL: ${REDIS_URL} 17 | RATE_LIMIT_REQUESTS_PER_SECOND: 10 18 | RATE_LIMIT_BURST: 50 19 | WEBHOOK_SIGNING_KEY: ${WEBHOOK_SIGNING_KEY} 20 | API_KEY: ${API_KEY} 21 | GOOGLE_CLOUD_KMS_PRIVATE_KEY_ID: ${GOOGLE_CLOUD_KMS_PRIVATE_KEY_ID} 22 | GOOGLE_CLOUD_KMS_PRIVATE_KEY: ${GOOGLE_CLOUD_KMS_PRIVATE_KEY} 23 | GOOGLE_CLOUD_KMS_CLIENT_EMAIL: ${GOOGLE_CLOUD_KMS_CLIENT_EMAIL} 24 | security_opt: 25 | - no-new-privileges 26 | networks: 27 | - relayer-network 28 | - metrics-network 29 | volumes: 30 | - ./config:/app/config/ 31 | depends_on: 32 | - redis 33 | restart: on-failure:5 34 | redis: 35 | image: redis:bookworm 36 | ports: 37 | - 6379:6379/tcp 38 | security_opt: 39 | - no-new-privileges 40 | volumes: 41 | - redis_data:/data 42 | command: 43 | - redis-server 44 | - --appendonly 45 | - 'yes' 46 | - --save 47 | - '60' 48 | - '1' 49 | networks: 50 | - relayer-network 51 | - metrics-network 52 | restart: on-failure:5 53 | networks: 54 | metrics-network: 55 | internal: true 56 | relayer-network: 57 | driver: bridge 58 | volumes: 59 | redis_data: 60 | driver: local 61 | vault-data: 62 | driver: local 63 | secrets: 64 | api_key: 65 | environment: API_KEY 66 | webhook_signing_key: 67 | environment: WEBHOOK_SIGNING_KEY 68 | google_cloud_kms_private_key_id: 69 | environment: 
GOOGLE_CLOUD_KMS_PRIVATE_KEY_ID 70 | google_cloud_kms_private_key: 71 | environment: GOOGLE_CLOUD_KMS_PRIVATE_KEY 72 | google_cloud_kms_client_email: 73 | environment: GOOGLE_CLOUD_KMS_CLIENT_EMAIL 74 | -------------------------------------------------------------------------------- /examples/solana-turnkey-signer/.env.example: -------------------------------------------------------------------------------- 1 | REDIS_URL=redis://redis:6379 2 | API_KEY= 3 | WEBHOOK_SIGNING_KEY= 4 | TURNKEY_API_PRIVATE_KEY= -------------------------------------------------------------------------------- /examples/solana-turnkey-signer/config/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "relayers": [ 3 | { 4 | "id": "solana-example", 5 | "name": "Solana Example", 6 | "network": "devnet", 7 | "paused": false, 8 | "signer_id": "turnkey-signer-solana", 9 | "network_type": "solana", 10 | "policies": { 11 | "fee_payment_strategy": "user", 12 | "min_balance": 0, 13 | "allowed_tokens": [ 14 | { 15 | "mint": "So11111111111111111111111111111111111111112" 16 | } 17 | ] 18 | } 19 | } 20 | ], 21 | "notifications": [ 22 | { 23 | "id": "notification-example", 24 | "type": "webhook", 25 | "url": "", 26 | "signing_key": { 27 | "type": "env", 28 | "value": "WEBHOOK_SIGNING_KEY" 29 | } 30 | } 31 | ], 32 | "signers": [ 33 | { 34 | "id": "turnkey-signer-solana", 35 | "type": "turnkey", 36 | "config": { 37 | "api_public_key": "", 38 | "api_private_key": { 39 | "type": "env", 40 | "value": "TURNKEY_API_PRIVATE_KEY" 41 | }, 42 | "organization_id": "", 43 | "private_key_id": "", 44 | "public_key": "" 45 | } 46 | } 47 | ] 48 | } 49 | -------------------------------------------------------------------------------- /examples/solana-turnkey-signer/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | services: 4 | relayer: 5 | build: 6 | context: ../../ 7 | dockerfile: Dockerfile.development 
8 | ports: 9 | - 8080:8080/tcp 10 | secrets: 11 | - api_key 12 | - webhook_signing_key 13 | - keystore_passphrase 14 | environment: 15 | REDIS_URL: ${REDIS_URL} 16 | RATE_LIMIT_REQUESTS_PER_SECOND: 10 17 | RATE_LIMIT_BURST: 50 18 | WEBHOOK_SIGNING_KEY: ${WEBHOOK_SIGNING_KEY} 19 | API_KEY: ${API_KEY} 20 | TURNKEY_API_PRIVATE_KEY: ${TURNKEY_API_PRIVATE_KEY} 21 | security_opt: 22 | - no-new-privileges 23 | networks: 24 | - relayer-network 25 | - metrics-network 26 | volumes: 27 | - ./config:/app/config/ 28 | depends_on: 29 | - redis 30 | restart: on-failure:5 31 | 32 | 33 | redis: 34 | image: redis:bookworm 35 | ports: 36 | - 6379:6379/tcp 37 | security_opt: 38 | - no-new-privileges 39 | volumes: 40 | - redis_data:/data 41 | command: [redis-server, --appendonly, yes, --save, '60', '1'] 42 | networks: 43 | - relayer-network 44 | - metrics-network 45 | restart: on-failure:5 46 | 47 | networks: 48 | metrics-network: 49 | internal: true 50 | relayer-network: 51 | driver: bridge 52 | 53 | volumes: 54 | redis_data: 55 | driver: local 56 | vault-data: 57 | driver: local 58 | 59 | 60 | secrets: 61 | api_key: 62 | environment: API_KEY 63 | webhook_signing_key: 64 | environment: WEBHOOK_SIGNING_KEY 65 | keystore_passphrase: 66 | environment: KEYSTORE_PASSPHRASE 67 | turnkey_api_private_key: 68 | environment: TURNKEY_API_PRIVATE_KEY 69 | -------------------------------------------------------------------------------- /examples/vault-secret-signer/.env.example: -------------------------------------------------------------------------------- 1 | REDIS_URL=redis://redis:6379 2 | API_KEY= 3 | WEBHOOK_SIGNING_KEY= 4 | VAULT_ROLE_ID= 5 | VAULT_SECRET_ID= 6 | -------------------------------------------------------------------------------- /examples/vault-secret-signer/config/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "relayers": [ 3 | { 4 | "id": "solana-example", 5 | "name": "Solana Example", 6 | "network": "devnet", 7 | 
"paused": false, 8 | "signer_id": "local-vault", 9 | "network_type": "solana", 10 | "policies": { 11 | "fee_payment_strategy": "user", 12 | "min_balance": 0, 13 | "allowed_tokens": [ 14 | { 15 | "mint": "So11111111111111111111111111111111111111112" 16 | } 17 | ] 18 | } 19 | } 20 | ], 21 | "notifications": [ 22 | { 23 | "id": "notification-example", 24 | "type": "webhook", 25 | "url": "", 26 | "signing_key": { 27 | "type": "env", 28 | "value": "WEBHOOK_SIGNING_KEY" 29 | } 30 | } 31 | ], 32 | "signers": [ 33 | { 34 | "id": "local-vault", 35 | "type": "vault", 36 | "config": { 37 | "address": "http://vault:8200", 38 | "role_id": { 39 | "type": "env", 40 | "value": "VAULT_ROLE_ID" 41 | }, 42 | "secret_id": { 43 | "type": "env", 44 | "value": "VAULT_SECRET_ID" 45 | }, 46 | "key_name": "my-app" 47 | } 48 | } 49 | ] 50 | } 51 | -------------------------------------------------------------------------------- /examples/vault-secret-signer/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | services: 4 | relayer: 5 | build: 6 | context: ../../ 7 | dockerfile: Dockerfile.development 8 | ports: 9 | - 8080:8080/tcp 10 | secrets: 11 | - api_key 12 | - webhook_signing_key 13 | - keystore_passphrase 14 | environment: 15 | REDIS_URL: ${REDIS_URL} 16 | RATE_LIMIT_REQUESTS_PER_SECOND: 10 17 | RATE_LIMIT_BURST: 50 18 | WEBHOOK_SIGNING_KEY: ${WEBHOOK_SIGNING_KEY} 19 | API_KEY: ${API_KEY} 20 | security_opt: 21 | - no-new-privileges 22 | networks: 23 | - relayer-network 24 | - metrics-network 25 | volumes: 26 | - ./config:/app/config/ 27 | depends_on: 28 | - redis 29 | restart: on-failure:5 30 | 31 | 32 | redis: 33 | image: redis:bookworm 34 | ports: 35 | - 6379:6379/tcp 36 | security_opt: 37 | - no-new-privileges 38 | volumes: 39 | - redis_data:/data 40 | command: [redis-server, --appendonly, yes, --save, '60', '1'] 41 | networks: 42 | - relayer-network 43 | - metrics-network 44 | restart: on-failure:5 45 | 46 | vault: 47 
| image: hashicorp/vault:1.19.0 48 | platform: linux/amd64 49 | ports: 50 | - 8200:8200 51 | environment: 52 | - VAULT_DEV_ROOT_TOKEN_ID=dev-only-token 53 | cap_add: 54 | - IPC_LOCK 55 | networks: 56 | - relayer-network 57 | command: [vault, server, -dev, -dev-listen-address=0.0.0.0:8200] 58 | restart: on-failure 59 | 60 | networks: 61 | metrics-network: 62 | internal: true 63 | relayer-network: 64 | driver: bridge 65 | 66 | volumes: 67 | redis_data: 68 | driver: local 69 | vault-data: 70 | driver: local 71 | 72 | 73 | secrets: 74 | api_key: 75 | environment: API_KEY 76 | webhook_signing_key: 77 | environment: WEBHOOK_SIGNING_KEY 78 | keystore_passphrase: 79 | environment: KEYSTORE_PASSPHRASE 80 | -------------------------------------------------------------------------------- /examples/vault-transit-signer/.env.example: -------------------------------------------------------------------------------- 1 | REDIS_URL=redis://redis:6379 2 | API_KEY= 3 | WEBHOOK_SIGNING_KEY= 4 | VAULT_ROLE_ID= 5 | VAULT_SECRET_ID= 6 | -------------------------------------------------------------------------------- /examples/vault-transit-signer/config/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "relayers": [ 3 | { 4 | "id": "solana-example", 5 | "name": "Solana Example", 6 | "network": "devnet", 7 | "paused": false, 8 | "signer_id": "local_vault_transit", 9 | "network_type": "solana", 10 | "policies": { 11 | "fee_payment_strategy": "user", 12 | "min_balance": 0, 13 | "allowed_tokens": [ 14 | { 15 | "mint": "So11111111111111111111111111111111111111112" 16 | } 17 | ] 18 | } 19 | } 20 | ], 21 | "notifications": [ 22 | { 23 | "id": "notification-example", 24 | "type": "webhook", 25 | "url": "", 26 | "signing_key": { 27 | "type": "env", 28 | "value": "WEBHOOK_SIGNING_KEY" 29 | } 30 | } 31 | ], 32 | "signers": [ 33 | { 34 | "id": "local_vault_transit", 35 | "type": "vault_transit", 36 | "config": { 37 | "address": 
"http://vault:8200", 38 | "role_id": { 39 | "type": "env", 40 | "value": "VAULT_ROLE_ID" 41 | }, 42 | "secret_id": { 43 | "type": "env", 44 | "value": "VAULT_SECRET_ID" 45 | }, 46 | "key_name": "my_signing_key", 47 | "pubkey": "" 48 | } 49 | } 50 | ] 51 | } 52 | -------------------------------------------------------------------------------- /examples/vault-transit-signer/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | services: 4 | relayer: 5 | build: 6 | context: ../../ 7 | dockerfile: Dockerfile.development 8 | ports: 9 | - 8080:8080/tcp 10 | secrets: 11 | - api_key 12 | - webhook_signing_key 13 | - keystore_passphrase 14 | environment: 15 | REDIS_URL: ${REDIS_URL} 16 | RATE_LIMIT_REQUESTS_PER_SECOND: 10 17 | RATE_LIMIT_BURST: 50 18 | WEBHOOK_SIGNING_KEY: ${WEBHOOK_SIGNING_KEY} 19 | API_KEY: ${API_KEY} 20 | security_opt: 21 | - no-new-privileges 22 | networks: 23 | - relayer-network 24 | - metrics-network 25 | volumes: 26 | - ./config:/app/config/ 27 | depends_on: 28 | - redis 29 | restart: on-failure:5 30 | 31 | 32 | redis: 33 | image: redis:bookworm 34 | ports: 35 | - 6379:6379/tcp 36 | security_opt: 37 | - no-new-privileges 38 | volumes: 39 | - redis_data:/data 40 | command: [redis-server, --appendonly, yes, --save, '60', '1'] 41 | networks: 42 | - relayer-network 43 | - metrics-network 44 | restart: on-failure:5 45 | 46 | vault: 47 | image: hashicorp/vault:1.19.0 48 | platform: linux/amd64 49 | ports: 50 | - 8200:8200 51 | environment: 52 | - VAULT_DEV_ROOT_TOKEN_ID=dev-only-token 53 | cap_add: 54 | - IPC_LOCK 55 | networks: 56 | - relayer-network 57 | command: [vault, server, -dev, -dev-listen-address=0.0.0.0:8200] 58 | restart: on-failure 59 | 60 | networks: 61 | metrics-network: 62 | internal: true 63 | relayer-network: 64 | driver: bridge 65 | 66 | volumes: 67 | redis_data: 68 | driver: local 69 | vault-data: 70 | driver: local 71 | 72 | 73 | secrets: 74 | api_key: 75 | environment: 
API_KEY 76 | webhook_signing_key: 77 | environment: WEBHOOK_SIGNING_KEY 78 | keystore_passphrase: 79 | environment: KEYSTORE_PASSPHRASE 80 | -------------------------------------------------------------------------------- /helpers/generate_openapi.rs: -------------------------------------------------------------------------------- 1 | //! # OpenAPI Specification Generator 2 | //! 3 | //! This utility generates an OpenAPI specification JSON file from the 4 | //! OpenZeppelin Relayer API definitions. It doesn't require starting the full server 5 | //! and can be used as part of documentation or CI/CD workflows. 6 | //! 7 | //! ## Usage 8 | //! 9 | //! Run the utility with optional output path parameter: 10 | //! 11 | //! ```bash 12 | //! # Generate to default location (openapi.json in current directory) 13 | //! cargo run --example generate_openapi 14 | //! 15 | //! # Or specify a custom output path 16 | //! cargo run --example generate_openapi -- ./docs/api/openapi.json 17 | //! ``` 18 | //! 19 | //! ## Features 20 | //! 21 | //! - Generates a complete OpenAPI specification from code annotations 22 | //! - Includes all API endpoints including Utopia network endpoints 23 | //! - Creates output directories automatically if they don't exist 24 | //! - Pretty-prints the JSON for better readability 25 | //! 26 | //! ## Integration 27 | //! 28 | //! This utility is commonly used in CI/CD pipelines to generate up-to-date API documentation 29 | //! whenever the API changes. The generated file can be committed to the repository 30 | //! or published to API documentation platforms. 
31 | use std::env; 32 | use std::fs; 33 | use std::path::Path; 34 | 35 | use openzeppelin_relayer::openapi::ApiDoc; 36 | use utoipa::OpenApi; 37 | 38 | fn main() -> Result<(), Box> { 39 | let args: Vec = env::args().collect(); 40 | let output_path = args.get(1).map(|s| s.as_str()).unwrap_or("openapi.json"); 41 | 42 | if let Some(parent) = Path::new(output_path).parent() { 43 | if !parent.exists() { 44 | fs::create_dir_all(parent)?; 45 | } 46 | } 47 | 48 | println!("Generating OpenAPI specification to {}", output_path); 49 | 50 | let openapi = ApiDoc::openapi(); 51 | 52 | let json = serde_json::to_string_pretty(&openapi)?; 53 | 54 | fs::write(output_path, json)?; 55 | 56 | println!("OpenAPI specification successfully generated!"); 57 | 58 | Ok(()) 59 | } 60 | -------------------------------------------------------------------------------- /helpers/generate_uuid.rs: -------------------------------------------------------------------------------- 1 | //! UUID Key Generation Tool 2 | //! 3 | //! This tool generates random UUID key and prints it to the console. 4 | //! 5 | //! # Usage 6 | //! 7 | //! ```bash 8 | //! cargo run --example generate_uuid 9 | //! 
``` 10 | use eyre::Result; 11 | use uuid::Uuid; 12 | 13 | /// Main entry point for uuid key generation tool 14 | fn main() -> Result<()> { 15 | let uuid = Uuid::new_v4().to_string(); 16 | 17 | println!("Generated new uuid: {}", uuid); 18 | 19 | Ok(()) 20 | } 21 | 22 | #[cfg(test)] 23 | mod tests { 24 | use super::*; 25 | use std::str::FromStr; 26 | 27 | #[test] 28 | fn test_uuid_generation() { 29 | let uuid_string = Uuid::new_v4().to_string(); 30 | 31 | let parsed_uuid = Uuid::from_str(&uuid_string); 32 | assert!(parsed_uuid.is_ok(), "Generated string is not a valid UUID"); 33 | 34 | let uuid = parsed_uuid.unwrap(); 35 | assert_eq!(uuid.get_version_num(), 4, "UUID is not version 4"); 36 | 37 | let uuid1 = Uuid::new_v4(); 38 | let uuid2 = Uuid::new_v4(); 39 | assert_ne!(uuid1, uuid2, "Two generated UUIDs should not be equal"); 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /netlify.toml: -------------------------------------------------------------------------------- 1 | [build] 2 | base = "/" 3 | command = "rustup default stable && cargo install cargo-make --locked --force && cd docs && npm ci && npm run docs && cd .. 
&& cargo make rust-antora" 4 | publish = "docs/build/site" 5 | 6 | [build.environment] 7 | PKG_CONFIG_PATH = "/home/linuxbrew/.linuxbrew/lib/pkgconfig" 8 | -------------------------------------------------------------------------------- /plugins/example.ts: -------------------------------------------------------------------------------- 1 | import { ethers } from "ethers"; 2 | 3 | function sleep(ms: number) { 4 | return new Promise((resolve) => setTimeout(resolve, ms)); 5 | } 6 | 7 | async function main() { 8 | console.log("Running plugin with ethers:", ethers.version); 9 | await sleep(5000); 10 | console.log("Plugin finished"); 11 | } 12 | 13 | main(); 14 | -------------------------------------------------------------------------------- /plugins/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "plugins", 3 | "version": "1.0.0", 4 | "description": "", 5 | "main": "index.js", 6 | "scripts": { 7 | "test": "echo \"Error: no test specified\" && exit 1" 8 | }, 9 | "keywords": [], 10 | "author": "", 11 | "license": "", 12 | "dependencies": { 13 | "ethers": "^6.14.3" 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /plugins/pnpm-lock.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | lockfileVersion: '9.0' 3 | settings: 4 | autoInstallPeers: true 5 | excludeLinksFromLockfile: false 6 | importers: 7 | .: 8 | dependencies: 9 | ethers: 10 | specifier: ^6.14.3 11 | version: 6.14.3 12 | packages: 13 | '@adraffy/ens-normalize@1.10.1': 14 | resolution: {integrity: sha512-96Z2IP3mYmF1Xg2cDm8f1gWGf/HUVedQ3FMifV4kG/PQ4yEP51xDtRAEfhVNt5f/uzpNkZHwWQuUcu6D6K+Ekw==} 15 | '@noble/curves@1.2.0': 16 | resolution: {integrity: sha512-oYclrNgRaM9SsBUBVbb8M6DTV7ZHRTKugureoYEncY5c65HOmRzvSiTE3y5CYaPYJA/GVkrhXEoF0M3Ya9PMnw==} 17 | '@noble/hashes@1.3.2': 18 | resolution: {integrity: 
sha512-MVC8EAQp7MvEcm30KWENFjgR+Mkmf+D189XJTkFIlwohU5hcBbn1ZkKq7KVTi2Hme3PMGF390DaL52beVrIihQ==} 19 | engines: {node: '>= 16'} 20 | '@types/node@22.7.5': 21 | resolution: {integrity: sha512-jML7s2NAzMWc//QSJ1a3prpk78cOPchGvXJsC3C6R6PSMoooztvRVQEz89gmBTBY1SPMaqo5teB4uNHPdetShQ==} 22 | aes-js@4.0.0-beta.5: 23 | resolution: {integrity: sha512-G965FqalsNyrPqgEGON7nIx1e/OVENSgiEIzyC63haUMuvNnwIgIjMs52hlTCKhkBny7A2ORNlfY9Zu+jmGk1Q==} 24 | ethers@6.14.3: 25 | resolution: {integrity: sha512-qq7ft/oCJohoTcsNPFaXSQUm457MA5iWqkf1Mb11ujONdg7jBI6sAOrHaTi3j0CBqIGFSCeR/RMc+qwRRub7IA==} 26 | engines: {node: '>=14.0.0'} 27 | tslib@2.7.0: 28 | resolution: {integrity: sha512-gLXCKdN1/j47AiHiOkJN69hJmcbGTHI0ImLmbYLHykhgeN0jVGola9yVjFgzCUklsZQMW55o+dW7IXv3RCXDzA==} 29 | undici-types@6.19.8: 30 | resolution: {integrity: sha512-ve2KP6f/JnbPBFyobGHuerC9g1FYGn/F8n1LWTwNxCEzd6IfqTwUQcNXgEtmmQ6DlRrC1hrSrBnCZPokRrDHjw==} 31 | ws@8.17.1: 32 | resolution: {integrity: sha512-6XQFvXTkbfUOZOKKILFG1PDK2NDQs4azKQl26T0YS5CxqWLgXajbPZ+h4gZekJyRqFU8pvnbAbbs/3TgRPy+GQ==} 33 | engines: {node: '>=10.0.0'} 34 | peerDependencies: 35 | bufferutil: ^4.0.1 36 | utf-8-validate: '>=5.0.2' 37 | peerDependenciesMeta: 38 | bufferutil: 39 | optional: true 40 | utf-8-validate: 41 | optional: true 42 | snapshots: 43 | '@adraffy/ens-normalize@1.10.1': {} 44 | '@noble/curves@1.2.0': 45 | dependencies: 46 | '@noble/hashes': 1.3.2 47 | '@noble/hashes@1.3.2': {} 48 | '@types/node@22.7.5': 49 | dependencies: 50 | undici-types: 6.19.8 51 | aes-js@4.0.0-beta.5: {} 52 | ethers@6.14.3: 53 | dependencies: 54 | '@adraffy/ens-normalize': 1.10.1 55 | '@noble/curves': 1.2.0 56 | '@noble/hashes': 1.3.2 57 | '@types/node': 22.7.5 58 | aes-js: 4.0.0-beta.5 59 | tslib: 2.7.0 60 | ws: 8.17.1 61 | transitivePeerDependencies: 62 | - bufferutil 63 | - utf-8-validate 64 | tslib@2.7.0: {} 65 | undici-types@6.19.8: {} 66 | ws@8.17.1: {} 67 | -------------------------------------------------------------------------------- /rust-toolchain.toml: 
-------------------------------------------------------------------------------- 1 | [toolchain] 2 | channel = "1.86.0" 3 | profile = "minimal" 4 | components = [ 5 | "rustc", 6 | "cargo", 7 | "rustfmt", 8 | "clippy", 9 | "rust-docs", 10 | "llvm-tools", 11 | "rust-src", 12 | ] 13 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | # Basic formatting 2 | max_width = 100 3 | tab_spaces = 4 4 | hard_tabs = false 5 | edition = "2024" 6 | 7 | # Code organization 8 | reorder_imports = true 9 | reorder_modules = true 10 | 11 | # Formatting preferences 12 | merge_derives = true 13 | 14 | # Function and control flow formatting 15 | fn_params_layout = "Tall" 16 | 17 | # Code style preferences 18 | use_try_shorthand = true 19 | use_field_init_shorthand = true 20 | -------------------------------------------------------------------------------- /scripts/docker_compose.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | if [ -f .env ]; then 3 | export $(grep -v '^#' .env | xargs) 4 | fi 5 | 6 | # Function to run docker compose up 7 | # If METRICS_ENABLED is true, run docker compose up with the metrics profile 8 | docker_compose_up() { 9 | if [ "$METRICS_ENABLED" = "true" ]; then 10 | docker compose --profile metrics up -d 11 | else 12 | docker compose up -d 13 | fi 14 | } 15 | 16 | # Function to run docker compose down 17 | # If METRICS_ENABLED is true, run docker compose down with the metrics profile 18 | docker_compose_down() { 19 | if [ "$METRICS_ENABLED" = "true" ]; then 20 | docker compose --profile metrics down 21 | else 22 | docker compose down 23 | fi 24 | } 25 | 26 | # Check command-line argument 27 | case "$1" in 28 | up) 29 | docker_compose_up 30 | ;; 31 | down) 32 | docker_compose_down 33 | ;; 34 | *) 35 | echo "Usage: $0 {up|down}" 36 | exit 1 37 | ;; 38 | esac 39 | 
-------------------------------------------------------------------------------- /scripts/rust_antora.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Exit immediately if a command exits with a non-zero status 4 | set -euo pipefail 5 | 6 | # Base directories 7 | REPO_ROOT="$PWD" 8 | DOCS_DIR="$REPO_ROOT/docs" 9 | NAME=$(grep '^name:' "$DOCS_DIR/antora.yml" | awk '{print $2}') 10 | VERSION=$(grep '^version:' "$DOCS_DIR/antora.yml" | awk '{print $2}') 11 | BUILD_DIR="$DOCS_DIR/build/site" 12 | RUST_DOCS_DIR="$DOCS_DIR/rust_docs" 13 | API_DOCS_FILE="$DOCS_DIR/api_docs.html" 14 | 15 | # Check if the target directory exists 16 | if [ ! -d "$BUILD_DIR" ]; then 17 | echo "Error: Build directory '$BUILD_DIR' not found." 18 | exit 1 19 | fi 20 | 21 | # Copy the Rust docs to the target directory 22 | if [ -d "$RUST_DOCS_DIR" ] && [ "$(ls -A "$RUST_DOCS_DIR")" ]; then 23 | echo "Copying '$RUST_DOCS_DIR' to '$BUILD_DIR'..." 24 | cp -r "$RUST_DOCS_DIR/doc/"* "$BUILD_DIR/" 25 | echo "Rust docs successfully copied to '$BUILD_DIR'." 26 | # Remove the original Rust docs directory 27 | echo "Removing original Rust docs directory '$RUST_DOCS_DIR'..." 28 | rm -rf "$RUST_DOCS_DIR" 29 | echo "Original Rust docs directory '$RUST_DOCS_DIR' removed." 30 | else 31 | echo "Source directory '$RUST_DOCS_DIR' does not exist or is empty." 32 | fi 33 | 34 | # Copy the API docs file to the target directory 35 | if [ -f "$API_DOCS_FILE" ]; then 36 | echo "Copying '$API_DOCS_FILE' to '$BUILD_DIR'..." 37 | cp "$API_DOCS_FILE" "$BUILD_DIR/" 38 | echo "API docs successfully copied to '$BUILD_DIR'." 39 | # Remove the original API docs file 40 | echo "Removing original API docs file '$API_DOCS_FILE'..." 41 | rm "$API_DOCS_FILE" 42 | echo "Original API docs file '$API_DOCS_FILE' removed." 
43 | fi 44 | -------------------------------------------------------------------------------- /src/api/controllers/mod.rs: -------------------------------------------------------------------------------- 1 | //! # API Controllers Module 2 | //! 3 | //! Handles HTTP request processing and business logic coordination. 4 | //! 5 | //! ## Controllers 6 | //! 7 | //! * `relayer` - Transaction and relayer management endpoints 8 | 9 | pub mod relayer; 10 | -------------------------------------------------------------------------------- /src/api/mod.rs: -------------------------------------------------------------------------------- 1 | //! # API Module 2 | //! 3 | //! Contains HTTP API implementation for the relayer service. 4 | //! 5 | //! ## Structure 6 | //! 7 | //! * `controllers` - Request handling and business logic 8 | //! * `routes` - API endpoint definitions and routing 9 | 10 | pub mod controllers; 11 | 12 | pub mod routes; 13 | -------------------------------------------------------------------------------- /src/api/routes/docs/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod relayer_docs; 2 | -------------------------------------------------------------------------------- /src/api/routes/health.rs: -------------------------------------------------------------------------------- 1 | //! This module provides a health check endpoint for the API. 2 | //! 3 | //! The `/health` endpoint can be used to verify that the service is running and responsive. 4 | use actix_web::{get, web, HttpResponse}; 5 | 6 | /// Health routes implementation 7 | /// 8 | /// Note: OpenAPI documentation for these endpoints can be found in the `openapi.rs` file 9 | /// Handles the `/health` endpoint. 10 | /// 11 | /// Returns an `HttpResponse` with a status of `200 OK` and a body of `"OK"`. 
12 | #[utoipa::path( 13 | get, 14 | path = "/v1/health", 15 | tag = "Health", 16 | responses( 17 | (status = 200, description = "Service is healthy", body = String), 18 | (status = 500, description = "Internal server error", body = String), 19 | ) 20 | )] 21 | #[get("/health")] 22 | async fn health() -> Result { 23 | Ok(HttpResponse::Ok().body("OK")) 24 | } 25 | 26 | /// Initializes the health check service. 27 | /// 28 | /// Registers the `health` endpoint with the provided service configuration. 29 | pub fn init(cfg: &mut web::ServiceConfig) { 30 | cfg.service(health); 31 | } 32 | 33 | #[cfg(test)] 34 | mod tests { 35 | use super::*; 36 | use actix_web::{test, App}; 37 | 38 | #[actix_web::test] 39 | async fn test_health_endpoint() { 40 | // Arrange 41 | let app = test::init_service(App::new().configure(init)).await; 42 | 43 | // Act 44 | let req = test::TestRequest::get().uri("/health").to_request(); 45 | let resp = test::call_service(&app, req).await; 46 | 47 | // Assert 48 | assert!(resp.status().is_success()); 49 | 50 | let body = test::read_body(resp).await; 51 | assert_eq!(body, "OK"); 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /src/api/routes/mod.rs: -------------------------------------------------------------------------------- 1 | //! # API Routes Module 2 | //! 3 | //! Configures HTTP routes for the relayer service API. 4 | //! 5 | //! ## Routes 6 | //! 7 | //! * `/health` - Health check endpoints 8 | //! 
* `/relayers` - Relayer management endpoints 9 | 10 | pub mod docs; 11 | pub mod health; 12 | pub mod metrics; 13 | pub mod relayer; 14 | 15 | use actix_web::web; 16 | pub fn configure_routes(cfg: &mut web::ServiceConfig) { 17 | cfg.configure(health::init) 18 | .configure(relayer::init) 19 | .configure(metrics::init); 20 | } 21 | -------------------------------------------------------------------------------- /src/bootstrap/initialize_app_state.rs: -------------------------------------------------------------------------------- 1 | //! Application state initialization 2 | //! 3 | //! This module contains functions for initializing the application state, 4 | //! including setting up repositories, job queues, and other necessary components. 5 | use crate::{ 6 | jobs::{self, Queue}, 7 | models::{AppState, DefaultAppState}, 8 | repositories::{ 9 | InMemoryNetworkRepository, InMemoryNotificationRepository, InMemoryPluginRepository, 10 | InMemoryRelayerRepository, InMemorySignerRepository, InMemoryTransactionCounter, 11 | InMemoryTransactionRepository, RelayerRepositoryStorage, 12 | }, 13 | }; 14 | use actix_web::web; 15 | use color_eyre::Result; 16 | use std::sync::Arc; 17 | 18 | /// Initializes application state 19 | /// 20 | /// # Returns 21 | /// 22 | /// * `Result>` - Initialized application state 23 | /// 24 | /// # Errors 25 | /// 26 | /// Returns error if: 27 | /// - Repository initialization fails 28 | /// - Configuration loading fails 29 | pub async fn initialize_app_state() -> Result> { 30 | let relayer_repository = Arc::new(RelayerRepositoryStorage::in_memory( 31 | InMemoryRelayerRepository::new(), 32 | )); 33 | let transaction_repository = Arc::new(InMemoryTransactionRepository::new()); 34 | let signer_repository = Arc::new(InMemorySignerRepository::new()); 35 | let notification_repository = Arc::new(InMemoryNotificationRepository::new()); 36 | let network_repository = Arc::new(InMemoryNetworkRepository::new()); 37 | let transaction_counter_store = 
Arc::new(InMemoryTransactionCounter::new()); 38 | let queue = Queue::setup().await?; 39 | let job_producer = Arc::new(jobs::JobProducer::new(queue.clone())); 40 | let plugin_repository = Arc::new(InMemoryPluginRepository::new()); 41 | 42 | let app_state = web::ThinData(AppState { 43 | relayer_repository, 44 | transaction_repository, 45 | signer_repository, 46 | notification_repository, 47 | network_repository, 48 | transaction_counter_store, 49 | job_producer, 50 | plugin_repository, 51 | }); 52 | 53 | Ok(app_state) 54 | } 55 | -------------------------------------------------------------------------------- /src/bootstrap/initialize_relayers.rs: -------------------------------------------------------------------------------- 1 | //! Relayer initialization 2 | //! 3 | //! This module contains functions for initializing relayers, ensuring they are 4 | //! properly configured and ready for operation. 5 | use crate::{ 6 | domain::{get_network_relayer, Relayer}, 7 | jobs::JobProducer, 8 | models::AppState, 9 | repositories::Repository, 10 | }; 11 | use actix_web::web::ThinData; 12 | 13 | use color_eyre::{eyre::WrapErr, Report, Result}; 14 | use futures::future::try_join_all; 15 | use log::info; 16 | 17 | async fn initialize_relayer( 18 | relayer_id: String, 19 | app_state: ThinData>, 20 | ) -> Result<()> { 21 | let relayer_service = get_network_relayer(relayer_id.clone(), &app_state).await?; 22 | 23 | info!("Initializing relayer: {}", relayer_id.clone()); 24 | 25 | relayer_service.initialize_relayer().await?; 26 | 27 | Ok::<(), Report>(()) 28 | } 29 | 30 | pub async fn initialize_relayers(app_state: ThinData>) -> Result<()> { 31 | let relayers = app_state.relayer_repository.list_all().await?; 32 | 33 | let relayer_futures = relayers.iter().map(|relayer| { 34 | let app_state = app_state.clone(); 35 | async move { initialize_relayer(relayer.id.clone(), app_state).await } 36 | }); 37 | 38 | try_join_all(relayer_futures) 39 | .await 40 | .wrap_err("Failed to initialize 
relayers")?; 41 | Ok(()) 42 | } 43 | -------------------------------------------------------------------------------- /src/bootstrap/mod.rs: -------------------------------------------------------------------------------- 1 | //! Initialization routines for the relayer system 2 | //! 3 | //! This module contains functions and utilities for initializing various 4 | //! components of the relayer system, including relayers, configuration, 5 | //! application state, and workers. 6 | //! 7 | //! # Submodules 8 | //! 9 | //! - `initialize_relayers`: Functions for initializing relayers 10 | //! - `config_processor`: Functions for processing configuration files 11 | //! - `initialize_app_state`: Functions for initializing application state 12 | //! - `initialize_workers`: Functions for initializing background workers 13 | mod initialize_relayers; 14 | pub use initialize_relayers::*; 15 | 16 | mod config_processor; 17 | pub use config_processor::*; 18 | 19 | mod initialize_app_state; 20 | pub use initialize_app_state::*; 21 | 22 | mod initialize_workers; 23 | pub use initialize_workers::*; 24 | -------------------------------------------------------------------------------- /src/config/config_file/network/solana.rs: -------------------------------------------------------------------------------- 1 | //! Solana Network Configuration 2 | //! 3 | //! This module provides configuration support for Solana blockchain networks including 4 | //! mainnet-beta, testnet, devnet, and custom Solana-compatible networks. 5 | //! 6 | //! ## Key Features 7 | //! 8 | //! - **Full inheritance support**: Solana networks can inherit from other Solana networks 9 | //! - **Standard validation**: Inherits all common field validation requirements 10 | //! 
- **Type safety**: Inheritance only allowed between Solana networks 11 | 12 | use super::common::NetworkConfigCommon; 13 | use crate::config::ConfigFileError; 14 | use serde::{Deserialize, Serialize}; 15 | 16 | /// Configuration specific to Solana networks. 17 | #[derive(Debug, Serialize, Deserialize, Clone)] 18 | #[serde(deny_unknown_fields)] 19 | pub struct SolanaNetworkConfig { 20 | /// Common network fields. 21 | #[serde(flatten)] 22 | pub common: NetworkConfigCommon, 23 | // Additional Solana-specific fields can be added here. 24 | } 25 | 26 | impl SolanaNetworkConfig { 27 | /// Validates the specific configuration fields for a Solana network. 28 | /// 29 | /// # Returns 30 | /// - `Ok(())` if the Solana configuration is valid. 31 | /// - `Err(ConfigFileError)` if validation fails (e.g., missing fields, invalid URLs). 32 | pub fn validate(&self) -> Result<(), ConfigFileError> { 33 | self.common.validate()?; 34 | Ok(()) 35 | } 36 | 37 | /// Merges this Solana configuration with a parent Solana configuration. 38 | /// Parent values are used as defaults, child values take precedence. 39 | pub fn merge_with_parent(&self, parent: &Self) -> Self { 40 | Self { 41 | common: self.common.merge_with_parent(&parent.common), 42 | // Add Solana-specific field merging here as they are added to the struct 43 | } 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /src/config/config_file/network/stellar.rs: -------------------------------------------------------------------------------- 1 | //! Stellar Network Configuration 2 | //! 3 | //! This module provides configuration support for Stellar blockchain networks including 4 | //! Stellar mainnet (Pubnet), testnet, and custom Stellar-compatible networks. 5 | //! 6 | //! ## Key Features 7 | //! 8 | //! - **Full inheritance support**: Stellar networks can inherit from other Stellar networks 9 | //! 
- **Network passphrase**: Critical field for transaction signing and network identification 10 | //! - **Standard validation**: Inherits all common field validation requirements 11 | //! - **Type safety**: Inheritance only allowed between Stellar networks 12 | 13 | use super::common::NetworkConfigCommon; 14 | use crate::config::ConfigFileError; 15 | use serde::{Deserialize, Serialize}; 16 | 17 | /// Configuration specific to Stellar networks. 18 | #[derive(Debug, Serialize, Deserialize, Clone)] 19 | #[serde(deny_unknown_fields)] 20 | pub struct StellarNetworkConfig { 21 | /// Common network fields. 22 | #[serde(flatten)] 23 | pub common: NetworkConfigCommon, 24 | /// The passphrase for the Stellar network. 25 | pub passphrase: Option, 26 | // Additional Stellar-specific fields can be added here. 27 | } 28 | 29 | impl StellarNetworkConfig { 30 | /// Validates the specific configuration fields for a Stellar network. 31 | /// 32 | /// # Returns 33 | /// - `Ok(())` if the Stellar configuration is valid. 34 | /// - `Err(ConfigFileError)` if validation fails (e.g., missing fields, invalid URLs). 35 | pub fn validate(&self) -> Result<(), ConfigFileError> { 36 | self.common.validate()?; 37 | Ok(()) 38 | } 39 | 40 | /// Merges this Stellar configuration with a parent Stellar configuration. 41 | /// Parent values are used as defaults, child values take precedence. 
42 | pub fn merge_with_parent(&self, parent: &Self) -> Self { 43 | Self { 44 | common: self.common.merge_with_parent(&parent.common), 45 | passphrase: self 46 | .passphrase 47 | .clone() 48 | .or_else(|| parent.passphrase.clone()), 49 | // Add Stellar-specific field merging here as they are added to the struct 50 | } 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /src/config/config_file/plugin.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashSet; 2 | 3 | use crate::config::ConfigFileError; 4 | use serde::{Deserialize, Serialize}; 5 | 6 | // TODO: in case we want to support other languages and add 7 | // more flexibility to the plugins folder, we should 8 | // move this to a config file 9 | const PLUGIN_FILE_TYPE: &str = ".ts"; 10 | const PLUGIN_LANG: &str = "typescript"; 11 | 12 | #[derive(Debug, Serialize, Deserialize, Clone)] 13 | pub struct PluginFileConfig { 14 | pub id: String, 15 | pub path: String, 16 | } 17 | 18 | pub struct PluginsFileConfig { 19 | pub plugins: Vec, 20 | } 21 | 22 | impl PluginsFileConfig { 23 | pub fn new(plugins: Vec) -> Self { 24 | Self { plugins } 25 | } 26 | 27 | pub fn validate(&self) -> Result<(), ConfigFileError> { 28 | let mut ids = HashSet::new(); 29 | for plugin in &self.plugins { 30 | if !ids.insert(plugin.id.clone()) { 31 | return Err(ConfigFileError::DuplicateId(plugin.id.clone())); 32 | } 33 | 34 | if plugin.id.is_empty() { 35 | return Err(ConfigFileError::MissingField("id".into())); 36 | } 37 | 38 | if plugin.path.is_empty() { 39 | return Err(ConfigFileError::MissingField("path".into())); 40 | } 41 | 42 | if !plugin.path.ends_with(PLUGIN_FILE_TYPE) { 43 | return Err(ConfigFileError::InvalidFormat(format!( 44 | "Plugin path must be a {} file (ends with '{}')", 45 | PLUGIN_LANG, PLUGIN_FILE_TYPE 46 | ))); 47 | } 48 | } 49 | 50 | Ok(()) 51 | } 52 | } 53 | 
-------------------------------------------------------------------------------- /src/config/error.rs: -------------------------------------------------------------------------------- 1 | //! Error types for configuration system. 2 | //! 3 | //! This module defines all possible error types used in the configuration system. 4 | 5 | use thiserror::Error; 6 | 7 | #[derive(Error, Debug)] 8 | pub enum ConfigFileError { 9 | #[error("Invalid ID length: {0}")] 10 | InvalidIdLength(String), 11 | #[error("Invalid ID format: {0}")] 12 | InvalidIdFormat(String), 13 | #[error("Missing required field: {0}")] 14 | MissingField(String), 15 | #[error("IO error: {0}")] 16 | IoError(#[from] std::io::Error), 17 | #[error("JSON error: {0}")] 18 | JsonError(#[from] serde_json::Error), 19 | #[error("Duplicate id error: {0}")] 20 | DuplicateId(String), 21 | #[error("Invalid network type: {0}")] 22 | InvalidNetworkType(String), 23 | #[error("Invalid network name for {network_type}: {name}")] 24 | InvalidNetwork { network_type: String, name: String }, 25 | #[error("Invalid policy: {0}")] 26 | InvalidPolicy(String), 27 | #[error("Internal error: {0}")] 28 | InternalError(String), 29 | #[error("Missing env var: {0}")] 30 | MissingEnvVar(String), 31 | #[error("Invalid format: {0}")] 32 | InvalidFormat(String), 33 | #[error("File not found: {0}")] 34 | FileNotFound(String), 35 | #[error("Invalid reference: {0}")] 36 | InvalidReference(String), 37 | #[error("File read error: {0}")] 38 | FileRead(String), 39 | #[error("Test Signer error: {0}")] 40 | TestSigner(String), 41 | #[error("Incompatible inheritance type: {0}")] 42 | IncompatibleInheritanceType(String), 43 | #[error("Circular inheritance detected: {0}")] 44 | CircularInheritance(String), 45 | #[error("Maximum inheritance depth exceeded: {0}")] 46 | MaxInheritanceDepthExceeded(String), 47 | #[error("Invalid operation: {0}")] 48 | InvalidOperation(String), 49 | } 50 | 
-------------------------------------------------------------------------------- /src/config/mod.rs: -------------------------------------------------------------------------------- 1 | //! Configuration system for OpenZeppelin Relayer. 2 | //! 3 | //! This module handles: 4 | //! - Loading and parsing config files 5 | //! - Environment variable integration 6 | //! - Configuration validation 7 | //! - Type-safe config access 8 | //! 9 | //! # Structure 10 | //! 11 | //! Configuration is organized into sections: 12 | //! - Relayers: Network-specific relayer configurations 13 | //! - Signers: Key management and signing configurations 14 | //! - Notifications: Alert and monitoring configurations 15 | //! - Networks: Custom and overridden network definitions 16 | mod server_config; 17 | pub use server_config::*; 18 | 19 | mod config_file; 20 | pub use config_file::*; 21 | 22 | mod rate_limit; 23 | pub use rate_limit::*; 24 | 25 | mod error; 26 | pub use error::*; 27 | -------------------------------------------------------------------------------- /src/constants/authorization.rs: -------------------------------------------------------------------------------- 1 | pub const AUTHORIZATION_HEADER_NAME: &str = "Authorization"; 2 | pub const AUTHORIZATION_HEADER_VALUE_PREFIX: &str = "Bearer "; 3 | -------------------------------------------------------------------------------- /src/constants/evm_transaction.rs: -------------------------------------------------------------------------------- 1 | use crate::models::evm::Speed; 2 | 3 | pub const DEFAULT_TX_VALID_TIMESPAN: i64 = 8 * 60 * 60 * 1000; // 8 hours in milliseconds 4 | 5 | pub const DEFAULT_TRANSACTION_SPEED: Speed = Speed::Fast; 6 | 7 | // Maximum number of transaction attempts before considering a NOOP 8 | pub const MAXIMUM_TX_ATTEMPTS: usize = 50; 9 | // Maximum number of NOOP transactions to attempt 10 | pub const MAXIMUM_NOOP_RETRY_ATTEMPTS: u32 = 50; 11 | 
-------------------------------------------------------------------------------- /src/constants/mod.rs: -------------------------------------------------------------------------------- 1 | //! This module contains all the constant values used in the system 2 | mod relayer; 3 | pub use relayer::*; 4 | 5 | mod worker; 6 | pub use worker::*; 7 | 8 | mod token; 9 | pub use token::*; 10 | 11 | mod authorization; 12 | pub use authorization::*; 13 | 14 | mod evm_transaction; 15 | pub use evm_transaction::*; 16 | 17 | mod stellar_transaction; 18 | pub use stellar_transaction::*; 19 | 20 | mod public_endpoints; 21 | pub use public_endpoints::*; 22 | 23 | mod validation; 24 | pub use validation::*; 25 | 26 | mod oracles; 27 | pub use oracles::*; 28 | 29 | mod retry; 30 | pub use retry::*; 31 | -------------------------------------------------------------------------------- /src/constants/oracles.rs: -------------------------------------------------------------------------------- 1 | pub const OPTIMISM_GAS_PRICE_ORACLE_ADDRESS: &str = "0x420000000000000000000000000000000000000F"; 2 | -------------------------------------------------------------------------------- /src/constants/public_endpoints.rs: -------------------------------------------------------------------------------- 1 | pub const PUBLIC_ENDPOINTS: &[&str] = &["/swagger-ui", "/api-docs"]; 2 | -------------------------------------------------------------------------------- /src/constants/relayer.rs: -------------------------------------------------------------------------------- 1 | //! Default minimum balance constants for different blockchain networks 2 | //! These values are used to ensure relayers maintain sufficient funds for operation. 
//! Default minimum balance constants for different blockchain networks
//! These values are used to ensure relayers maintain sufficient funds for operation.

// NOTE(review): value is 1 wei, but the old comment claimed "0.001 ETH"
// (which would be 1_000_000_000_000_000 wei) — confirm the intended default.
pub const DEFAULT_EVM_MIN_BALANCE: u128 = 1;
// NOTE(review): 1_000_000 stroops is 0.1 XLM (1 XLM = 10_000_000 stroops);
// the old comment said "1 XLM" — confirm the intended default.
pub const DEFAULT_STELLAR_MIN_BALANCE: u64 = 1_000_000;
// 10_000_000 lamports == 0.01 SOL (old comment misstated this as "0.01 Lamport").
pub const DEFAULT_SOLANA_MIN_BALANCE: u64 = 10_000_000;
/// Maximum serialized Solana transaction size in bytes (network packet limit).
pub const MAX_SOLANA_TX_DATA_SIZE: u16 = 1232;
/// Name of the smallest EVM denomination.
pub const EVM_SMALLEST_UNIT_NAME: &str = "wei";
/// The EVM zero address (20 zero bytes, hex-encoded).
pub const ZERO_ADDRESS: &str = "0x0000000000000000000000000000000000000000";
#[allow(dead_code)]
/// Name of the smallest Stellar denomination.
pub const STELLAR_SMALLEST_UNIT_NAME: &str = "stroop";
/// Name of the smallest Solana denomination.
pub const SOLANA_SMALLEST_UNIT_NAME: &str = "lamport";

/// Default weight assigned to an RPC endpoint when none is configured.
pub const DEFAULT_RPC_WEIGHT: u8 = 100;
-------------------------------------------------------------------------------- 1 | pub const MINIMUM_SECRET_VALUE_LENGTH: usize = 32; 2 | -------------------------------------------------------------------------------- /src/constants/worker.rs: -------------------------------------------------------------------------------- 1 | pub const WORKER_DEFAULT_MAXIMUM_RETRIES: usize = 5; 2 | -------------------------------------------------------------------------------- /src/domain/mod.rs: -------------------------------------------------------------------------------- 1 | //! # Domain Module 2 | //! 3 | //! Core domain logic for the relayer service, implementing: 4 | //! 5 | //! * Transaction processing 6 | //! * Relayer management 7 | //! * Network-specific implementations 8 | 9 | mod relayer; 10 | pub use relayer::*; 11 | 12 | mod transaction; 13 | pub use transaction::*; 14 | -------------------------------------------------------------------------------- /src/domain/relayer/evm/mod.rs: -------------------------------------------------------------------------------- 1 | /// The `evm` module provides functionality for interacting with 2 | /// Ethereum Virtual Machine (EVM) based blockchains. It includes 3 | /// the `evm_relayer` submodule which contains the core logic for 4 | /// relaying transactions and events between different EVM networks. 
5 | mod evm_relayer; 6 | mod validations; 7 | 8 | pub use evm_relayer::*; 9 | pub use validations::*; 10 | -------------------------------------------------------------------------------- /src/domain/relayer/solana/mod.rs: -------------------------------------------------------------------------------- 1 | /// Module for Solana relayer functionality 2 | mod solana_relayer; 3 | use std::sync::Arc; 4 | 5 | pub use solana_relayer::*; 6 | 7 | /// Module for Solana RPC functionality 8 | mod rpc; 9 | pub use rpc::*; 10 | 11 | mod dex; 12 | pub use dex::*; 13 | 14 | mod token; 15 | pub use token::*; 16 | 17 | use crate::{ 18 | jobs::JobProducer, 19 | models::{NetworkType, RelayerError, RelayerRepoModel, SignerRepoModel, SolanaNetwork}, 20 | repositories::{ 21 | InMemoryNetworkRepository, InMemoryRelayerRepository, InMemoryTransactionRepository, 22 | RelayerRepositoryStorage, 23 | }, 24 | services::{get_network_provider, JupiterService, SolanaSignerFactory}, 25 | }; 26 | 27 | /// Function to create a Solana relayer instance 28 | pub async fn create_solana_relayer( 29 | relayer: RelayerRepoModel, 30 | signer: SignerRepoModel, 31 | relayer_repository: Arc>, 32 | network_repository: Arc, 33 | transaction_repository: Arc, 34 | job_producer: Arc, 35 | ) -> Result { 36 | let network_repo = network_repository 37 | .get(NetworkType::Solana, &relayer.network) 38 | .await 39 | .ok() 40 | .flatten() 41 | .ok_or_else(|| { 42 | RelayerError::NetworkConfiguration(format!("Network {} not found", relayer.network)) 43 | })?; 44 | 45 | let network = SolanaNetwork::try_from(network_repo)?; 46 | let provider = Arc::new(get_network_provider( 47 | &network, 48 | relayer.custom_rpc_urls.clone(), 49 | )?); 50 | let signer_service = Arc::new(SolanaSignerFactory::create_solana_signer(&signer)?); 51 | let jupiter_service = Arc::new(JupiterService::new_from_network(relayer.network.as_str())); 52 | let rpc_methods = SolanaRpcMethodsImpl::new( 53 | relayer.clone(), 54 | provider.clone(), 55 | 
signer_service.clone(), 56 | jupiter_service.clone(), 57 | job_producer.clone(), 58 | ); 59 | let rpc_handler = Arc::new(SolanaRpcHandler::new(rpc_methods)); 60 | let dex_service = create_network_dex_generic( 61 | &relayer, 62 | provider.clone(), 63 | signer_service.clone(), 64 | jupiter_service.clone(), 65 | )?; 66 | 67 | let relayer = DefaultSolanaRelayer::new( 68 | relayer, 69 | signer_service, 70 | relayer_repository, 71 | network_repository, 72 | provider, 73 | rpc_handler, 74 | transaction_repository, 75 | job_producer, 76 | Arc::new(dex_service), 77 | ) 78 | .await?; 79 | 80 | Ok(relayer) 81 | } 82 | -------------------------------------------------------------------------------- /src/domain/relayer/solana/rpc/methods/get_features_enabled.rs: -------------------------------------------------------------------------------- 1 | //! Retrieves a list of features enabled by the relayer. 2 | //! 3 | //! # Deprecated 4 | //! 5 | //! This method is deprecated. It is recommended to use more fine-grained methods for feature 6 | //! detection. 7 | //! 8 | //! # Description 9 | //! 10 | //! This function returns a list of enabled features on the relayer. 11 | //! 12 | //! # Returns 13 | //! 14 | //! On success, returns a vector of strings where each string represents an enabled feature 15 | //! (e.g., "gasless"). 
16 | use crate::{ 17 | jobs::JobProducerTrait, 18 | models::{GetFeaturesEnabledRequestParams, GetFeaturesEnabledResult}, 19 | services::{JupiterServiceTrait, SolanaProviderTrait, SolanaSignTrait}, 20 | }; 21 | 22 | use super::*; 23 | 24 | impl SolanaRpcMethodsImpl 25 | where 26 | P: SolanaProviderTrait + Send + Sync, 27 | S: SolanaSignTrait + Send + Sync, 28 | J: JupiterServiceTrait + Send + Sync, 29 | JP: JobProducerTrait + Send + Sync, 30 | { 31 | pub(crate) async fn get_features_enabled_impl( 32 | &self, 33 | _params: GetFeaturesEnabledRequestParams, 34 | ) -> Result { 35 | // gasless is enabled out of the box to be compliant with the spec 36 | Ok(GetFeaturesEnabledResult { 37 | features: vec!["gasless".to_string()], 38 | }) 39 | } 40 | } 41 | 42 | #[cfg(test)] 43 | mod tests { 44 | use super::*; 45 | 46 | #[tokio::test] 47 | async fn test_get_features_enabled() { 48 | let (relayer, signer, provider, jupiter_service, _, job_producer) = setup_test_context(); 49 | 50 | let rpc = SolanaRpcMethodsImpl::new_mock( 51 | relayer, 52 | Arc::new(provider), 53 | Arc::new(signer), 54 | Arc::new(jupiter_service), 55 | Arc::new(job_producer), 56 | ); 57 | 58 | let result = rpc 59 | .get_features_enabled_impl(GetFeaturesEnabledRequestParams {}) 60 | .await; 61 | 62 | assert!(result.is_ok(), "Should return Ok result"); 63 | 64 | let features = result.unwrap().features; 65 | assert_eq!(features.len(), 1, "Should return exactly one feature"); 66 | assert_eq!( 67 | features[0], "gasless", 68 | "Should return 'gasless' as enabled feature" 69 | ); 70 | 71 | println!("Enabled features: {:?}", features); 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /src/domain/relayer/solana/rpc/mod.rs: -------------------------------------------------------------------------------- 1 | //! This module implements the Solana RPC functionality. 
2 | 3 | mod methods; 4 | pub use methods::*; 5 | 6 | mod handler; 7 | pub use handler::*; 8 | 9 | use log::error; 10 | use thiserror::Error; 11 | 12 | use crate::{ 13 | models::{SignerError, SolanaEncodingError}, 14 | services::SolanaProviderError, 15 | }; 16 | 17 | use super::TokenError; 18 | 19 | #[derive(Debug, Error)] 20 | #[allow(dead_code)] 21 | pub enum SolanaRpcError { 22 | #[error("Unsupported method: {0}")] 23 | UnsupportedMethod(String), 24 | #[error("BadRequest: {0}")] 25 | BadRequest(String), 26 | #[error("Feature fetch error: {0}")] 27 | FeatureFetch(String), 28 | #[error("Invalid params: {0}")] 29 | InvalidParams(String), 30 | #[error("Unsupported Fee token error: {0}")] 31 | UnsupportedFeeToken(String), 32 | #[error("Estimation Error: {0}")] 33 | Estimation(String), 34 | #[error("Insufficient funds: {0}")] 35 | InsufficientFunds(String), 36 | #[error("Transaction preparation error: {0}")] 37 | TransactionPreparation(String), 38 | #[error("Preparation error: {0}")] 39 | Preparation(String), 40 | #[error("Signature error: {0}")] 41 | Signature(String), 42 | #[error("Token fetch error: {0}")] 43 | TokenFetch(String), 44 | #[error("Token Account error: {0}")] 45 | TokenAccount(String), 46 | #[error("Send error: {0}")] 47 | Send(String), 48 | #[error("Transaction validation error: {0}")] 49 | SolanaTransactionValidation(#[from] SolanaTransactionValidationError), 50 | #[error("Signing error: {0}")] 51 | Signing(#[from] SignerError), 52 | #[error("Encoding error: {0}")] 53 | Encoding(#[from] SolanaEncodingError), 54 | #[error("Provider error: {0}")] 55 | Provider(#[from] SolanaProviderError), 56 | #[error("Token error: {0}")] 57 | Token(#[from] TokenError), 58 | #[error("Internal error: {0}")] 59 | Internal(String), 60 | } 61 | -------------------------------------------------------------------------------- /src/domain/relayer/stellar/mod.rs: -------------------------------------------------------------------------------- 1 | mod stellar_relayer; 2 | pub 
use stellar_relayer::*; 3 | -------------------------------------------------------------------------------- /src/domain/transaction/evm/mod.rs: -------------------------------------------------------------------------------- 1 | /// This module provides functionality related to Ethereum Virtual Machine (EVM) transactions. 2 | /// It includes the core transaction logic and utility functions for handling EVM transactions. 3 | pub mod evm_transaction; 4 | pub use evm_transaction::*; 5 | 6 | pub mod price_calculator; 7 | pub use price_calculator::*; 8 | 9 | mod utils; 10 | pub use utils::*; 11 | 12 | pub mod status; 13 | -------------------------------------------------------------------------------- /src/domain/transaction/solana/mod.rs: -------------------------------------------------------------------------------- 1 | mod solana_transaction; 2 | pub use solana_transaction::*; 3 | -------------------------------------------------------------------------------- /src/domain/transaction/solana/solana_transaction.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | use eyre::Result; 3 | use log::info; 4 | use std::sync::Arc; 5 | 6 | use crate::{ 7 | domain::transaction::Transaction, 8 | jobs::JobProducer, 9 | models::{RelayerRepoModel, TransactionError, TransactionRepoModel}, 10 | repositories::{ 11 | InMemoryRelayerRepository, InMemoryTransactionRepository, RelayerRepositoryStorage, 12 | }, 13 | services::SolanaProvider, 14 | }; 15 | 16 | #[allow(dead_code)] 17 | pub struct SolanaRelayerTransaction { 18 | relayer: RelayerRepoModel, 19 | provider: Arc, 20 | relayer_repository: Arc>, 21 | transaction_repository: Arc, 22 | job_producer: Arc, 23 | } 24 | 25 | #[allow(dead_code)] 26 | impl SolanaRelayerTransaction { 27 | pub fn new( 28 | relayer: RelayerRepoModel, 29 | relayer_repository: Arc>, 30 | provider: Arc, 31 | transaction_repository: Arc, 32 | job_producer: Arc, 33 | ) -> Result { 34 | Ok(Self { 
35 | relayer_repository, 36 | provider, 37 | transaction_repository, 38 | relayer, 39 | job_producer, 40 | }) 41 | } 42 | } 43 | 44 | #[async_trait] 45 | impl Transaction for SolanaRelayerTransaction { 46 | async fn prepare_transaction( 47 | &self, 48 | tx: TransactionRepoModel, 49 | ) -> Result { 50 | info!("preparing transaction"); 51 | Ok(tx) 52 | } 53 | 54 | async fn submit_transaction( 55 | &self, 56 | tx: TransactionRepoModel, 57 | ) -> Result { 58 | info!("submitting transaction"); 59 | Ok(tx) 60 | } 61 | 62 | async fn resubmit_transaction( 63 | &self, 64 | tx: TransactionRepoModel, 65 | ) -> Result { 66 | info!("resubmitting transaction"); 67 | // For now, just call submit_transaction as Solana implementation is a stub 68 | self.submit_transaction(tx).await 69 | } 70 | 71 | async fn handle_transaction_status( 72 | &self, 73 | tx: TransactionRepoModel, 74 | ) -> Result { 75 | Ok(tx) 76 | } 77 | 78 | async fn cancel_transaction( 79 | &self, 80 | tx: TransactionRepoModel, 81 | ) -> Result { 82 | Ok(tx) 83 | } 84 | 85 | async fn replace_transaction( 86 | &self, 87 | tx: TransactionRepoModel, 88 | ) -> Result { 89 | Ok(tx) 90 | } 91 | 92 | async fn sign_transaction( 93 | &self, 94 | tx: TransactionRepoModel, 95 | ) -> Result { 96 | Ok(tx) 97 | } 98 | 99 | async fn validate_transaction( 100 | &self, 101 | _tx: TransactionRepoModel, 102 | ) -> Result { 103 | Ok(true) 104 | } 105 | } 106 | -------------------------------------------------------------------------------- /src/domain/transaction/stellar/mod.rs: -------------------------------------------------------------------------------- 1 | mod stellar_transaction; 2 | pub use stellar_transaction::*; 3 | 4 | mod prepare; 5 | 6 | mod submit; 7 | 8 | mod status; 9 | 10 | mod utils; 11 | pub use utils::*; 12 | 13 | mod lane_gate; 14 | pub use lane_gate::*; 15 | 16 | #[cfg(test)] 17 | pub mod test_helpers; 18 | -------------------------------------------------------------------------------- 
/src/domain/transaction/stellar/utils.rs: -------------------------------------------------------------------------------- 1 | //! Utility functions for Stellar transaction domain logic. 2 | use crate::models::OperationSpec; 3 | use crate::models::RelayerError; 4 | 5 | /// Returns true if any operation is not a Payment operation. 6 | pub fn needs_simulation(operations: &[OperationSpec]) -> bool { 7 | operations 8 | .iter() 9 | .any(|op| !matches!(op, OperationSpec::Payment { .. })) 10 | } 11 | 12 | pub fn next_sequence_u64(seq_num: i64) -> Result { 13 | let next_i64 = seq_num 14 | .checked_add(1) 15 | .ok_or_else(|| RelayerError::ProviderError("sequence overflow".into()))?; 16 | u64::try_from(next_i64) 17 | .map_err(|_| RelayerError::ProviderError("sequence overflows u64".into())) 18 | } 19 | 20 | pub fn i64_from_u64(value: u64) -> Result { 21 | i64::try_from(value).map_err(|_| RelayerError::ProviderError("u64→i64 overflow".into())) 22 | } 23 | 24 | #[cfg(test)] 25 | mod tests { 26 | use super::*; 27 | use crate::models::AssetSpec; 28 | 29 | const TEST_PK: &str = "GAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAWHF"; 30 | 31 | fn payment_op(destination: &str) -> OperationSpec { 32 | OperationSpec::Payment { 33 | destination: destination.to_string(), 34 | amount: 100, 35 | asset: AssetSpec::Native, 36 | } 37 | } 38 | 39 | #[test] 40 | fn returns_false_for_only_payment_ops() { 41 | let ops = vec![payment_op(TEST_PK)]; 42 | assert!(!needs_simulation(&ops)); 43 | } 44 | 45 | mod next_sequence_u64_tests { 46 | use super::*; 47 | 48 | #[test] 49 | fn test_increment() { 50 | assert_eq!(next_sequence_u64(0).unwrap(), 1); 51 | 52 | assert_eq!(next_sequence_u64(12345).unwrap(), 12346); 53 | } 54 | 55 | #[test] 56 | fn test_error_path_overflow_i64_max() { 57 | let result = next_sequence_u64(i64::MAX); 58 | assert!(result.is_err()); 59 | match result.unwrap_err() { 60 | RelayerError::ProviderError(msg) => assert_eq!(msg, "sequence overflow"), 61 | _ => panic!("Unexpected 
error type"), 62 | } 63 | } 64 | } 65 | 66 | mod i64_from_u64_tests { 67 | use super::*; 68 | 69 | #[test] 70 | fn test_happy_path_conversion() { 71 | assert_eq!(i64_from_u64(0).unwrap(), 0); 72 | assert_eq!(i64_from_u64(12345).unwrap(), 12345); 73 | assert_eq!(i64_from_u64(i64::MAX as u64).unwrap(), i64::MAX); 74 | } 75 | 76 | #[test] 77 | fn test_error_path_overflow_u64_max() { 78 | let result = i64_from_u64(u64::MAX); 79 | assert!(result.is_err()); 80 | match result.unwrap_err() { 81 | RelayerError::ProviderError(msg) => assert_eq!(msg, "u64→i64 overflow"), 82 | _ => panic!("Unexpected error type"), 83 | } 84 | } 85 | 86 | #[test] 87 | fn test_edge_case_just_above_i64_max() { 88 | // Smallest u64 value that will overflow i64 89 | let value = (i64::MAX as u64) + 1; 90 | let result = i64_from_u64(value); 91 | assert!(result.is_err()); 92 | match result.unwrap_err() { 93 | RelayerError::ProviderError(msg) => assert_eq!(msg, "u64→i64 overflow"), 94 | _ => panic!("Unexpected error type"), 95 | } 96 | } 97 | } 98 | } 99 | -------------------------------------------------------------------------------- /src/jobs/handlers/mod.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use apalis::prelude::{Attempt, Error}; 4 | use eyre::Report; 5 | 6 | mod transaction_request_handler; 7 | use log::info; 8 | pub use transaction_request_handler::*; 9 | 10 | mod transaction_submission_handler; 11 | pub use transaction_submission_handler::*; 12 | 13 | mod notification_handler; 14 | pub use notification_handler::*; 15 | 16 | mod transaction_status_handler; 17 | pub use transaction_status_handler::*; 18 | 19 | mod solana_swap_request_handler; 20 | pub use solana_swap_request_handler::*; 21 | 22 | pub fn handle_result( 23 | result: Result<(), Report>, 24 | attempt: Attempt, 25 | job_type: &str, 26 | max_attempts: usize, 27 | ) -> Result<(), Error> { 28 | if result.is_ok() { 29 | info!("{} request handled successfully", 
job_type); 30 | return Ok(()); 31 | } 32 | info!("{} request failed: {:?}", job_type, result); 33 | 34 | if attempt.current() >= max_attempts { 35 | info!("Max attempts ({}) reached, failing job", max_attempts); 36 | Err(Error::Abort(Arc::new("Failed to handle request".into())))? 37 | } 38 | 39 | Err(Error::Failed(Arc::new( 40 | "Failed to handle request. Retrying".into(), 41 | )))? 42 | } 43 | 44 | #[cfg(test)] 45 | mod tests { 46 | use super::*; 47 | use apalis::prelude::Attempt; 48 | 49 | #[test] 50 | fn test_handle_result_success() { 51 | let result: Result<(), Report> = Ok(()); 52 | let attempt = Attempt::default(); 53 | 54 | let handled = handle_result(result, attempt, "test_job", 3); 55 | assert!(handled.is_ok()); 56 | } 57 | 58 | #[test] 59 | fn test_handle_result_retry() { 60 | let result: Result<(), Report> = Err(Report::msg("Test error")); 61 | let attempt = Attempt::default(); 62 | 63 | let handled = handle_result(result, attempt, "test_job", 3); 64 | 65 | assert!(handled.is_err()); 66 | match handled { 67 | Err(Error::Failed(_)) => { 68 | // This is the expected error type for a retry 69 | } 70 | _ => panic!("Expected Failed error for retry"), 71 | } 72 | } 73 | 74 | #[test] 75 | fn test_handle_result_abort() { 76 | let result: Result<(), Report> = Err(Report::msg("Test error")); 77 | let attempt = Attempt::default(); 78 | for _ in 0..3 { 79 | attempt.increment(); 80 | } 81 | 82 | let handled = handle_result(result, attempt, "test_job", 3); 83 | 84 | assert!(handled.is_err()); 85 | match handled { 86 | Err(Error::Abort(_)) => { 87 | // This is the expected error type for an abort 88 | } 89 | _ => panic!("Expected Abort error for max attempts"), 90 | } 91 | } 92 | 93 | #[test] 94 | fn test_handle_result_max_attempts_exceeded() { 95 | let result: Result<(), Report> = Err(Report::msg("Test error")); 96 | let attempt = Attempt::default(); 97 | for _ in 0..5 { 98 | attempt.increment(); 99 | } 100 | 101 | let handled = handle_result(result, attempt, 
"test_job", 3); 102 | 103 | assert!(handled.is_err()); 104 | match handled { 105 | Err(Error::Abort(_)) => { 106 | // This is the expected error type for exceeding max attempts 107 | } 108 | _ => panic!("Expected Abort error for exceeding max attempts"), 109 | } 110 | } 111 | } 112 | -------------------------------------------------------------------------------- /src/jobs/handlers/solana_swap_request_handler.rs: -------------------------------------------------------------------------------- 1 | //! Solana swap request handling worker implementation. 2 | //! 3 | //! This module implements the solana token swap request handling worker that processes 4 | //! notification jobs from the queue. 5 | 6 | use actix_web::web::ThinData; 7 | use apalis::prelude::{Attempt, Data, *}; 8 | use eyre::Result; 9 | use log::info; 10 | 11 | use crate::{ 12 | constants::WORKER_DEFAULT_MAXIMUM_RETRIES, 13 | domain::{create_solana_relayer, get_relayer_by_id, SolanaRelayerDexTrait}, 14 | jobs::{handle_result, Job, JobProducer, SolanaTokenSwapRequest}, 15 | models::AppState, 16 | repositories::Repository, 17 | }; 18 | 19 | /// Handles incoming swap jobs from the queue. 20 | /// 21 | /// # Arguments 22 | /// * `job` - The notification job containing recipient and message details 23 | /// * `context` - Application state containing notification services 24 | /// 25 | /// # Returns 26 | /// * `Result<(), Error>` - Success or failure of notification processing 27 | pub async fn solana_token_swap_request_handler( 28 | job: Job, 29 | context: Data>>, 30 | attempt: Attempt, 31 | ) -> Result<(), Error> { 32 | info!("handling solana token swap request: {:?}", job.data); 33 | 34 | let result = handle_request(job.data, context).await; 35 | 36 | handle_result( 37 | result, 38 | attempt, 39 | "SolanaTokenSwapRequest", 40 | WORKER_DEFAULT_MAXIMUM_RETRIES, 41 | ) 42 | } 43 | 44 | #[derive(Default, Debug, Clone)] 45 | pub struct CronReminder(); 46 | 47 | /// Handles incoming swap jobs from the cron queue. 
48 | pub async fn solana_token_swap_cron_handler( 49 | job: CronReminder, 50 | relayer_id: Data, 51 | data: Data>>, 52 | attempt: Attempt, 53 | ) -> Result<(), Error> { 54 | info!("handling solana token swap cron request: {:?}", job); 55 | 56 | let result = handle_request( 57 | SolanaTokenSwapRequest { 58 | relayer_id: relayer_id.to_string(), 59 | }, 60 | data, 61 | ) 62 | .await; 63 | 64 | handle_result( 65 | result, 66 | attempt, 67 | "SolanaTokenSwapRequest", 68 | WORKER_DEFAULT_MAXIMUM_RETRIES, 69 | ) 70 | } 71 | 72 | async fn handle_request( 73 | request: SolanaTokenSwapRequest, 74 | context: Data>>, 75 | ) -> Result<()> { 76 | info!("handling solana token swap request: {:?}", request); 77 | 78 | let relayer_model = get_relayer_by_id(request.relayer_id.clone(), &context).await?; 79 | let signer_model = context 80 | .signer_repository 81 | .get_by_id(relayer_model.signer_id.clone()) 82 | .await?; 83 | 84 | let relayer = create_solana_relayer( 85 | relayer_model, 86 | signer_model, 87 | context.relayer_repository(), 88 | context.network_repository(), 89 | context.transaction_repository(), 90 | context.job_producer(), 91 | ) 92 | .await?; 93 | 94 | relayer 95 | .handle_token_swap_request(request.relayer_id.clone()) 96 | .await 97 | .map_err(|e| eyre::eyre!("Failed to handle solana token swap request: {}", e))?; 98 | 99 | Ok(()) 100 | } 101 | 102 | #[cfg(test)] 103 | mod tests {} 104 | -------------------------------------------------------------------------------- /src/jobs/handlers/transaction_request_handler.rs: -------------------------------------------------------------------------------- 1 | //! Transaction request handler for processing incoming transaction jobs. 2 | //! 3 | //! Handles the validation and preparation of transactions before they are 4 | //! 
submitted to the network 5 | use actix_web::web::ThinData; 6 | use apalis::prelude::{Attempt, Context, Data, TaskId, Worker, *}; 7 | use apalis_redis::RedisContext; 8 | use eyre::Result; 9 | use log::info; 10 | 11 | use crate::{ 12 | constants::WORKER_DEFAULT_MAXIMUM_RETRIES, 13 | domain::{get_relayer_transaction, get_transaction_by_id, Transaction}, 14 | jobs::{handle_result, Job, JobProducer, TransactionRequest}, 15 | models::AppState, 16 | }; 17 | 18 | pub async fn transaction_request_handler( 19 | job: Job, 20 | state: Data>>, 21 | attempt: Attempt, 22 | worker: Worker, 23 | task_id: TaskId, 24 | ctx: RedisContext, 25 | ) -> Result<(), Error> { 26 | info!("Handling transaction request: {:?}", job.data); 27 | info!("Attempt: {:?}", attempt); 28 | info!("Worker: {:?}", worker); 29 | info!("Task ID: {:?}", task_id); 30 | info!("Context: {:?}", ctx); 31 | 32 | let result = handle_request(job.data, state).await; 33 | 34 | handle_result( 35 | result, 36 | attempt, 37 | "Transaction Request", 38 | WORKER_DEFAULT_MAXIMUM_RETRIES, 39 | ) 40 | } 41 | 42 | async fn handle_request( 43 | request: TransactionRequest, 44 | state: Data>>, 45 | ) -> Result<()> { 46 | let relayer_transaction = get_relayer_transaction(request.relayer_id, &state).await?; 47 | 48 | let transaction = get_transaction_by_id(request.transaction_id, &state).await?; 49 | 50 | relayer_transaction.prepare_transaction(transaction).await?; 51 | 52 | info!("Transaction request handled successfully"); 53 | 54 | Ok(()) 55 | } 56 | 57 | #[cfg(test)] 58 | mod tests { 59 | use super::*; 60 | use apalis::prelude::Attempt; 61 | 62 | #[tokio::test] 63 | async fn test_handler_result_processing() { 64 | // This test focuses only on the interaction with handle_result 65 | // which we can test without mocking the entire state 66 | 67 | // Create a minimal job 68 | let request = TransactionRequest::new("tx123", "relayer-1"); 69 | let job = Job::new(crate::jobs::JobType::TransactionRequest, request); 70 | 71 | // Create a 
test attempt 72 | let attempt = Attempt::default(); 73 | 74 | // We cannot fully test the transaction_request_handler without extensive mocking 75 | // of the domain layer, but we can verify our test setup is correct 76 | assert_eq!(job.data.transaction_id, "tx123"); 77 | assert_eq!(job.data.relayer_id, "relayer-1"); 78 | assert_eq!(attempt.current(), 0); 79 | } 80 | 81 | // Note: Fully testing the functionality would require either: 82 | // 1. Dependency injection for all external dependencies 83 | // 2. Feature flags to enable mock implementations 84 | // 3. Integration tests with a real or test database 85 | 86 | // For now, these tests serve as placeholders to be expanded 87 | // when the appropriate testing infrastructure is in place. 88 | } 89 | -------------------------------------------------------------------------------- /src/jobs/handlers/transaction_status_handler.rs: -------------------------------------------------------------------------------- 1 | //! Transaction status monitoring handler. 2 | //! 3 | //! Monitors the status of submitted transactions by: 4 | //! - Checking transaction status on the network 5 | //! - Updating transaction status in storage 6 | //! 
- Triggering notifications on status changes 7 | use actix_web::web::ThinData; 8 | use apalis::prelude::{Attempt, Data, *}; 9 | 10 | use eyre::Result; 11 | use log::info; 12 | 13 | use crate::{ 14 | constants::WORKER_DEFAULT_MAXIMUM_RETRIES, 15 | domain::{get_relayer_transaction, get_transaction_by_id, Transaction}, 16 | jobs::{handle_result, Job, JobProducer, TransactionStatusCheck}, 17 | models::AppState, 18 | }; 19 | 20 | pub async fn transaction_status_handler( 21 | job: Job, 22 | state: Data>>, 23 | attempt: Attempt, 24 | ) -> Result<(), Error> { 25 | info!("Handling transaction status job: {:?}", job.data); 26 | 27 | let result = handle_request(job.data, state).await; 28 | 29 | handle_result( 30 | result, 31 | attempt, 32 | "Transaction Status", 33 | WORKER_DEFAULT_MAXIMUM_RETRIES, 34 | ) 35 | } 36 | 37 | async fn handle_request( 38 | status_request: TransactionStatusCheck, 39 | state: Data>>, 40 | ) -> Result<()> { 41 | let relayer_transaction = 42 | get_relayer_transaction(status_request.relayer_id.clone(), &state).await?; 43 | 44 | let transaction = get_transaction_by_id(status_request.transaction_id, &state).await?; 45 | 46 | relayer_transaction 47 | .handle_transaction_status(transaction) 48 | .await?; 49 | 50 | info!("Status check handled successfully"); 51 | 52 | Ok(()) 53 | } 54 | 55 | #[cfg(test)] 56 | mod tests { 57 | use super::*; 58 | use apalis::prelude::Attempt; 59 | use std::collections::HashMap; 60 | 61 | #[tokio::test] 62 | async fn test_status_check_job_validation() { 63 | // Create a basic status check job 64 | let check_job = TransactionStatusCheck::new("tx123", "relayer-1"); 65 | let job = Job::new(crate::jobs::JobType::TransactionStatusCheck, check_job); 66 | 67 | // Validate the job data 68 | assert_eq!(job.data.transaction_id, "tx123"); 69 | assert_eq!(job.data.relayer_id, "relayer-1"); 70 | assert!(job.data.metadata.is_none()); 71 | } 72 | 73 | #[tokio::test] 74 | async fn test_status_check_with_metadata() { 75 | // Create a job with 
retry metadata 76 | let mut metadata = HashMap::new(); 77 | metadata.insert("retry_count".to_string(), "2".to_string()); 78 | metadata.insert("last_status".to_string(), "pending".to_string()); 79 | 80 | let check_job = 81 | TransactionStatusCheck::new("tx123", "relayer-1").with_metadata(metadata.clone()); 82 | 83 | // Validate the metadata 84 | assert!(check_job.metadata.is_some()); 85 | let job_metadata = check_job.metadata.unwrap(); 86 | assert_eq!(job_metadata.get("retry_count").unwrap(), "2"); 87 | assert_eq!(job_metadata.get("last_status").unwrap(), "pending"); 88 | } 89 | 90 | #[tokio::test] 91 | async fn test_status_handler_attempt_tracking() { 92 | // Create attempts with different retry counts 93 | let first_attempt = Attempt::default(); 94 | assert_eq!(first_attempt.current(), 0); 95 | 96 | let second_attempt = Attempt::default(); 97 | second_attempt.increment(); 98 | assert_eq!(second_attempt.current(), 1); 99 | 100 | let final_attempt = Attempt::default(); 101 | for _ in 0..WORKER_DEFAULT_MAXIMUM_RETRIES { 102 | final_attempt.increment(); 103 | } 104 | assert_eq!(final_attempt.current(), WORKER_DEFAULT_MAXIMUM_RETRIES); 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /src/jobs/mod.rs: -------------------------------------------------------------------------------- 1 | /// This module handles job queue operations. 2 | mod queue; 3 | pub use queue::*; 4 | 5 | /// This module contains handlers for processing jobs. 6 | mod handlers; 7 | pub use handlers::*; 8 | 9 | /// This module is responsible for producing jobs. 10 | mod job_producer; 11 | pub use job_producer::*; 12 | 13 | /// This module defines the job structure and related operations. 14 | mod job; 15 | pub use job::*; 16 | 17 | /// This module implements retry backoff strategies for job processing. 
18 | mod retry_backoff; 19 | pub use retry_backoff::*; 20 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! Blockchain Transaction Service Library 2 | //! 3 | //! This library provides functionality for sending/broadcasting transactions to blockchain networks 4 | //! and triggering notifications based on transaction events. It includes: 5 | //! 6 | //! - Configuration management through JSON files 7 | //! - Blockchain network transaction broadcasting 8 | //! - Customizable webhook notifications 9 | //! - Extensible repository and service architecture 10 | //! 11 | //! # Module Structure 12 | //! 13 | //! - `api`: API routes and handlers 14 | //! - `bootstrap`: Bootstrap and initialization 15 | //! - `config`: Configuration management 16 | //! - `constants`: Constants and environment variables 17 | //! - `domain`: Domain-specific logic 18 | //! - `jobs`: Job scheduling and execution 19 | //! - `logging`: Logging and tracing 20 | //! - `metrics`: Metrics and monitoring 21 | //! - `models`: Data structures for configuration and blockchain data 22 | //! - `repositories`: Configuration storage and management 23 | //! - `services`: Core business logic and blockchain interaction 24 | //! 
- We use `prometheus` to collect metrics from the application. The list of metrics is exposed on the `/metrics` endpoint.
use std::fmt;

/// A chain-specific account address.
///
/// Each variant stores the address in the representation native to its
/// network family.
#[derive(Debug, Clone, PartialEq, Eq)]
#[allow(dead_code)]
pub enum Address {
    /// Ethereum-like address (20 bytes)
    Evm([u8; 20]),
    /// Stellar address (Base32-encoded string)
    Stellar(String),
    /// Solana address (Base58-encoded string)
    Solana(String),
}

impl fmt::Display for Address {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            // Lowercase hex with the conventional "0x" prefix, formatted
            // directly with `{:02x}` — no external hex crate needed for this.
            Address::Evm(addr) => {
                write!(f, "0x")?;
                for byte in addr {
                    write!(f, "{:02x}", byte)?;
                }
                Ok(())
            }
            Address::Stellar(addr) => write!(f, "{}", addr),
            Address::Solana(addr) => write!(f, "{}", addr),
        }
    }
}

impl Address {
    /// Best-effort structural validation of the address for its network type.
    ///
    /// Checks length and character-set constraints only; it does not verify
    /// checksums or decode the address.
    #[allow(dead_code)]
    pub fn validate(&self) -> bool {
        match self {
            // The array type already guarantees exactly 20 bytes, so an EVM
            // address is always structurally valid here. (The previous
            // `addr.len() == 20` check was a tautology.)
            Address::Evm(_) => true,
            Address::Stellar(addr) => {
                // NOTE(review): Stellar strkeys are uppercase RFC 4648 Base32
                // (A-Z, 2-7); this check is intentionally kept as permissive
                // as the original — confirm before tightening.
                addr.len() <= 56 && addr.chars().all(|c| c.is_ascii_alphanumeric() || c == '=')
            }
            Address::Solana(addr) => {
                // Base58 excludes the visually ambiguous characters 0, O, I
                // and l; the previous alphanumeric check accepted them. Also
                // reject the empty string, which is never a valid address.
                !addr.is_empty()
                    && addr.len() <= 44
                    && addr
                        .chars()
                        .all(|c| c.is_ascii_alphanumeric() && !matches!(c, '0' | 'O' | 'I' | 'l'))
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_evm_address_display() {
        let address = Address::Evm([
            200, 52, 220, 220, 154, 7, 77, 187, 173, 204, 113, 88, 71, 137, 174, 75, 70, 61, 177,
            22,
        ]);
        assert_eq!(
            address.to_string(),
            "0xc834dcdc9a074dbbadcc71584789ae4b463db116"
        );
    }

    #[test]
    fn test_validate() {
        assert!(Address::Evm([0u8; 20]).validate());
        assert!(Address::Solana("3xyzabc".to_string()).validate());
        assert!(!Address::Solana("0OIl".to_string()).validate());
        assert!(!Address::Solana(String::new()).validate());
        assert!(Address::Stellar("GABC234".to_string()).validate());
    }
}
/// Allows `?` on address conversions inside signing code paths by folding an
/// `AddressError` into a `SignerError::SigningError` carrying its message.
impl From<AddressError> for SignerError {
    fn from(err: AddressError) -> Self {
        SignerError::SigningError(err.to_string())
    }
}

/// Folds an `AddressError` into a `TransactionError::ValidationError` so
/// transaction validation can propagate address problems directly.
impl From<AddressError> for TransactionError {
    fn from(err: AddressError) -> Self {
        TransactionError::ValidationError(err.to_string())
    }
}
/// Errors surfaced by the Stellar RPC provider layer.
#[derive(Error, Debug, Serialize)]
pub enum StellarProviderError {
    /// Client-level failure while talking to the RPC endpoint.
    #[error("RPC client error: {0}")]
    RpcError(String),
    /// A transaction simulation failed.
    #[error("Simulation failed: {0}")]
    SimulationFailed(String),
    /// The account's balance was insufficient for the requested operation.
    #[error("Insufficient balance: {0}")]
    InsufficientBalance(String),
    /// A bad (stale or out-of-order) sequence number was used.
    #[error("Bad sequence number: {0}")]
    BadSeq(String),
    /// Catch-all for errors that do not fit the variants above.
    #[error("Unknown error: {0}")]
    Unknown(String),
}

/// Ad-hoc `eyre` reports from lower layers are treated as RPC client errors.
impl From<eyre::Report> for StellarProviderError {
    fn from(err: eyre::Report) -> Self {
        StellarProviderError::RpcError(err.to_string())
    }
}
/// Errors produced by the data-persistence (repository) layer.
#[derive(Debug, Error)]
pub enum RepositoryError {
    #[error("Entity not found: {0}")]
    NotFound(String),

    /// Failure to acquire or hold a lock on an entity.
    ///
    /// Fixed: the display message previously read "Entity already exists",
    /// which described a different failure than the variant name implies.
    #[error("Lock error: {0}")]
    LockError(String),

    #[error("Failed to connect to the database: {0}")]
    ConnectionError(String),

    #[error("Constraint violated: {0}")]
    ConstraintViolation(String),

    #[error("Invalid data: {0}")]
    InvalidData(String),

    #[error("Transaction failure: {0}")]
    TransactionFailure(String),

    #[error("Transaction validation failed: {0}")]
    TransactionValidationFailed(String),

    #[error("Permission denied: {0}")]
    PermissionDenied(String),

    #[error("An unknown error occurred: {0}")]
    Unknown(String),

    #[error("Not supported: {0}")]
    NotSupported(String),
}

/// Maps repository failures onto API-facing errors.
///
/// Only `NotFound` and `Unknown` carry their message through; every other
/// variant is collapsed into a generic internal error so storage details are
/// not leaked to API clients.
impl From<RepositoryError> for ApiError {
    fn from(error: RepositoryError) -> Self {
        match error {
            RepositoryError::NotFound(msg) => ApiError::NotFound(msg),
            RepositoryError::Unknown(msg) => ApiError::InternalError(msg),
            _ => ApiError::InternalError("An unknown error occurred".to_string()),
        }
    }
}
ApiError::InternalError(msg) => assert_eq!(msg, "Database error"), 70 | _ => panic!("Expected ApiError::InternalError, got something else"), 71 | } 72 | } 73 | 74 | #[test] 75 | fn test_repository_error_to_api_error_other_errors() { 76 | let test_cases = vec![ 77 | RepositoryError::LockError("Lock error".to_string()), 78 | RepositoryError::ConnectionError("Connection error".to_string()), 79 | RepositoryError::ConstraintViolation("Constraint error".to_string()), 80 | RepositoryError::InvalidData("Invalid data".to_string()), 81 | RepositoryError::TransactionFailure("Transaction failed".to_string()), 82 | RepositoryError::TransactionValidationFailed("Validation failed".to_string()), 83 | RepositoryError::PermissionDenied("Permission denied".to_string()), 84 | RepositoryError::NotSupported("Not supported".to_string()), 85 | ]; 86 | 87 | for repo_error in test_cases { 88 | let api_error = ApiError::from(repo_error); 89 | 90 | match api_error { 91 | ApiError::InternalError(msg) => assert_eq!(msg, "An unknown error occurred"), 92 | _ => panic!("Expected ApiError::InternalError, got something else"), 93 | } 94 | } 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /src/models/mod.rs: -------------------------------------------------------------------------------- 1 | //! # Models Module 2 | //! 3 | //! Contains core data structures and type definitions for the relayer service. 
4 | 5 | mod network; 6 | pub use network::*; 7 | 8 | mod app_state; 9 | pub use app_state::*; 10 | 11 | mod api_response; 12 | pub use api_response::*; 13 | 14 | mod transaction; 15 | pub use transaction::*; 16 | 17 | mod relayer; 18 | pub use relayer::*; 19 | 20 | mod error; 21 | pub use error::*; 22 | 23 | mod pagination; 24 | pub use pagination::*; 25 | 26 | mod signer; 27 | pub use signer::*; 28 | 29 | mod address; 30 | pub use address::*; 31 | 32 | mod notification; 33 | pub use notification::*; 34 | 35 | mod rpc; 36 | pub use rpc::*; 37 | 38 | mod types; 39 | pub use types::*; 40 | 41 | mod secret_string; 42 | pub use secret_string::*; 43 | 44 | mod plain_or_env_value; 45 | pub use plain_or_env_value::*; 46 | 47 | mod plugin; 48 | pub use plugin::*; 49 | -------------------------------------------------------------------------------- /src/models/network/evm/mod.rs: -------------------------------------------------------------------------------- 1 | mod network; 2 | 3 | pub use network::*; 4 | -------------------------------------------------------------------------------- /src/models/network/mod.rs: -------------------------------------------------------------------------------- 1 | mod evm; 2 | mod repository; 3 | mod solana; 4 | mod stellar; 5 | 6 | pub use evm::*; 7 | pub use repository::*; 8 | pub use solana::*; 9 | pub use stellar::*; 10 | -------------------------------------------------------------------------------- /src/models/network/solana/mod.rs: -------------------------------------------------------------------------------- 1 | mod network; 2 | 3 | pub use network::*; 4 | -------------------------------------------------------------------------------- /src/models/network/solana/network.rs: -------------------------------------------------------------------------------- 1 | use crate::models::{NetworkConfigData, NetworkRepoModel, RepositoryError}; 2 | use core::time::Duration; 3 | 4 | #[derive(Clone, PartialEq, Eq, Hash)] 5 | pub struct 
SolanaNetwork { 6 | /// Unique network identifier (e.g., "mainnet", "sepolia", "custom-devnet"). 7 | pub network: String, 8 | /// List of RPC endpoint URLs for connecting to the network. 9 | pub rpc_urls: Vec, 10 | /// List of Explorer endpoint URLs for connecting to the network. 11 | pub explorer_urls: Option>, 12 | /// Estimated average time between blocks in milliseconds. 13 | pub average_blocktime_ms: u64, 14 | /// Flag indicating if the network is a testnet. 15 | pub is_testnet: bool, 16 | /// List of arbitrary tags for categorizing or filtering networks. 17 | pub tags: Vec, 18 | } 19 | 20 | impl TryFrom for SolanaNetwork { 21 | type Error = RepositoryError; 22 | 23 | /// Converts a NetworkRepoModel to a SolanaNetwork. 24 | /// 25 | /// # Arguments 26 | /// * `network_repo` - The repository model to convert 27 | /// 28 | /// # Returns 29 | /// Result containing the SolanaNetwork if successful, or a RepositoryError 30 | fn try_from(network_repo: NetworkRepoModel) -> Result { 31 | match &network_repo.config { 32 | NetworkConfigData::Solana(solana_config) => { 33 | let common = &solana_config.common; 34 | 35 | let rpc_urls = common.rpc_urls.clone().ok_or_else(|| { 36 | RepositoryError::InvalidData(format!( 37 | "Solana network '{}' has no rpc_urls", 38 | network_repo.name 39 | )) 40 | })?; 41 | 42 | let average_blocktime_ms = common.average_blocktime_ms.ok_or_else(|| { 43 | RepositoryError::InvalidData(format!( 44 | "Solana network '{}' has no average_blocktime_ms", 45 | network_repo.name 46 | )) 47 | })?; 48 | 49 | Ok(SolanaNetwork { 50 | network: common.network.clone(), 51 | rpc_urls, 52 | explorer_urls: common.explorer_urls.clone(), 53 | average_blocktime_ms, 54 | is_testnet: common.is_testnet.unwrap_or(false), 55 | tags: common.tags.clone().unwrap_or_default(), 56 | }) 57 | } 58 | _ => Err(RepositoryError::InvalidData(format!( 59 | "Network '{}' is not a Solana network", 60 | network_repo.name 61 | ))), 62 | } 63 | } 64 | } 65 | 66 | impl SolanaNetwork { 67 | 
pub fn average_blocktime(&self) -> Option { 68 | Some(Duration::from_millis(self.average_blocktime_ms)) 69 | } 70 | 71 | pub fn public_rpc_urls(&self) -> Option<&[String]> { 72 | if self.rpc_urls.is_empty() { 73 | None 74 | } else { 75 | Some(&self.rpc_urls) 76 | } 77 | } 78 | 79 | pub fn explorer_urls(&self) -> Option<&[String]> { 80 | self.explorer_urls.as_deref() 81 | } 82 | 83 | pub fn is_testnet(&self) -> bool { 84 | self.is_testnet 85 | } 86 | } 87 | -------------------------------------------------------------------------------- /src/models/network/stellar/mod.rs: -------------------------------------------------------------------------------- 1 | mod network; 2 | 3 | pub use network::*; 4 | -------------------------------------------------------------------------------- /src/models/notification/mod.rs: -------------------------------------------------------------------------------- 1 | mod webhook_notification; 2 | pub use webhook_notification::*; 3 | 4 | mod repository; 5 | pub use repository::*; 6 | -------------------------------------------------------------------------------- /src/models/notification/repository.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | 3 | use crate::models::SecretString; 4 | 5 | #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] 6 | #[serde(rename_all = "lowercase")] 7 | pub enum NotificationType { 8 | Webhook, 9 | } 10 | 11 | #[derive(Debug, Clone, Serialize)] 12 | pub struct NotificationRepoModel { 13 | pub id: String, 14 | pub notification_type: NotificationType, 15 | pub url: String, 16 | pub signing_key: Option, 17 | } 18 | -------------------------------------------------------------------------------- /src/models/pagination.rs: -------------------------------------------------------------------------------- 1 | use serde::Deserialize; 2 | use utoipa::ToSchema; 3 | 4 | #[derive(Debug, Deserialize, Clone, ToSchema)] 5 | pub struct 
PaginationQuery { 6 | #[serde(default = "default_page")] 7 | pub page: u32, 8 | #[serde(default = "default_per_page")] 9 | pub per_page: u32, 10 | } 11 | 12 | fn default_page() -> u32 { 13 | 1 14 | } 15 | fn default_per_page() -> u32 { 16 | 10 17 | } 18 | -------------------------------------------------------------------------------- /src/models/plugin.rs: -------------------------------------------------------------------------------- 1 | #[derive(Debug, Clone)] 2 | pub struct PluginModel { 3 | /// Plugin ID 4 | pub id: String, 5 | /// Plugin path 6 | pub path: String, 7 | } 8 | -------------------------------------------------------------------------------- /src/models/relayer/mod.rs: -------------------------------------------------------------------------------- 1 | mod repository; 2 | pub use repository::*; 3 | 4 | mod response; 5 | pub use response::*; 6 | 7 | mod rpc_config; 8 | pub use rpc_config::*; 9 | -------------------------------------------------------------------------------- /src/models/rpc/evm/mod.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | use utoipa::ToSchema; 3 | 4 | #[derive(Debug, Serialize, Deserialize, ToSchema, PartialEq)] 5 | #[serde(untagged)] 6 | pub enum EvmRpcResult { 7 | GenericRpcResult(String), 8 | } 9 | 10 | #[derive(Debug, Serialize, Deserialize, ToSchema, PartialEq)] 11 | #[serde(tag = "method", content = "params")] 12 | pub enum EvmRpcRequest { 13 | GenericRpcRequest(String), 14 | } 15 | -------------------------------------------------------------------------------- /src/models/rpc/mod.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | use utoipa::ToSchema; 3 | 4 | mod solana; 5 | pub use solana::*; 6 | 7 | mod stellar; 8 | pub use stellar::*; 9 | 10 | mod evm; 11 | pub use evm::*; 12 | 13 | #[derive(Debug, Serialize, Deserialize, ToSchema, PartialEq)] 14 | 
/// Incoming JSON-RPC request payload, dispatched by network family.
///
/// NOTE(review): `#[serde(deny_unknown_fields)]` generally has no effect on
/// `#[serde(untagged)]` enums with newtype variants — unknown-field rejection
/// would have to live on the inner request types. Confirm the intended
/// behavior before relying on it.
#[derive(Debug, Serialize, Deserialize, ToSchema, PartialEq)]
#[serde(untagged)]
#[serde(deny_unknown_fields)]
pub enum NetworkRpcRequest {
    Solana(SolanaRpcRequest),
    Stellar(StellarRpcRequest),
    Evm(EvmRpcRequest),
}
/// A transaction request in the shape expected by one of the supported
/// network families.
#[derive(Serialize, ToSchema)]
#[serde(untagged)]
pub enum NetworkTransactionRequest {
    Evm(EvmTransactionRequest),
    Solana(SolanaTransactionRequest),
    Stellar(StellarTransactionRequest),
}

impl NetworkTransactionRequest {
    /// Deserializes raw request JSON into the variant matching `network_type`.
    ///
    /// # Errors
    /// Returns `ApiError::BadRequest` carrying the serde message when the
    /// JSON does not match the expected request shape.
    pub fn from_json(
        network_type: &NetworkType,
        json: serde_json::Value,
    ) -> Result<Self, ApiError> {
        match network_type {
            NetworkType::Evm => Ok(Self::Evm(
                serde_json::from_value(json).map_err(|e| ApiError::BadRequest(e.to_string()))?,
            )),
            NetworkType::Solana => Ok(Self::Solana(
                serde_json::from_value(json).map_err(|e| ApiError::BadRequest(e.to_string()))?,
            )),
            NetworkType::Stellar => Ok(Self::Stellar(
                serde_json::from_value(json).map_err(|e| ApiError::BadRequest(e.to_string()))?,
            )),
        }
    }

    /// Validates the request against the relayer's configuration.
    ///
    /// Only EVM requests currently carry validation logic; Solana and Stellar
    /// requests are accepted as-is here.
    pub fn validate(&self, relayer: &RelayerRepoModel) -> Result<(), ApiError> {
        match self {
            NetworkTransactionRequest::Evm(request) => request.validate(relayer),
            _ => Ok(()),
        }
    }
}
crate::models::transaction::stellar_types::{MemoSpec, OperationSpec}; 5 | 6 | #[derive(Deserialize, Serialize, ToSchema)] 7 | pub struct StellarTransactionRequest { 8 | pub source_account: String, 9 | pub network: String, 10 | #[schema(max_length = 100)] 11 | pub operations: Vec, 12 | #[schema(nullable = true)] 13 | pub memo: Option, 14 | #[schema(nullable = true)] 15 | pub valid_until: Option, 16 | } 17 | -------------------------------------------------------------------------------- /src/models/types.rs: -------------------------------------------------------------------------------- 1 | pub type U256 = alloy::primitives::U256; 2 | -------------------------------------------------------------------------------- /src/openapi.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | api::routes::{docs::relayer_docs, health, metrics}, 3 | domain, models, 4 | }; 5 | use utoipa::{ 6 | openapi::security::{Http, HttpAuthScheme, SecurityScheme}, 7 | Modify, OpenApi, 8 | }; 9 | 10 | struct SecurityAddon; 11 | 12 | impl Modify for SecurityAddon { 13 | fn modify(&self, openapi: &mut utoipa::openapi::OpenApi) { 14 | if let Some(components) = openapi.components.as_mut() { 15 | components.add_security_scheme( 16 | "bearer_auth", 17 | SecurityScheme::Http(Http::new(HttpAuthScheme::Bearer)), 18 | ); 19 | } 20 | } 21 | } 22 | // https://gitbook.com/docs/api-references/guides/managing-api-operations 23 | 24 | #[derive(OpenApi)] 25 | #[openapi( 26 | modifiers(&SecurityAddon), 27 | tags( 28 | (name = "Relayers", description = "Relayers are the core components of the OpenZeppelin Relayer API. 
They are responsible for executing transactions on behalf of users and providing a secure and reliable way to interact with the blockchain."), 29 | (name = "Metrics", description = "Metrics are responsible for showing the metrics related to the relayers."), 30 | (name = "Health", description = "Health is responsible for showing the health of the relayers.") 31 | ), 32 | info(description = "OpenZeppelin Relayer API", version = "0.1.0", title = "OpenZeppelin Relayer API", license( 33 | name = "AGPL-3.0 license", 34 | url = "https://github.com/OpenZeppelin/openzeppelin-relayer/blob/main/LICENSE" 35 | ), 36 | contact( 37 | name = "OpenZeppelin", 38 | url = "https://www.openzeppelin.com", 39 | ), 40 | terms_of_service = "https://www.openzeppelin.com/tos"), 41 | paths( 42 | relayer_docs::doc_get_relayer, 43 | relayer_docs::doc_list_relayers, 44 | relayer_docs::doc_get_relayer_balance, 45 | relayer_docs::doc_update_relayer, 46 | relayer_docs::doc_get_transaction_by_nonce, 47 | relayer_docs::doc_get_transaction_by_id, 48 | relayer_docs::doc_list_transactions, 49 | relayer_docs::doc_get_relayer_status, 50 | relayer_docs::doc_sign_typed_data, 51 | relayer_docs::doc_sign, 52 | relayer_docs::doc_cancel_transaction, 53 | relayer_docs::doc_delete_pending_transactions, 54 | relayer_docs::doc_rpc, 55 | relayer_docs::doc_send_transaction, 56 | relayer_docs::doc_replace_transaction, 57 | health::health, 58 | metrics::list_metrics, 59 | metrics::metric_detail, 60 | metrics::scrape_metrics 61 | ), 62 | components(schemas( 63 | models::RelayerResponse, 64 | models::NetworkPolicyResponse, 65 | models::EvmPolicyResponse, 66 | models::SolanaPolicyResponse, 67 | models::StellarPolicyResponse, 68 | domain::RelayerUpdateRequest, 69 | domain::SignDataRequest, 70 | domain::SignTypedDataRequest 71 | )) 72 | )] 73 | pub struct ApiDoc; 74 | -------------------------------------------------------------------------------- /src/repositories/mod.rs: 
--------------------------------------------------------------------------------
//! # Repository Module
//!
//! Implements data persistence layer for the relayer service using Repository pattern.

use crate::models::{PaginationQuery, RepositoryError};
use async_trait::async_trait;
use eyre::Result;

mod relayer;
pub use relayer::*;

pub mod transaction;
pub use transaction::*;

mod signer;
pub use signer::*;

mod notification;
pub use notification::*;

mod transaction_counter;
pub use transaction_counter::*;

mod network;
pub use network::*;

mod plugin;
pub use plugin::*;

/// One page of repository results plus pagination metadata.
// NOTE(review): the item type parameter was stripped during extraction;
// reconstructed as a generic `T` — confirm against the repository.
#[derive(Debug)]
pub struct PaginatedResult<T> {
    pub items: Vec<T>,
    pub total: u64,
    pub page: u32,
    pub per_page: u32,
}
#[cfg(test)]
use mockall::automock;

/// Generic async CRUD interface implemented by all repositories.
// NOTE(review): `T`/`ID` parameters reconstructed from the method bodies —
// the extraction stripped all angle-bracketed generics.
#[async_trait]
#[allow(dead_code)]
#[cfg_attr(test, automock)]
pub trait Repository<T, ID> {
    async fn create(&self, entity: T) -> Result<T, RepositoryError>;
    async fn get_by_id(&self, id: ID) -> Result<T, RepositoryError>;
    async fn list_all(&self) -> Result<Vec<T>, RepositoryError>;
    async fn list_paginated(
        &self,
        query: PaginationQuery,
    ) -> Result<PaginatedResult<T>, RepositoryError>;
    async fn update(&self, id: ID, entity: T) -> Result<T, RepositoryError>;
    async fn delete_by_id(&self, id: ID) -> Result<(), RepositoryError>;
    async fn count(&self) -> Result<u64, RepositoryError>;
}
--------------------------------------------------------------------------------
/src/repositories/plugin.rs:
--------------------------------------------------------------------------------
//! This module provides an in-memory implementation of plugins.
//!
//! The `InMemoryPluginRepository` struct is used to store and retrieve plugins
//! script paths for further execution.
5 | use crate::{ 6 | config::PluginFileConfig, 7 | models::PluginModel, 8 | repositories::{ConversionError, RepositoryError}, 9 | }; 10 | use async_trait::async_trait; 11 | 12 | #[cfg(test)] 13 | use mockall::automock; 14 | 15 | use std::collections::HashMap; 16 | use tokio::sync::{Mutex, MutexGuard}; 17 | 18 | #[derive(Debug)] 19 | pub struct InMemoryPluginRepository { 20 | store: Mutex>, 21 | } 22 | 23 | impl InMemoryPluginRepository { 24 | pub fn new() -> Self { 25 | Self { 26 | store: Mutex::new(HashMap::new()), 27 | } 28 | } 29 | 30 | async fn acquire_lock(lock: &Mutex) -> Result, RepositoryError> { 31 | Ok(lock.lock().await) 32 | } 33 | } 34 | 35 | impl Default for InMemoryPluginRepository { 36 | fn default() -> Self { 37 | Self::new() 38 | } 39 | } 40 | 41 | #[async_trait] 42 | #[allow(dead_code)] 43 | #[cfg_attr(test, automock)] 44 | pub trait PluginRepositoryTrait { 45 | async fn get_by_id(&self, id: &str) -> Result, RepositoryError>; 46 | async fn add(&self, plugin: PluginModel) -> Result<(), RepositoryError>; 47 | } 48 | 49 | #[async_trait] 50 | impl PluginRepositoryTrait for InMemoryPluginRepository { 51 | async fn get_by_id(&self, id: &str) -> Result, RepositoryError> { 52 | let store = Self::acquire_lock(&self.store).await?; 53 | Ok(store.get(id).cloned()) 54 | } 55 | 56 | async fn add(&self, plugin: PluginModel) -> Result<(), RepositoryError> { 57 | let mut store = Self::acquire_lock(&self.store).await?; 58 | store.insert(plugin.id.clone(), plugin); 59 | Ok(()) 60 | } 61 | } 62 | 63 | impl TryFrom for PluginModel { 64 | type Error = ConversionError; 65 | 66 | fn try_from(config: PluginFileConfig) -> Result { 67 | Ok(PluginModel { 68 | id: config.id.clone(), 69 | path: config.path.clone(), 70 | }) 71 | } 72 | } 73 | 74 | impl PartialEq for PluginModel { 75 | fn eq(&self, other: &Self) -> bool { 76 | self.id == other.id && self.path == other.path 77 | } 78 | } 79 | 80 | #[cfg(test)] 81 | mod tests { 82 | use super::*; 83 | use std::sync::Arc; 84 | 85 | 
#[tokio::test] 86 | async fn test_in_memory_plugin_repository() { 87 | let plugin_repository = Arc::new(InMemoryPluginRepository::new()); 88 | 89 | // Test add and get_by_id 90 | let plugin = PluginModel { 91 | id: "test-plugin".to_string(), 92 | path: "test-path".to_string(), 93 | }; 94 | plugin_repository.add(plugin.clone()).await.unwrap(); 95 | assert_eq!( 96 | plugin_repository.get_by_id("test-plugin").await.unwrap(), 97 | Some(plugin) 98 | ); 99 | } 100 | 101 | #[tokio::test] 102 | async fn test_get_nonexistent_plugin() { 103 | let plugin_repository = Arc::new(InMemoryPluginRepository::new()); 104 | 105 | let result = plugin_repository.get_by_id("test-plugin").await; 106 | assert!(matches!(result, Ok(None))); 107 | } 108 | 109 | #[tokio::test] 110 | async fn test_try_from() { 111 | let plugin = PluginFileConfig { 112 | id: "test-plugin".to_string(), 113 | path: "test-path".to_string(), 114 | }; 115 | let result = PluginModel::try_from(plugin); 116 | assert!(result.is_ok()); 117 | assert_eq!( 118 | result.unwrap(), 119 | PluginModel { 120 | id: "test-plugin".to_string(), 121 | path: "test-path".to_string(), 122 | } 123 | ); 124 | } 125 | } 126 | -------------------------------------------------------------------------------- /src/services/gas/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod evm_gas_price; 2 | pub use evm_gas_price::*; 3 | pub mod network_extra_fee; 4 | pub use network_extra_fee::*; 5 | 6 | pub mod optimism_extra_fee; 7 | -------------------------------------------------------------------------------- /src/services/mod.rs: -------------------------------------------------------------------------------- 1 | //! # Services Module 2 | //! 3 | //! Implements external service integrations and providers for blockchain networks. 
4 | 5 | pub mod provider; 6 | pub use provider::*; 7 | 8 | mod signer; 9 | pub use signer::*; 10 | 11 | mod notification; 12 | pub use notification::*; 13 | 14 | mod transaction_counter; 15 | pub use transaction_counter::*; 16 | 17 | pub mod gas; 18 | pub use gas::*; 19 | 20 | mod jupiter; 21 | pub use jupiter::*; 22 | 23 | mod vault; 24 | pub use vault::*; 25 | 26 | mod turnkey; 27 | pub use turnkey::*; 28 | 29 | mod google_cloud_kms; 30 | pub use google_cloud_kms::*; 31 | -------------------------------------------------------------------------------- /src/services/plugins/mod.rs: -------------------------------------------------------------------------------- 1 | //! Plugins service module for handling plugins execution and interaction with relayer 2 | struct PluginService {} 3 | 4 | impl PluginService { 5 | pub fn call_plugin(&self, _plugin_id: &str) -> Result { 6 | unimplemented!() 7 | } 8 | } 9 | 10 | impl Default for PluginService { 11 | fn default() -> Self { 12 | Self {} 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /src/services/signer/stellar/mod.rs: -------------------------------------------------------------------------------- 1 | // openzeppelin-relayer/src/services/signer/stellar/mod.rs 2 | //! 
Stellar signer implementation (local keystore) 3 | 4 | mod local_signer; 5 | use async_trait::async_trait; 6 | use local_signer::*; 7 | 8 | use crate::{ 9 | domain::{SignDataRequest, SignDataResponse, SignTransactionResponse, SignTypedDataRequest}, 10 | models::{Address, NetworkTransactionData, SignerConfig, SignerRepoModel}, 11 | services::signer::{SignerError, SignerFactoryError}, 12 | services::Signer, 13 | }; 14 | 15 | use super::DataSignerTrait; 16 | 17 | pub enum StellarSigner { 18 | Local(LocalSigner), 19 | Vault(LocalSigner), 20 | VaultCloud(LocalSigner), 21 | } 22 | 23 | #[async_trait] 24 | impl Signer for StellarSigner { 25 | async fn address(&self) -> Result { 26 | match self { 27 | Self::Local(s) | Self::Vault(s) | Self::VaultCloud(s) => s.address().await, 28 | } 29 | } 30 | 31 | async fn sign_transaction( 32 | &self, 33 | tx: NetworkTransactionData, 34 | ) -> Result { 35 | match self { 36 | Self::Local(s) | Self::Vault(s) | Self::VaultCloud(s) => s.sign_transaction(tx).await, 37 | } 38 | } 39 | } 40 | 41 | pub struct StellarSignerFactory; 42 | 43 | impl StellarSignerFactory { 44 | pub fn create_stellar_signer(m: &SignerRepoModel) -> Result { 45 | let signer = match m.config { 46 | SignerConfig::Local(_) 47 | | SignerConfig::Test(_) 48 | | SignerConfig::Vault(_) 49 | | SignerConfig::VaultCloud(_) => StellarSigner::Local(LocalSigner::new(m)?), 50 | SignerConfig::AwsKms(_) => { 51 | return Err(SignerFactoryError::UnsupportedType("AWS KMS".into())) 52 | } 53 | SignerConfig::VaultTransit(_) => { 54 | return Err(SignerFactoryError::UnsupportedType("Vault Transit".into())) 55 | } 56 | SignerConfig::Turnkey(_) => { 57 | return Err(SignerFactoryError::UnsupportedType("Turnkey".into())) 58 | } 59 | SignerConfig::GoogleCloudKms(_) => { 60 | return Err(SignerFactoryError::UnsupportedType( 61 | "Google Cloud KMS".into(), 62 | )) 63 | } 64 | }; 65 | Ok(signer) 66 | } 67 | } 68 | -------------------------------------------------------------------------------- 
/src/services/transaction_counter/mod.rs:
--------------------------------------------------------------------------------
//! This module provides a service for managing transaction counters.
//!
//! The `TransactionCounterService` struct offers methods to get, increment,
//! decrement, and set transaction counts associated with a specific relayer
//! and address. It uses an in-memory store to keep track of these counts.
use std::sync::Arc;

use crate::repositories::{TransactionCounterError, TransactionCounterTrait};
use async_trait::async_trait;

#[cfg(test)]
use mockall::automock;

/// Per-relayer, per-address transaction counter backed by a shared store.
// NOTE(review): the store's type parameter was stripped during extraction;
// reconstructed as a generic `T: TransactionCounterTrait` — confirm.
#[derive(Clone)]
pub struct TransactionCounterService<T> {
    relayer_id: String,
    address: String,
    store: Arc<T>,
}

impl<T> TransactionCounterService<T> {
    pub fn new(relayer_id: String, address: String, store: Arc<T>) -> Self {
        Self {
            relayer_id,
            address,
            store,
        }
    }
}

/// Async facade over the counter store, scoped to one relayer/address pair.
#[async_trait]
#[cfg_attr(test, automock)]
pub trait TransactionCounterServiceTrait: Send + Sync {
    async fn get(&self) -> Result<Option<u64>, TransactionCounterError>;
    async fn get_and_increment(&self) -> Result<u64, TransactionCounterError>;
    async fn decrement(&self) -> Result<u64, TransactionCounterError>;
    async fn set(&self, value: u64) -> Result<(), TransactionCounterError>;
}

#[async_trait]
#[allow(dead_code)]
impl<T: TransactionCounterTrait + Send + Sync> TransactionCounterServiceTrait
    for TransactionCounterService<T>
{
    async fn get(&self) -> Result<Option<u64>, TransactionCounterError> {
        self.store.get(&self.relayer_id, &self.address)
    }

    async fn get_and_increment(&self) -> Result<u64, TransactionCounterError> {
        self.store
            .get_and_increment(&self.relayer_id, &self.address)
    }

    async fn decrement(&self) -> Result<u64, TransactionCounterError> {
        self.store.decrement(&self.relayer_id, &self.address)
    }

    async fn set(&self, value: u64) -> Result<(), TransactionCounterError> {
        self.store.set(&self.relayer_id, &self.address, value)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::repositories::InMemoryTransactionCounter;

    #[tokio::test]
    async fn test_transaction_counter() {
        let store = Arc::new(InMemoryTransactionCounter::default());
        let service =
            TransactionCounterService::new("relayer_id".to_string(), "address".to_string(), store);

        assert_eq!(service.get().await.unwrap(), None);
        assert_eq!(service.get_and_increment().await.unwrap(), 0);
        assert_eq!(service.get_and_increment().await.unwrap(), 1);
        assert_eq!(service.decrement().await.unwrap(), 1);
        assert!(service.set(10).await.is_ok());
        assert_eq!(service.get().await.unwrap(), Some(10));
    }
}
--------------------------------------------------------------------------------
/src/utils/base64.rs:
--------------------------------------------------------------------------------
// base64 encode and decode helper functions

use base64::Engine;

/// Encodes `message` with the standard alphabet, padded.
pub fn base64_encode(message: &[u8]) -> String {
    base64::engine::general_purpose::STANDARD.encode(message)
}
/// Decodes standard-alphabet, padded base64.
pub fn base64_decode(data: &str) -> Result<Vec<u8>, base64::DecodeError> {
    base64::engine::general_purpose::STANDARD.decode(data)
}

/// Encodes `message` with the URL-safe alphabet, unpadded.
pub fn base64_url_encode(message: &[u8]) -> String {
    base64::engine::general_purpose::URL_SAFE_NO_PAD.encode(message)
}
/// Decodes URL-safe, unpadded base64.
pub fn base64_url_decode(data: &str) -> Result<Vec<u8>, base64::DecodeError> {
    base64::engine::general_purpose::URL_SAFE_NO_PAD.decode(data)
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_base64_encode() {
        assert_eq!(base64_encode(b"Hello, world!"), "SGVsbG8sIHdvcmxkIQ==");
    }

    #[test]
    fn test_base64_decode() {
        let decoded = base64_decode("SGVsbG8sIHdvcmxkIQ==").unwrap();
        assert_eq!(decoded, b"Hello, world!");
    }

    // The URL-safe variants were previously untested.
    #[test]
    fn test_base64_url_encode() {
        assert_eq!(base64_url_encode(b"Hello, world!"), "SGVsbG8sIHdvcmxkIQ");
    }

    #[test]
    fn test_base64_url_decode() {
        let decoded = base64_url_decode("SGVsbG8sIHdvcmxkIQ").unwrap();
        assert_eq!(decoded, b"Hello, world!");
    }
}
--------------------------------------------------------------------------------
/src/utils/key.rs: -------------------------------------------------------------------------------- 1 | use rand::RngCore; 2 | 3 | pub fn unsafe_generate_random_private_key() -> Vec { 4 | let mut rng = rand::rng(); 5 | let mut pk = vec![0u8; 32]; 6 | rng.fill_bytes(pk.as_mut_slice()); 7 | pk 8 | } 9 | 10 | #[cfg(test)] 11 | mod tests { 12 | use super::*; 13 | use std::collections::HashSet; 14 | 15 | #[test] 16 | fn test_private_key_length() { 17 | let pk = unsafe_generate_random_private_key(); 18 | assert_eq!(pk.len(), 32, "Private key should be 32 bytes"); 19 | } 20 | 21 | #[test] 22 | fn test_private_key_uniqueness() { 23 | let mut keys = HashSet::new(); 24 | for _ in 0..100 { 25 | let pk = unsafe_generate_random_private_key(); 26 | assert!(keys.insert(pk), "Generated private key should be unique"); 27 | } 28 | } 29 | 30 | #[test] 31 | fn test_private_key_not_zero() { 32 | let pk = unsafe_generate_random_private_key(); 33 | assert!( 34 | !pk.iter().all(|&byte| byte == 0), 35 | "Private key should not be all zeros" 36 | ); 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /src/utils/mod.rs: -------------------------------------------------------------------------------- 1 | mod serde; 2 | 3 | pub use serde::*; 4 | 5 | mod key; 6 | pub use key::*; 7 | 8 | mod auth; 9 | pub use auth::*; 10 | 11 | mod time; 12 | pub use time::*; 13 | 14 | mod transaction; 15 | pub use transaction::*; 16 | 17 | mod base64; 18 | pub use base64::*; 19 | -------------------------------------------------------------------------------- /src/utils/serde/field_as_string.rs: -------------------------------------------------------------------------------- 1 | use serde::{de, Deserialize, Deserializer, Serializer}; 2 | use std::str::FromStr; 3 | 4 | pub fn deserialize<'de, T, D>(deserializer: D) -> Result 5 | where 6 | T: FromStr, 7 | D: Deserializer<'de>, 8 | ::Err: std::fmt::Debug, 9 | { 10 | let s: String = String::deserialize(deserializer)?; 
11 | s.parse() 12 | .map_err(|e| de::Error::custom(format!("Parse error: {e:?}"))) 13 | } 14 | 15 | pub fn serialize(value: &T, serializer: S) -> Result 16 | where 17 | T: ToString, 18 | S: Serializer, 19 | { 20 | serializer.serialize_str(&value.to_string()) 21 | } 22 | -------------------------------------------------------------------------------- /src/utils/serde/mod.rs: -------------------------------------------------------------------------------- 1 | mod u128_deserializer; 2 | pub use u128_deserializer::*; 3 | 4 | mod u64_deserializer; 5 | pub use u64_deserializer::*; 6 | pub mod field_as_string; 7 | -------------------------------------------------------------------------------- /src/utils/serde/u64_deserializer.rs: -------------------------------------------------------------------------------- 1 | //! Deserialization utilities for u64 values 2 | //! 3 | //! This module provides a custom deserializer for u64 values. 4 | 5 | use std::fmt; 6 | 7 | use serde::{de, Deserializer}; 8 | 9 | #[derive(Debug)] 10 | struct U64Visitor; 11 | 12 | impl de::Visitor<'_> for U64Visitor { 13 | type Value = u64; 14 | 15 | fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { 16 | formatter.write_str("a string containing a u64 number or a u64 integer") 17 | } 18 | 19 | // Handle string inputs like "340282366920938463463374607431768211455" 20 | fn visit_str(self, value: &str) -> Result 21 | where 22 | E: de::Error, 23 | { 24 | value.parse::().map_err(de::Error::custom) 25 | } 26 | 27 | // Handle u64 inputs 28 | #[allow(clippy::unnecessary_cast)] 29 | fn visit_u64(self, value: u64) -> Result 30 | where 31 | E: de::Error, 32 | { 33 | Ok(value) 34 | } 35 | 36 | // Handle i64 inputs 37 | fn visit_i64(self, value: i64) -> Result 38 | where 39 | E: de::Error, 40 | { 41 | if value < 0 { 42 | Err(de::Error::custom( 43 | "negative value cannot be converted to u64", 44 | )) 45 | } else { 46 | Ok(value as u64) 47 | } 48 | } 49 | } 50 | 51 | pub fn deserialize_u64<'de, 
D>(deserializer: D) -> Result 52 | where 53 | D: Deserializer<'de>, 54 | { 55 | deserializer.deserialize_any(U64Visitor) 56 | } 57 | 58 | #[cfg(test)] 59 | mod tests { 60 | use super::*; 61 | use serde::de::value::{ 62 | Error as ValueError, I64Deserializer, StringDeserializer, U64Deserializer, 63 | }; 64 | 65 | #[test] 66 | fn test_deserialize_from_string() { 67 | let input = "12345"; 68 | let deserializer = StringDeserializer::::new(input.to_string()); 69 | let result = deserialize_u64(deserializer); 70 | assert!(result.is_ok()); 71 | assert_eq!(result.unwrap(), 12345); 72 | } 73 | 74 | #[test] 75 | fn test_deserialize_from_string_max_u64() { 76 | let input = "18446744073709551615"; // u64::MAX 77 | let deserializer = StringDeserializer::::new(input.to_string()); 78 | let result = deserialize_u64(deserializer); 79 | assert!(result.is_ok()); 80 | assert_eq!(result.unwrap(), u64::MAX); 81 | } 82 | 83 | #[test] 84 | fn test_deserialize_from_invalid_string() { 85 | let input = "not a number"; 86 | let deserializer = StringDeserializer::::new(input.to_string()); 87 | let result = deserialize_u64(deserializer); 88 | assert!(result.is_err()); 89 | } 90 | 91 | #[test] 92 | fn test_deserialize_from_u64() { 93 | let input: u64 = 54321; 94 | let deserializer = U64Deserializer::::new(input); 95 | let result = deserialize_u64(deserializer); 96 | assert!(result.is_ok()); 97 | assert_eq!(result.unwrap(), 54321); 98 | } 99 | 100 | #[test] 101 | fn test_deserialize_from_i64_positive() { 102 | let input: i64 = 9876; 103 | let deserializer = I64Deserializer::::new(input); 104 | let result = deserialize_u64(deserializer); 105 | assert!(result.is_ok()); 106 | assert_eq!(result.unwrap(), 9876); 107 | } 108 | 109 | #[test] 110 | fn test_deserialize_from_i64_negative() { 111 | let input: i64 = -123; 112 | let deserializer = I64Deserializer::::new(input); 113 | let result = deserialize_u64(deserializer); 114 | assert!(result.is_err()); 115 | } 116 | } 117 | 
-------------------------------------------------------------------------------- /src/utils/time.rs: -------------------------------------------------------------------------------- 1 | /// Converts minutes to milliseconds 2 | pub const fn minutes_ms(minutes: i64) -> i64 { 3 | minutes * 60 * 1000 4 | } 5 | 6 | #[cfg(test)] 7 | mod tests { 8 | use super::*; 9 | 10 | #[test] 11 | fn test_minutes_ms() { 12 | assert_eq!(minutes_ms(1), 60_000); 13 | assert_eq!(minutes_ms(5), 300_000); 14 | assert_eq!(minutes_ms(10), 600_000); 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /src/utils/transaction.rs: -------------------------------------------------------------------------------- 1 | use crate::constants::DEFAULT_TRANSACTION_SPEED; 2 | use crate::models::evm::Speed; 3 | use crate::utils::time::minutes_ms; 4 | 5 | /// Gets the resubmit timeout for a given speed 6 | /// Returns the timeout in milliseconds based on the speed: 7 | /// - SafeLow: 10 minutes 8 | /// - Average: 5 minutes 9 | /// - Fast: 3 minutes 10 | /// - Fastest: 2 minutes 11 | /// If no speed is provided, uses the default transaction speed 12 | pub fn get_resubmit_timeout_for_speed(speed: &Option) -> i64 { 13 | let speed_value = speed.clone().unwrap_or(DEFAULT_TRANSACTION_SPEED); 14 | 15 | match speed_value { 16 | Speed::SafeLow => minutes_ms(10), 17 | Speed::Average => minutes_ms(5), 18 | Speed::Fast => minutes_ms(3), 19 | Speed::Fastest => minutes_ms(2), 20 | } 21 | } 22 | 23 | /// Calculates the resubmit age with exponential backoff 24 | /// 25 | /// # Arguments 26 | /// * `timeout` - The base timeout in milliseconds 27 | /// * `attempts` - The number of attempts made so far 28 | /// 29 | /// # Returns 30 | /// The new timeout with exponential backoff applied: timeout * 2^(attempts-1) 31 | pub fn get_resubmit_timeout_with_backoff(timeout: i64, attempts: usize) -> i64 { 32 | if attempts <= 1 { 33 | timeout 34 | } else { 35 | timeout * 2_i64.pow((attempts 
- 1) as u32) 36 | } 37 | } 38 | 39 | #[cfg(test)] 40 | mod tests { 41 | use super::*; 42 | 43 | #[test] 44 | fn test_get_resubmit_timeout_for_speed() { 45 | // Test with existing speeds 46 | assert_eq!( 47 | get_resubmit_timeout_for_speed(&Some(Speed::SafeLow)), 48 | minutes_ms(10) 49 | ); 50 | assert_eq!( 51 | get_resubmit_timeout_for_speed(&Some(Speed::Average)), 52 | minutes_ms(5) 53 | ); 54 | assert_eq!( 55 | get_resubmit_timeout_for_speed(&Some(Speed::Fast)), 56 | minutes_ms(3) 57 | ); 58 | assert_eq!( 59 | get_resubmit_timeout_for_speed(&Some(Speed::Fastest)), 60 | minutes_ms(2) 61 | ); 62 | 63 | // Test with None speed (should return default) 64 | assert_eq!( 65 | get_resubmit_timeout_for_speed(&None), 66 | minutes_ms(3) // DEFAULT_TRANSACTION_SPEED is Speed::Fast 67 | ); 68 | } 69 | 70 | #[test] 71 | fn test_get_resubmit_timeout_with_backoff() { 72 | let base_timeout = 300000; // 5 minutes in ms 73 | 74 | // First attempt - no backoff 75 | assert_eq!(get_resubmit_timeout_with_backoff(base_timeout, 1), 300000); 76 | 77 | // Second attempt - 2x backoff 78 | assert_eq!(get_resubmit_timeout_with_backoff(base_timeout, 2), 600000); 79 | 80 | // Third attempt - 4x backoff 81 | assert_eq!(get_resubmit_timeout_with_backoff(base_timeout, 3), 1200000); 82 | 83 | // Fourth attempt - 8x backoff 84 | assert_eq!(get_resubmit_timeout_with_backoff(base_timeout, 4), 2400000); 85 | 86 | // Edge case - attempt 0 should be treated as attempt 1 87 | assert_eq!(get_resubmit_timeout_with_backoff(base_timeout, 0), 300000); 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /tests/integration.rs: -------------------------------------------------------------------------------- 1 | //! Integration tests for the OpenZeppelin Relayer. 2 | //! 3 | //! 
Contains tests for relayer functionality 4 | 5 | mod integration { 6 | mod logging; 7 | mod metrics; 8 | } 9 | -------------------------------------------------------------------------------- /tests/properties.rs: -------------------------------------------------------------------------------- 1 | //! PBT tests for the OpenZeppelin Relayer. 2 | //! 3 | //! Contains tests for relayer functionality 4 | 5 | mod properties { 6 | mod logging; 7 | } 8 | -------------------------------------------------------------------------------- /tests/properties/logging.rs: -------------------------------------------------------------------------------- 1 | //! Property-based tests for logging. 2 | //! 3 | //! These tests verify the behavior of the `compute_rolled_file_path` function, 4 | //! focusing on template variable substitution and output consistency. 5 | //! The tests ensure that the logging system handles template variables correctly 6 | //! and produces consistent, well-formed output across various input combinations. 7 | //! 8 | //! Refer to `src/logging/mod.rs` for more details. 9 | use openzeppelin_relayer::logging::compute_rolled_file_path; 10 | use proptest::{prelude::*, test_runner::Config}; 11 | 12 | proptest! 
{ 13 | // Set the number of cases to 1000 14 | #![proptest_config(Config { 15 | cases: 1000, ..Config::default() 16 | })] 17 | 18 | /// Property test for compute_rolled_file_path when base ends with ".log" 19 | #[test] 20 | fn prop_compute_rolled_file_path_with_log_suffix( 21 | base in ".*[^.]", 22 | // ensuring non-empty ending character in date 23 | date in "[0-9]{4}-[0-9]{2}-[0-9]{2}" 24 | ) { 25 | let base_with_log = format!("{}{}.log", base, ""); 26 | let result = compute_rolled_file_path(&base_with_log, &date, 1); 27 | let expected = format!("{}-{}.{}.log", base_with_log.strip_suffix(".log").unwrap(), date, 1); 28 | prop_assert_eq!(result, expected); 29 | } 30 | 31 | /// Property test for compute_rolled_file_path when base does not end with ".log" 32 | #[test] 33 | fn prop_compute_rolled_file_path_without_log_suffix( 34 | base in ".*", 35 | date in "[0-9]{4}-[0-9]{2}-[0-9]{2}" 36 | ) { 37 | // Ensure base does not end with ".log" 38 | let base_non_log = if base.ends_with(".log") 39 | { 40 | format!("{}x", base) 41 | } else { 42 | base 43 | }; 44 | let result = compute_rolled_file_path(&base_non_log, &date,1); 45 | let expected = format!("{}-{}.{}.log", base_non_log, date, 1); 46 | prop_assert_eq!(result, expected); 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /tests/utils/test_keys/unit-test-local-signer.json: -------------------------------------------------------------------------------- 1 | {"crypto":{"cipher":"aes-128-ctr","cipherparams":{"iv":"981dbc6e12b03661798018468552019d"},"ciphertext":"4956bb91cb0e02689f3d9dbd94dbd9c7e6438e44387869fb81a22c750d410b37","kdf":"scrypt","kdfparams":{"dklen":32,"n":8192,"p":1,"r":8,"salt":"8580c4df3ed0073a7467a83e6925ef523b33005487112877ba33e070c78ee2d3"},"mac":"82202935d7e4bda61633d5bf2152cdd122089dbe5df180c43f8996699f5310b5"},"id":"ebbaf818-1207-4b94-9f68-0be98d664062","version":3} 2 | -------------------------------------------------------------------------------- 
/typos.toml: -------------------------------------------------------------------------------- 1 | [default.extend-identifiers] 2 | HashiCorp = "HashiCorp" 3 | NOOPs = "NOOPs" 4 | --------------------------------------------------------------------------------