├── .dockerignore ├── .env.example ├── .github ├── ISSUE_TEMPLATE │ ├── bug.yml │ ├── docs.yml │ └── feature.yml ├── RELEASE.md ├── actions │ └── prepare │ │ └── action.yml ├── codecov.yml ├── dependabot.yml ├── pr-title-checker-config.json ├── pull_request_template.md ├── release-please │ ├── .config.json │ └── manifest.json └── workflows │ ├── ci.yaml │ ├── cla.yml │ ├── pr-title.yaml │ ├── rc.yml │ ├── release-bins.yml │ ├── release-docker.yml │ ├── release-docs.yml │ ├── release-please.yml │ ├── release-sbom.yml │ ├── rust-docs-url.yml │ ├── scorecard.yml │ └── update-lock.yaml ├── .gitignore ├── .pre-commit-config.yaml ├── .pre-commit-hooks.yaml ├── .typos.toml ├── .yamlfix.toml ├── CHANGELOG.md ├── CODEOWNERS ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── Cargo.lock ├── Cargo.toml ├── DOCKER_README.md ├── Dockerfile.development ├── Dockerfile.production ├── LICENSE ├── Makefile.toml ├── README.md ├── SECURITY.md ├── cmd └── prometheus │ ├── dashboards │ ├── dashboard.yml │ └── monitor_metrics.json │ ├── datasources │ └── prometheus.yml │ ├── grafana.ini │ └── prometheus.yml ├── committed.toml ├── config ├── filters │ └── .gitkeep ├── monitors │ └── .gitkeep ├── networks │ └── .gitkeep └── triggers │ ├── .gitkeep │ └── scripts │ └── .gitkeep ├── data └── .gitkeep ├── docker-compose.yaml ├── docs ├── README.md ├── antora.yml ├── modules │ └── ROOT │ │ ├── nav.adoc │ │ └── pages │ │ ├── error.adoc │ │ ├── index.adoc │ │ ├── quickstart.adoc │ │ ├── rpc.adoc │ │ ├── scripts.adoc │ │ └── structure.adoc ├── package-lock.json └── package.json ├── examples └── config │ ├── filters │ ├── evm_filter_block_number.js │ ├── evm_filter_block_number.py │ ├── evm_filter_block_number.sh │ ├── stellar_filter_block_number.js │ ├── stellar_filter_block_number.py │ └── stellar_filter_block_number.sh │ ├── monitors │ ├── evm_transfer_usdc.json │ └── stellar_swap_dex.json │ ├── networks │ ├── arbitrum_nova.json │ ├── arbitrum_one.json │ ├── arbitrum_sepolia.json │ ├── base.json │ 
├── base_sepolia.json │ ├── bsc_mainnet.json │ ├── bsc_testnet.json │ ├── ethereum_mainnet.json │ ├── ethereum_sepolia.json │ ├── optimism_mainnet.json │ ├── optimism_sepolia.json │ ├── polygon_amoy.json │ ├── polygon_mainnet.json │ ├── stellar_mainnet.json │ ├── stellar_testnet.json │ ├── unichain_sepolia.json │ ├── zksync_era_mainnet.json │ └── zksync_era_sepolia.json │ └── triggers │ ├── discord_notifications.json │ ├── email_notifications.json │ ├── script_notifications.json │ ├── scripts │ ├── custom_notification.js │ ├── custom_notification.py │ └── custom_notification.sh │ ├── slack_notifications.json │ ├── telegram_notifications.json │ └── webhook_notifications.json ├── netlify.toml ├── rust-toolchain.toml ├── rustfmt.toml ├── scripts ├── docker_compose.sh ├── rust_antora.sh └── validate_network_config.sh ├── src ├── bootstrap │ └── mod.rs ├── lib.rs ├── main.rs ├── models │ ├── blockchain │ │ ├── evm │ │ │ ├── block.rs │ │ │ ├── mod.rs │ │ │ ├── monitor.rs │ │ │ ├── receipt.rs │ │ │ └── transaction.rs │ │ ├── mod.rs │ │ └── stellar │ │ │ ├── block.rs │ │ │ ├── event.rs │ │ │ ├── mod.rs │ │ │ ├── monitor.rs │ │ │ └── transaction.rs │ ├── config │ │ ├── error.rs │ │ ├── mod.rs │ │ ├── monitor_config.rs │ │ ├── network_config.rs │ │ └── trigger_config.rs │ ├── core │ │ ├── mod.rs │ │ ├── monitor.rs │ │ ├── network.rs │ │ └── trigger.rs │ ├── mod.rs │ └── security │ │ ├── error.rs │ │ ├── mod.rs │ │ └── secret.rs ├── repositories │ ├── error.rs │ ├── mod.rs │ ├── monitor.rs │ ├── network.rs │ └── trigger.rs ├── services │ ├── blockchain │ │ ├── client.rs │ │ ├── clients │ │ │ ├── evm │ │ │ │ └── client.rs │ │ │ ├── mod.rs │ │ │ └── stellar │ │ │ │ └── client.rs │ │ ├── error.rs │ │ ├── mod.rs │ │ ├── pool.rs │ │ └── transports │ │ │ ├── endpoint_manager.rs │ │ │ ├── error.rs │ │ │ ├── evm │ │ │ └── http.rs │ │ │ ├── http.rs │ │ │ ├── mod.rs │ │ │ └── stellar │ │ │ └── http.rs │ ├── blockwatcher │ │ ├── error.rs │ │ ├── mod.rs │ │ ├── service.rs │ │ ├── 
storage.rs │ │ └── tracker.rs │ ├── filter │ │ ├── error.rs │ │ ├── expression │ │ │ ├── ast.rs │ │ │ ├── error.rs │ │ │ ├── evaluation.rs │ │ │ ├── helpers.rs │ │ │ ├── mod.rs │ │ │ └── parsing.rs │ │ ├── filter_match.rs │ │ ├── filters │ │ │ ├── evm │ │ │ │ ├── evaluator.rs │ │ │ │ ├── filter.rs │ │ │ │ └── helpers.rs │ │ │ ├── mod.rs │ │ │ └── stellar │ │ │ │ ├── evaluator.rs │ │ │ │ ├── filter.rs │ │ │ │ └── helpers.rs │ │ └── mod.rs │ ├── mod.rs │ ├── notification │ │ ├── discord.rs │ │ ├── email.rs │ │ ├── error.rs │ │ ├── mod.rs │ │ ├── script.rs │ │ ├── slack.rs │ │ ├── telegram.rs │ │ └── webhook.rs │ └── trigger │ │ ├── error.rs │ │ ├── mod.rs │ │ ├── script │ │ ├── error.rs │ │ ├── executor.rs │ │ ├── factory.rs │ │ ├── mod.rs │ │ └── validation.rs │ │ └── service.rs └── utils │ ├── constants.rs │ ├── cron_utils.rs │ ├── expression.rs │ ├── logging │ ├── error.rs │ └── mod.rs │ ├── macros │ ├── deserialization.rs │ └── mod.rs │ ├── metrics │ ├── README.md │ ├── mod.rs │ └── server.rs │ ├── mod.rs │ ├── monitor │ ├── error.rs │ ├── execution.rs │ └── mod.rs │ ├── parsing.rs │ └── tests │ ├── builders │ ├── evm │ │ ├── monitor.rs │ │ ├── receipt.rs │ │ └── transaction.rs │ ├── network.rs │ ├── stellar │ │ └── monitor.rs │ └── trigger.rs │ └── mod.rs └── tests ├── integration.rs ├── integration ├── blockchain │ ├── clients │ │ ├── evm │ │ │ └── client.rs │ │ └── stellar │ │ │ └── client.rs │ ├── pool.rs │ └── transports │ │ ├── endpoint_manager.rs │ │ ├── evm │ │ ├── http.rs │ │ └── transport.rs │ │ ├── http.rs │ │ └── stellar │ │ ├── http.rs │ │ └── transport.rs ├── blockwatcher │ └── service.rs ├── bootstrap │ └── main.rs ├── filters │ ├── common.rs │ ├── evm │ │ └── filter.rs │ └── stellar │ │ └── filter.rs ├── fixtures │ ├── evm │ │ ├── blocks.json │ │ ├── contract_spec.json │ │ ├── monitors │ │ │ └── monitor.json │ │ ├── networks │ │ │ └── network.json │ │ ├── transaction_receipts.json │ │ └── triggers │ │ │ └── trigger.json │ ├── filters │ │ ├── 
evm_filter_block_number.js │ │ ├── evm_filter_block_number.py │ │ ├── evm_filter_block_number.sh │ │ ├── evm_filter_by_arguments.py │ │ ├── stellar_filter_block_number.js │ │ ├── stellar_filter_block_number.py │ │ └── stellar_filter_block_number.sh │ └── stellar │ │ ├── blocks.json │ │ ├── contract_spec.json │ │ ├── events.json │ │ ├── monitors │ │ └── monitor.json │ │ ├── networks │ │ └── network.json │ │ ├── transactions.json │ │ └── triggers │ │ └── trigger.json ├── mocks │ ├── clients.rs │ ├── logging.rs │ ├── mod.rs │ ├── models.rs │ ├── repositories.rs │ ├── services.rs │ └── transports.rs ├── monitor │ └── execution.rs ├── notifications │ ├── discord.rs │ ├── email.rs │ ├── script.rs │ ├── slack.rs │ ├── telegram.rs │ └── webhook.rs └── security │ └── secret.rs ├── properties.rs └── properties ├── filters ├── evm │ └── filter.rs └── stellar │ └── filter.rs ├── notifications ├── discord.rs ├── email.rs ├── slack.rs ├── telegram.rs └── webhook.rs ├── repositories ├── monitor.rs ├── network.rs └── trigger.rs ├── strategies.rs ├── triggers └── script.rs └── utils └── logging.rs /.dockerignore: -------------------------------------------------------------------------------- 1 | # ignore all .git files and directories 2 | .git* 3 | -------------------------------------------------------------------------------- /.env.example: -------------------------------------------------------------------------------- 1 | RUST_LOG=info 2 | # Additional logging options (file mode)...by default is stdout 3 | # See docker-compose.yaml for more details 4 | # LOG_MODE=file 5 | # LOG_DATA_DIR=logs/ 6 | # MONITOR_DATA_DIR=data/ 7 | # LOG_MAX_SIZE=1073741824 8 | # METRICS_ENABLED=false 9 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug Report 3 | description: Create a bug report 4 | labels: [T-bug, S-needs-triage] 5 | 
body: 6 | - type: markdown 7 | attributes: 8 | value: | 9 | Thanks for taking the time to fill out this bug report! Please provide as much detail as possible. 10 | 11 | If you believe you have found a vulnerability, please provide details [here](mailto:security@openzeppelin.com) instead. 12 | - type: textarea 13 | id: what-happened 14 | attributes: 15 | label: Describe the bug 16 | description: | 17 | A clear and concise description of what the bug is. 18 | 19 | If the bug is in a crate you are using (i.e. you are not running the standard `openzeppelin-monitor` binary) please mention that as well. 20 | validations: 21 | required: true 22 | - type: textarea 23 | id: reproduction-steps 24 | attributes: 25 | label: Steps to reproduce 26 | description: Please provide any steps you think might be relevant to reproduce 27 | the bug. 28 | placeholder: | 29 | Steps to reproduce: 30 | 31 | 1. Start '...' 32 | 2. Then '...' 33 | 3. Check '...' 34 | 4. See error 35 | validations: 36 | required: true 37 | - type: textarea 38 | id: logs 39 | attributes: 40 | label: Application logs 41 | description: | 42 | Please provide the relevant application logs leading up to the bug. 43 | render: text 44 | validations: 45 | required: false 46 | - type: dropdown 47 | id: platform 48 | attributes: 49 | label: Platform(s) 50 | description: What platform(s) did this occur on? 51 | multiple: true 52 | options: 53 | - Linux (x86) 54 | - Linux (ARM) 55 | - Mac (Intel) 56 | - Mac (Apple Silicon) 57 | - Windows (x86) 58 | - Windows (ARM) 59 | - type: dropdown 60 | id: deployment 61 | attributes: 62 | label: Deployment Type 63 | description: How are you running openzeppelin-monitor? 64 | multiple: false 65 | options: 66 | - Binary from releases 67 | - Built from source 68 | - Docker container 69 | - Other 70 | validations: 71 | required: true 72 | - type: textarea 73 | id: additional 74 | attributes: 75 | label: Any other context that we need to know specific to the bug? 
76 | - type: textarea 77 | id: version 78 | attributes: 79 | label: Version Information 80 | description: Run `openzeppelin-monitor --version` and paste the output 81 | validations: 82 | required: true 83 | - type: textarea 84 | id: monitor-config 85 | attributes: 86 | label: Monitor Configuration 87 | description: | 88 | Please provide the relevant monitor configuration file(s) from your config directory. 89 | Make sure to remove any sensitive information like private keys or API tokens. 90 | render: json 91 | validations: 92 | required: false 93 | - type: dropdown 94 | id: network-type 95 | attributes: 96 | label: Network Type 97 | description: Which blockchain network(s) are you monitoring? 98 | multiple: true 99 | options: 100 | - EVM 101 | - Stellar 102 | - Other 103 | validations: 104 | required: true 105 | - type: input 106 | id: build-command 107 | attributes: 108 | label: Build Command 109 | description: If you built from source, what command did you use? 110 | placeholder: cargo build --release 111 | validations: 112 | required: false 113 | - type: checkboxes 114 | id: terms 115 | attributes: 116 | label: Code of Conduct 117 | description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/openzeppelin/openzeppelin-monitor/blob/main/CONTRIBUTING.md#code-of-conduct) 118 | options: 119 | - label: I agree to follow the Code of Conduct 120 | required: true 121 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/docs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Documentation 3 | description: Suggest a change to our documentation 4 | labels: [T-documentation, S-needs-triage] 5 | body: 6 | - type: markdown 7 | attributes: 8 | value: | 9 | If you are unsure if the docs are relevant or needed, please open up a discussion first. 
10 | - type: textarea 11 | attributes: 12 | label: Describe the change 13 | description: | 14 | Please describe the documentation you want to change or add, and if it is for end-users or contributors. 15 | validations: 16 | required: true 17 | - type: textarea 18 | attributes: 19 | label: Additional context 20 | description: Add any other context to the feature (like screenshots, resources) 21 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | description: Suggest a feature 4 | labels: [T-feature, S-needs-triage] 5 | body: 6 | - type: markdown 7 | attributes: 8 | value: | 9 | Please ensure that the feature has not already been requested in the issue tracker. 10 | - type: textarea 11 | attributes: 12 | label: Describe the feature 13 | description: | 14 | Please describe the feature and what it is aiming to solve, if relevant. 15 | 16 | If the feature is for a crate, please include a proposed API surface. 17 | validations: 18 | required: true 19 | - type: textarea 20 | attributes: 21 | label: Additional context 22 | description: Add any other context to the feature (like screenshots, resources) 23 | -------------------------------------------------------------------------------- /.github/RELEASE.md: -------------------------------------------------------------------------------- 1 | # CI/CD Release Workflow 2 | 3 | --- 4 | 5 | ## Workflows 6 | 7 | - To trigger a release, use the [rc.yml](workflows/rc.yml) workflow. 8 | - It will need a specific commit SHA in long format to start the workflow. 9 | - All the commits until that specific commit SHA will be included in the release. 10 | - Checks version from `Cargo.toml` and validates if it needs to create a new release branch. If there is a release branch that already exists for the same version in `Cargo.toml` the workflow will fail. 
11 | - Release branch is created in this pattern `release-v`. 12 | 13 | - Second workflow [release-please.yml](workflows/release-please.yml) will get triggered on push to release branch automatically. 14 | - This workflow checks if there is any "higher versioned" branches than the current one since this workflow will be triggered for any pushes ( eg. hotfixes ). 15 | - We use [release-please](https://github.com/googleapis/release-please) for managing releases. If there are no "higher versioned" branches release-please step will be triggered. 16 | - Release please automatically creates a PR with Changelog notes to release branch which keeps track of all commits in that release branch and adds a label `autorelease: pending`. It uses [config](release-please/.config.json) & [manifest](release-please/manifest.json) files to generate changelog and track versions. If there are any changes to `Cargo.lock` that commit is pushed to the PR. 17 | - Once approved merge the PR. On merging `release-please` automatically creates a github release with changelog notes & tags the release with that version. 18 | - Workflow has a step to unlock conversation in the now closed PR so that release-please can post a comment and update the label `autorelease: tagged`. 19 | - SBOM generation, Binaries creation for different arch & Docker build and push jobs are triggered. 20 | 21 | - If everything looks good post release, raise a PR and merge the `release-v` branch to main (manual step for now). 22 | -------------------------------------------------------------------------------- /.github/actions/prepare/action.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Cache 3 | description: Caches cargo dependencies 4 | inputs: 5 | components: 6 | description: Additional Rust components to install (comma separated). rustfmt and clippy are always included. 
7 | required: false 8 | default: '' 9 | outputs: 10 | cache-hit: 11 | description: Cache Hit 12 | value: ${{ steps.cache.outputs.cache-hit }} 13 | runs: 14 | using: composite 15 | steps: 16 | - name: setup rust tool chain 17 | uses: dtolnay/rust-toolchain@1.86.0 # v1.86.0 18 | with: 19 | components: ${{ (inputs.components != '') && format('{0}, rustfmt, clippy', inputs.components) || 'rustfmt, clippy' }} 20 | - name: Prepare cache identifiers 21 | id: cache-info 22 | shell: bash 23 | run: | 24 | echo "DATE=$(date +'%Y-%m-%d')" >> $GITHUB_ENV 25 | echo "LOCK_HASH=$(sha256sum Cargo.lock | cut -d' ' -f1)" >> $GITHUB_ENV 26 | - name: Restore cargo dependencies from cache 27 | uses: Swatinem/rust-cache@f0deed1e0edfc6a9be95417288c0e1099b1eeec3 # v2.7.7 28 | id: cache 29 | with: 30 | shared-key: ${{ env.DATE }}-${{ env.LOCK_HASH }} 31 | -------------------------------------------------------------------------------- /.github/codecov.yml: -------------------------------------------------------------------------------- 1 | --- 2 | coverage: 3 | range: 90..100 4 | round: down 5 | precision: 1 6 | status: 7 | project: 8 | default: 9 | target: 80% 10 | threshold: 1% 11 | flags: 12 | - integration 13 | - properties 14 | - unittests 15 | patch: 16 | default: 17 | target: 90% 18 | threshold: 1% 19 | 20 | ignore: 21 | - tests/**/* 22 | - src/main.rs 23 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: 2 3 | # opt in to updates for ecosystems that are not yet GA. 
4 | enable-beta-ecosystems: true 5 | updates: 6 | # Maintain dependencies for GitHub Actions 7 | - package-ecosystem: github-actions 8 | directory: / 9 | schedule: 10 | interval: weekly 11 | commit-message: 12 | # Prefix all commit messages with "chore(deps): " 13 | prefix: 'chore(deps): ' 14 | reviewers: 15 | - OpenZeppelin/defender-sre 16 | - OpenZeppelin/defender-dev 17 | 18 | # Maintain dependencies for cargo 19 | - package-ecosystem: cargo 20 | directory: / 21 | schedule: 22 | interval: weekly 23 | ignore: 24 | - dependency-name: '*' 25 | update-types: [version-update:semver-major] 26 | commit-message: 27 | # Prefix all commit messages 28 | prefix: 'chore(deps): ' 29 | reviewers: 30 | - OpenZeppelin/defender-dev 31 | - OpenZeppelin/defender-sre 32 | labels: 33 | - dependabot 34 | - dependencies 35 | # Allow up to 10 open pull requests for testing 36 | open-pull-requests-limit: 5 37 | -------------------------------------------------------------------------------- /.github/pr-title-checker-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "LABEL": { 3 | "name": "PR title checker needs attention", 4 | "color": "EEEEEE" 5 | }, 6 | "CHECKS": { 7 | "regexp": "^(fix|feat|docs|chore|refactor|style|ci|revert|test)!?(\\(.*\\))?!?:.*" 8 | }, 9 | "MESSAGES": { 10 | "success": "PR title is valid", 11 | "failure": "PR title is invalid", 12 | "notice": "Title needs to pass regex '^(fix|feat|docs|chore|refactor|style|ci|revert|test)!?(\\(.*\\))?!?:.*" 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | # Summary 2 | 3 | ## Testing Process 4 | 5 | ## Checklist 6 | 7 | - [ ] Add a reference to related issues in the PR description. 8 | - [ ] Add unit tests if applicable. 9 | - [ ] Add integration tests if applicable. 
10 | - [ ] Add property-based tests if applicable. 11 | - [ ] Update documentation if applicable. 12 | -------------------------------------------------------------------------------- /.github/release-please/.config.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://raw.githubusercontent.com/googleapis/release-please/main/schemas/config.json", 3 | "packages": { 4 | ".": {} 5 | }, 6 | "release-type": "rust", 7 | "include-component-in-tag": false, 8 | "pull-request-title-pattern": "chore: Release version ${version}", 9 | "pull-request-header": "🤖 Created a Release ⚡ ⚡", 10 | "pull-request-footer": "Merge this pull request to trigger the next release.", 11 | "changelog-sections": [ 12 | { 13 | "type": "feat", 14 | "section": "🚀 Features" 15 | }, 16 | { 17 | "type": "fix", 18 | "section": "🐛 Bug Fixes" 19 | }, 20 | { 21 | "type": "revert", 22 | "section": "◀️ Reverts" 23 | }, 24 | { 25 | "type": "chore", 26 | "section": "⚙️ Miscellaneous Chores", 27 | "hidden": true 28 | }, 29 | { 30 | "type": "docs", 31 | "section": "📚 Documentation", 32 | "hidden": true 33 | }, 34 | { 35 | "type": "style", 36 | "section": "🎨 Styles", 37 | "hidden": true 38 | }, 39 | { 40 | "type": "refactor", 41 | "section": "🚜 Code Refactoring", 42 | "hidden": true 43 | }, 44 | { 45 | "type": "test", 46 | "section": "🧪 Tests", 47 | "hidden": true 48 | }, 49 | { 50 | "type": "build", 51 | "section": "🛠️ Build System", 52 | "hidden": true 53 | }, 54 | { 55 | "type": "ci", 56 | "section": "🥏 Continuous Integration", 57 | "hidden": true 58 | } 59 | ], 60 | "extra-files": [ 61 | { 62 | "type": "toml", 63 | "path": "Cargo.toml", 64 | "jsonpath": "package.version" 65 | } 66 | ] 67 | } 68 | -------------------------------------------------------------------------------- /.github/release-please/manifest.json: -------------------------------------------------------------------------------- 1 | {".":"0.2.0"} 2 | 
-------------------------------------------------------------------------------- /.github/workflows/pr-title.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: PR Title 3 | 4 | on: 5 | pull_request: 6 | branches: 7 | - main 8 | types: [opened, edited, reopened, synchronize] 9 | 10 | jobs: 11 | validate: 12 | runs-on: ubuntu-latest 13 | steps: 14 | - uses: thehanimo/pr-title-checker@7fbfe05602bdd86f926d3fb3bccb6f3aed43bc70 # v1.4.3 15 | with: 16 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 17 | configuration_path: .github/pr-title-checker-config.json 18 | -------------------------------------------------------------------------------- /.github/workflows/rc.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: RC for Major/Minor Releases 3 | on: 4 | workflow_dispatch: 5 | inputs: 6 | commit_sha: 7 | description: Long form commit SHA to create release branch from 8 | required: true 9 | type: string 10 | permissions: 11 | contents: write 12 | pull-requests: write 13 | # run concurrency group for the workflow 14 | concurrency: 15 | group: ${{ github.workflow }}-${{ github.ref }} 16 | cancel-in-progress: false 17 | jobs: 18 | create-release-branch: 19 | runs-on: ubuntu-latest 20 | steps: 21 | - uses: actions/create-github-app-token@3ff1caaa28b64c9cc276ce0a02e2ff584f3900c5 # v2.0.2 22 | id: gh-app-token 23 | with: 24 | app-id: ${{ vars.GH_APP_ID }} 25 | private-key: ${{ secrets.GH_APP_PRIVATE_KEY }} 26 | - name: Validate Commit SHA 27 | run: | 28 | if [[ ! "$INPUT_COMMIT_SHA" =~ ^[0-9a-f]{40}$ ]]; then 29 | echo "Invalid commit SHA: $INPUT_COMMIT_SHA. Please provide the full 40-character SHA." 
30 | echo "Provided SHA: $INPUT_COMMIT_SHA" 31 | echo "Length: ${#INPUT_COMMIT_SHA}" 32 | exit 1 33 | fi 34 | echo "Valid commit SHA: $INPUT_COMMIT_SHA" 35 | env: 36 | INPUT_COMMIT_SHA: ${{ github.event.inputs.commit_sha }} 37 | - name: Checkout repository at commit SHA 38 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 39 | with: 40 | ref: ${{ github.event.inputs.commit_sha }} 41 | fetch-depth: 0 42 | token: ${{ steps.gh-app-token.outputs.token }} 43 | - name: Get version from Cargo.toml 44 | id: get_version 45 | run: | 46 | # Extract the version from manifest.json 47 | version=$(jq -r '.[ "."]' .github/release-please/manifest.json) 48 | if [ -z "$version" ]; then 49 | echo "Error: Version not found in manifest.json" 50 | exit 1 51 | fi 52 | echo "Version found: $version" 53 | # Get current version 54 | IFS='.' read -r major minor patch <<< "$version" 55 | new_minor=$((minor + 1)) 56 | new_version="${major}.${new_minor}.0" 57 | echo "New version: $new_version" 58 | echo "version=$new_version" >> $GITHUB_OUTPUT 59 | - name: Set release branch name 60 | id: set_branch 61 | run: | 62 | branch="release-v${{ steps.get_version.outputs.version }}" 63 | echo "release_branch=$branch" >> $GITHUB_ENV 64 | echo "Release branch will be: $branch" 65 | - name: Check if release branch exists 66 | id: check_branch 67 | run: | 68 | branch="release-v${{ steps.get_version.outputs.version }}" 69 | if git ls-remote --exit-code --heads origin "$branch" > /dev/null 2>&1; then 70 | echo "exists=true" >> $GITHUB_OUTPUT 71 | else 72 | echo "exists=false" >> $GITHUB_OUTPUT 73 | fi 74 | - name: Create release branch 75 | id: update_branch 76 | shell: bash 77 | run: |- 78 | branch="release-v${{ steps.get_version.outputs.version }}" 79 | commit_sha="${{ github.event.inputs.commit_sha }}" 80 | echo "branch=$branch" >> $GITHUB_OUTPUT 81 | if [ "${{ steps.check_branch.outputs.exists }}" == "true" ]; then 82 | echo "Branch '$branch' already exists. Exiting with error." 
83 | exit 1 84 | else 85 | echo "Branch '$branch' does not exist. Creating new branch from commit $commit_sha." 86 | git checkout -b $branch $commit_sha 87 | git push -f origin $branch 88 | fi 89 | -------------------------------------------------------------------------------- /.github/workflows/release-sbom.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Generate SBOM 3 | on: 4 | workflow_call: 5 | inputs: 6 | tag: 7 | type: string 8 | description: The tag to use for generating SBOM. 9 | required: true 10 | jobs: 11 | sbom: 12 | name: Generate SBOM 13 | runs-on: ubuntu-latest 14 | environment: release 15 | env: 16 | SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} 17 | SLACK_CHANNEL: '#oss-releases' 18 | steps: 19 | - name: Get github app token 20 | uses: actions/create-github-app-token@af35edadc00be37caa72ed9f3e6d5f7801bfdf09 # v1.11.7 21 | id: gh-app-token 22 | with: 23 | app-id: ${{ vars.GH_APP_ID }} 24 | private-key: ${{ secrets.GH_APP_PRIVATE_KEY }} 25 | - name: Checkout tag 26 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 27 | with: 28 | ref: ${{ inputs.tag }} 29 | token: ${{ steps.gh-app-token.outputs.token }} 30 | - name: Slack notification 31 | uses: act10ns/slack@44541246747a30eb3102d87f7a4cc5471b0ffb7d # v2.1.0 32 | with: 33 | status: starting 34 | steps: ${{ toJson(steps) }} 35 | channel: ${{ env.SLACK_CHANNEL }} 36 | message: Starting generating sbom for ${{ github.repository }} with tag ${{ inputs.tag }}...... 
37 | if: always() 38 | - name: Run SBOM 39 | uses: anchore/sbom-action@9f7302141466aa6482940f15371237e9d9f4c34a # v0.19.0 40 | with: 41 | upload-artifact-retention: 7 42 | upload-release-assets: false 43 | github-token: ${{ steps.gh-app-token.outputs.token }} 44 | output-file: openzeppelin-monitor-${{ inputs.tag }}-spdx.json 45 | artifact-name: openzeppelin-monitor-${{ inputs.tag }}-spdx.json 46 | - name: Upload Release Artifact 47 | env: 48 | GH_TOKEN: ${{ steps.gh-app-token.outputs.token }} 49 | run: gh release upload ${{ inputs.tag }} openzeppelin-monitor-${{ inputs.tag }}-spdx.json 50 | - name: SBOM attestation 51 | uses: actions/attest-build-provenance@db473fddc028af60658334401dc6fa3ffd8669fd # main 52 | with: 53 | subject-path: ./openzeppelin-monitor-${{ inputs.tag }}-spdx.json 54 | github-token: ${{ steps.gh-app-token.outputs.token }} 55 | - name: Slack notification 56 | uses: act10ns/slack@44541246747a30eb3102d87f7a4cc5471b0ffb7d # v2.1.0 57 | with: 58 | status: ${{ job.status }} 59 | steps: ${{ toJson(steps) }} 60 | channel: ${{ env.SLACK_CHANNEL }} 61 | message: Generating sbom ${{ job.status }} 62 | if: always() 63 | -------------------------------------------------------------------------------- /.github/workflows/rust-docs-url.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Update Technical Docs Link 3 | on: 4 | workflow_dispatch: {} 5 | push: 6 | branches: [docs-v*] 7 | workflow_call: 8 | inputs: 9 | branch: 10 | type: string 11 | description: The branch to update the technical docs link. 
12 | required: true 13 | permissions: 14 | actions: read 15 | contents: write 16 | pull-requests: write 17 | packages: write 18 | id-token: write 19 | jobs: 20 | update-docs-link: 21 | runs-on: ubuntu-latest 22 | environment: release 23 | steps: 24 | - uses: actions/create-github-app-token@3ff1caaa28b64c9cc276ce0a02e2ff584f3900c5 # v2.0.2 25 | id: gh-app-token 26 | with: 27 | app-id: ${{ vars.GH_APP_ID }} 28 | private-key: ${{ secrets.GH_APP_PRIVATE_KEY }} 29 | - name: Checkout repository 30 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 31 | with: 32 | token: ${{ steps.gh-app-token.outputs.token }} 33 | - name: Use branch input 34 | id: get_branch 35 | run: | 36 | if [ -n "${{ inputs.branch }}" ]; then 37 | echo "Using provided branch input: ${{ inputs.branch }}" 38 | echo "branch=${{ inputs.branch }}" >> $GITHUB_OUTPUT 39 | else 40 | echo "No branch input provided, deriving from GITHUB_REF" 41 | branch="${GITHUB_REF#refs/heads/}" 42 | echo "Derived branch from ref: $branch" 43 | echo "branch=$branch" >> $GITHUB_OUTPUT 44 | fi 45 | - name: Update the technical docs link in nav.adoc 46 | id: update-docs 47 | run: | 48 | branch="${{ steps.get_branch.outputs.branch }}" 49 | # Netlify uses `-` instead of `.` in branch names for constructing the URL 50 | slug="${branch//./-}" 51 | target="https://${slug}%2D%2Dopenzeppelin-monitor.netlify.app" 52 | file="docs/modules/ROOT/nav.adoc" 53 | if grep -q "${target}" "${file}"; then 54 | echo "nav.adoc is already using ${target}" 55 | echo "changed=false" >> $GITHUB_OUTPUT 56 | exit 0 57 | fi 58 | echo "Updating to branch URL: ${target}" 59 | sed -i -E "s|(https://)[^/]*openzeppelin-monitor.netlify.app|${target}|g" "${file}" 60 | echo "Updated nav.adoc to use ${target}" 61 | echo "changed=true" >> $GITHUB_OUTPUT 62 | - name: Create Pull Request to update the technical docs version 63 | if: ${{ steps.get_branch.outputs.branch != '' && steps.update-docs.outputs.changed == 'true' }} 64 | uses: 
peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8 65 | with: 66 | token: ${{ steps.gh-app-token.outputs.token }} 67 | title: 'docs: Update technical docs version in the nav.adoc file' 68 | body: Automatically generated PR to update technical docs version in the nav.adoc file. 69 | branch-suffix: short-commit-hash 70 | sign-commits: true 71 | commit-message: 'docs: Update technical docs version in the nav.adoc file' 72 | -------------------------------------------------------------------------------- /.github/workflows/scorecard.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This workflow uses actions that are not certified by GitHub. They are provided 3 | # by a third-party and are governed by separate terms of service, privacy 4 | # policy, and support documentation. 5 | 6 | name: Scorecard supply-chain security 7 | on: 8 | # For Branch-Protection check. Only the default branch is supported. See 9 | # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection 10 | branch_protection_rule: 11 | # To guarantee Maintained check is occasionally updated. See 12 | # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained 13 | schedule: 14 | - cron: 35 1 * * 6 15 | push: 16 | branches: [main] 17 | 18 | # Declare default permissions as read only. 19 | permissions: read-all 20 | 21 | jobs: 22 | analysis: 23 | name: Scorecard analysis 24 | runs-on: ubuntu-latest 25 | permissions: 26 | # Needed to upload the results to code-scanning dashboard. 27 | security-events: write 28 | # Needed to publish results and get a badge (see publish_results below). 
29 | id-token: write 30 | contents: read 31 | actions: read 32 | 33 | steps: 34 | - name: Harden Runner 35 | uses: step-security/harden-runner@91182cccc01eb5e619899d80e4e971d6181294a7 # v2.10.1 36 | with: 37 | egress-policy: audit 38 | 39 | - name: Checkout code 40 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 41 | with: 42 | persist-credentials: false 43 | 44 | - name: Run analysis 45 | uses: ossf/scorecard-action@f49aabe0b5af0936a0987cfb85d86b75731b0186 # v2.4.1 46 | with: 47 | results_file: results.sarif 48 | results_format: sarif 49 | # (Optional) "write" PAT token. Uncomment the `repo_token` line below if: 50 | # - you want to enable the Branch-Protection check on a *public* repository, or 51 | # - you are installing Scorecard on a *private* repository 52 | # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action#authentication-with-pat. 53 | repo_token: ${{ secrets.GITHUB_TOKEN }} 54 | 55 | # Public repositories: 56 | # - Publish results to OpenSSF REST API for easy access by consumers 57 | # - Allows the repository to include the Scorecard badge. 58 | # - See https://github.com/ossf/scorecard-action#publishing-results. 59 | publish_results: true 60 | 61 | # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF 62 | # format to the repository Actions tab. 63 | - name: Upload artifact 64 | uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # v4.6.1 65 | with: 66 | name: SARIF file 67 | path: results.sarif 68 | retention-days: 5 69 | 70 | # Upload the results to GitHub's code scanning dashboard. 
71 | - name: Upload to code-scanning 72 | uses: github/codeql-action/upload-sarif@45775bd8235c68ba998cffa5171334d58593da47 # v3.28.15 73 | with: 74 | sarif_file: results.sarif 75 | -------------------------------------------------------------------------------- /.github/workflows/update-lock.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Nightly Cargo.lock update 3 | on: 4 | schedule: 5 | - cron: 0 12 * * * 6 | workflow_dispatch: {} 7 | permissions: 8 | actions: read 9 | contents: write 10 | pull-requests: write 11 | id-token: write 12 | jobs: 13 | update-lock: 14 | runs-on: ubuntu-latest 15 | steps: 16 | - name: Checkout 17 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 18 | with: 19 | fetch-depth: 0 20 | token: ${{ secrets.GITHUB_TOKEN }} 21 | ref: main 22 | - name: Prepare 23 | id: init 24 | uses: ./.github/actions/prepare 25 | 26 | # Get the output of the prepare composite action 27 | - name: Get cache-hit output 28 | run: 'echo "Cache hit >>>>>: ${{ steps.init.outputs.cache-hit }}"' 29 | - name: Cargo Update 30 | id: lock-file-commit 31 | run: |- 32 | cargo update 33 | git add Cargo.lock 34 | if ! git diff --cached --quiet Cargo.lock; then 35 | echo "changes=true" >> $GITHUB_OUTPUT 36 | else 37 | echo "Cargo.lock has no changes, skipping commit and push." 38 | exit 0 39 | fi 40 | - name: Create or update pull request 41 | uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8 42 | with: 43 | token: ${{ secrets.GITHUB_TOKEN }} 44 | title: 'chore: Updating lock file' 45 | sign-commits: true 46 | branch: update-cargo-lock 47 | delete-branch: true 48 | commit-message: 'chore: Updating lock file' 49 | body: |- 50 | This PR is generated automatically by GitHub Actions. 51 | It contains all dependency updates since the last run. 
52 | base: main 53 | labels: dependencies, automation 54 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | debug/ 4 | target/ 5 | 6 | # These are backup files generated by rustfmt 7 | **/*.rs.bk 8 | 9 | # MSVC Windows builds of rustc generate these, which store debugging information 10 | *.pdb 11 | 12 | # RustRover 13 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 14 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 15 | # and can be added to the global gitignore or merged into this file. For a more nuclear 16 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 17 | #.idea/ 18 | 19 | # Ignore data files 20 | data/**/*.txt 21 | data/**/*.json 22 | 23 | # Ignore actual config files but not examples 24 | config/**/*.json 25 | config/**/*.sh 26 | !examples/config/**/*.json 27 | 28 | # Ignore .env files 29 | .env 30 | 31 | # Ignore DS_Store files 32 | .DS_Store 33 | 34 | # Ignore proptest regressions 35 | tests/**/*.proptest-regressions 36 | 37 | # Coverage files 38 | coverage-*.profraw 39 | coverage-data/ 40 | coverage/ 41 | *-lcov.info 42 | 43 | # docs 44 | docs/build/ 45 | rust_docs 46 | 47 | # Ignore node_modules 48 | node_modules 49 | 50 | # Ignore logs dir 51 | logs/ 52 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | minimum_pre_commit_version: 3.5.0 3 | default_install_hook_types: [commit-msg, pre-commit, pre-push] 4 | default_stages: [pre-commit, pre-push] 5 | ci: 6 | autofix_commit_msg: 'chore(pre-commit): autofix run' 7 | autoupdate_commit_msg: 'chore(pre-commit): autoupdate hooks' 8 | 
repos: 9 | - repo: local 10 | hooks: 11 | - id: pre-commit 12 | name: Update pre-commit 13 | entry: pre-commit install --install-hooks -t pre-commit -t pre-push 14 | pass_filenames: false 15 | language: system 16 | files: ^\.pre-commit-config.yaml$ 17 | - id: rustfmt 18 | name: rustfmt 19 | entry: cargo fmt 20 | pass_filenames: false 21 | language: system 22 | types: [rust] 23 | - id: clippy 24 | name: clippy 25 | entry: cargo clippy --all-targets --all-features -- -D warnings 26 | pass_filenames: false 27 | language: system 28 | - repo: https://github.com/pre-commit/pre-commit-hooks 29 | rev: v5.0.0 30 | hooks: 31 | - id: check-json 32 | - id: check-toml 33 | - id: check-merge-conflict 34 | - id: check-case-conflict 35 | - id: detect-private-key 36 | - id: trailing-whitespace 37 | - id: end-of-file-fixer 38 | - repo: https://github.com/lyz-code/yamlfix/ 39 | rev: 1.17.0 40 | hooks: 41 | - id: yamlfix 42 | args: [-c, .yamlfix.toml] 43 | - repo: https://github.com/crate-ci/committed 44 | rev: v1.1.5 45 | hooks: 46 | - id: committed 47 | stages: [commit-msg] 48 | - repo: https://github.com/crate-ci/typos 49 | rev: v1.29.4 50 | hooks: 51 | - id: typos 52 | - repo: https://github.com/compilerla/conventional-pre-commit 53 | rev: v3.4.0 54 | hooks: 55 | - id: conventional-pre-commit 56 | stages: [commit-msg] 57 | args: [--strict, build, chore, ci, docs, feat, fix, perf, refactor, revert, style, test] 58 | -------------------------------------------------------------------------------- /.pre-commit-hooks.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - id: conventional-pre-commit 4 | name: Conventional Commit 5 | entry: conventional-pre-commit 6 | language: python 7 | description: Checks commit message for Conventional Commits formatting 8 | always_run: true 9 | stages: [commit-msg] 10 | -------------------------------------------------------------------------------- /.typos.toml: 
-------------------------------------------------------------------------------- 1 | [files] 2 | extend-exclude = ["CHANGELOG.md"] 3 | -------------------------------------------------------------------------------- /.yamlfix.toml: -------------------------------------------------------------------------------- 1 | allow_duplicate_keys = false 2 | line_length = 280 3 | sequence_style = "flow_style" 4 | -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @collins-w @d-carmo @NicoMolinaOZ @shahnami @tirumerla @zeljkoX 2 | 3 | SECURITY.md @shahnami @tirumerla @son-oz 4 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "openzeppelin-monitor" 3 | version = "0.2.0" 4 | edition = "2021" 5 | rust-version = "1.84" #MSRV 6 | 7 | [profile.release] 8 | opt-level = 0 9 | overflow-checks = false 10 | panic = 'abort' 11 | 12 | [profile.test] 13 | debug = true 14 | opt-level = 0 15 | overflow-checks = true 16 | panic = 'unwind' 17 | 18 | [dependencies] 19 | actix-rt = "2.0.0" 20 | actix-web = "4" 21 | alloy = { version = "0.15.6", features = ["full"] } 22 | anyhow = { version = "1.0.97", features = ["std"] } 23 | async-trait = "0.1" 24 | base64 = "0.22" 25 | byte-unit = "5.1.6" 26 | chrono = "0.4" 27 | clap = { version = "4.5", features = ["cargo", "derive"] } 28 | cron = "0.15.0" 29 | dotenvy = "0.15.7" 30 | email_address = "0.2.9" 31 | ethabi = "18.0.0" 32 | futures = "0.3" 33 | glob = "0.3" 34 | hex = "0.4" 35 | hmac = "0.12.0" 36 | lazy_static = "1.5" 37 | lettre = "0.11.11" 38 | libc = "0.2" 39 | log = "0.4" 40 | oz-keystore = "0.1.4" 41 | prometheus = "0.14" 42 | pulldown-cmark = "0.13.0" 43 | regex = "1.11.0" 44 | reqwest = { version = "=0.12.15", features = ["json"] } 45 | reqwest-middleware = 
"0.4.1" 46 | reqwest-retry = "0.7.0" 47 | rust_decimal = "1.37.1" 48 | serde = { version = "1.0", features = ["derive"] } 49 | serde_json = "1.0" 50 | sha2 = "0.10.0" 51 | soroban-spec = "22.0.7" 52 | stellar-rpc-client = "22.0.0" 53 | stellar-strkey = "0.0.13" 54 | stellar-xdr = "22.1.0" 55 | sysinfo = "0.34.2" 56 | thiserror = "2.0.12" 57 | tokio = { version = "1.0", features = ["full"] } 58 | tokio-cron-scheduler = "0.13.0" 59 | tracing = "0.1.41" 60 | tracing-appender = "0.2" 61 | tracing-core = "0.1.33" 62 | tracing-subscriber = { version = "0.3.19", features = ["env-filter"] } 63 | url = "2.5" 64 | urlencoding = "2.1.3" 65 | uuid = "1.15.0" 66 | winnow = "0.7.9" 67 | zeroize = { version = "1.8.1", features = ["derive"] } 68 | 69 | [dev-dependencies] 70 | cargo-llvm-cov = "0.6" 71 | mockall = "0.13.1" 72 | mockito = "1.6.1" 73 | once_cell = "1.20.0" 74 | proptest = "1.6.0" 75 | rand = "0.9.0" 76 | tempfile = "3.2" 77 | tracing-test = "0.2.5" 78 | 79 | [lib] 80 | path = "src/lib.rs" 81 | 82 | [[bin]] 83 | path = "src/main.rs" 84 | name = "openzeppelin-monitor" 85 | 86 | [features] 87 | test-ci-only = [] 88 | -------------------------------------------------------------------------------- /DOCKER_README.md: -------------------------------------------------------------------------------- 1 | # OpenZeppelin Monitor 2 | 3 | > :warning: This software is in alpha. Use in production environments at your own risk. 4 | 5 | In the rapidly evolving world of blockchain technology, effective monitoring is crucial for ensuring security and performance. OpenZeppelin Monitor is a blockchain monitoring service that watches for specific on-chain activities and triggers notifications based on configurable conditions. The service offers multi-chain support with configurable monitoring schedules, flexible trigger conditions, and an extensible architecture for adding new chains. 
6 | 7 | [Install](https://docs.openzeppelin.com/monitor#getting_started) | [User Docs](https://docs.openzeppelin.com/monitor) | [Quickstart](https://docs.openzeppelin.com/monitor/quickstart) | [Crate Docs](https://docs.openzeppelin.com/monitor/rust_docs/doc/openzeppelin_monitor/) 8 | 9 | ## Pre-requisites 10 | 11 | - Docker installed on your machine 12 | - Copy example configuration files to `./config` directory and modify according to your needs. See [examples](https://docs.openzeppelin.com/monitor/quickstart#examples) for more information. 13 | 14 | ## How to use images pushed to DockerHub 15 | 16 | - These images are automatically pulled when you use docker compose. See [using docker compose](https://docs.openzeppelin.com/monitor#run_with_docker) for more information. 17 | - If you are not using docker compose and you want to use these images, follow the steps below. 18 | 19 | ### 1. Pull the image 20 | 21 | You can pull the latest image using the following command: 22 | 23 | ```bash 24 | docker pull openzeppelin/openzeppelin-monitor:latest 25 | ``` 26 | 27 | ### 2. Run the image 28 | 29 | You can run the image using the following command: 30 | 31 | ```bash 32 | docker run -d \ 33 | --name monitor \ 34 | -v ./config:/app/config:ro \ 35 | openzeppelin/openzeppelin-monitor:latest 36 | ``` 37 | 38 | ### 3. Stop the container 39 | 40 | You can stop the container using the following command: 41 | 42 | ```bash 43 | docker stop monitor 44 | ``` 45 | 46 | ### 4. Remove the container 47 | 48 | You can remove the container using the following command: 49 | 50 | ```bash 51 | docker rm monitor 52 | ``` 53 | 54 | ### 5. Remove the image 55 | 56 | You can remove the image using the following command: 57 | 58 | ```bash 59 | docker rmi openzeppelin/openzeppelin-monitor:latest 60 | ``` 61 | 62 | ## Contributing 63 | 64 | We welcome contributions from the community. 
Please read our [contributing section](https://github.com/OpenZeppelin/openzeppelin-monitor/?tab=readme-ov-file#contributing) for more information. 65 | 66 | ## License 67 | 68 | This project is licensed under the GNU Affero General Public License v3.0 - see the [LICENSE](https://github.com/OpenZeppelin/openzeppelin-monitor/blob/main/LICENSE) file for details. 69 | 70 | ## Security 71 | 72 | For security concerns, please refer to our [Security Policy](https://github.com/OpenZeppelin/openzeppelin-monitor/blob/main/SECURITY.md). 73 | 74 | ## Get Help 75 | 76 | If you have any questions, first see if the answer to your question can be found in the [User Documentation](https://docs.openzeppelin.com/monitor). 77 | 78 | If the answer is not there: 79 | 80 | - Join the [Telegram](https://t.me/openzeppelin_tg/4) to get help, or 81 | - Open an issue with [the bug](https://github.com/openzeppelin/openzeppelin-monitor/issues/new?assignees=&labels=T-bug%2CS-needs-triage&projects=&template=bug.yml) 82 | 83 | We encourage you to reach out with any questions or feedback. 84 | -------------------------------------------------------------------------------- /Dockerfile.development: -------------------------------------------------------------------------------- 1 | # Base image 2 | FROM --platform=${BUILDPLATFORM} cgr.dev/chainguard/rust:latest-dev@sha256:faf49718aaa95c798ed1dfdf3e4edee2cdbc3790c8994705ca6ef35972128459 AS base 3 | 4 | WORKDIR /usr/app 5 | 6 | USER root 7 | RUN apk update && apk add openssl-dev 8 | 9 | # Copy 10 | COPY . . 11 | 12 | RUN --mount=type=cache,target=/usr/local/cargo/registry \ 13 | --mount=type=cache,target=/app/target \ 14 | RUST_BACKTRACE=1 cargo install --root /usr/app --path . 
--debug --locked 15 | 16 | # Wolfi image 17 | FROM --platform=${BUILDPLATFORM} cgr.dev/chainguard/wolfi-base:latest 18 | 19 | ARG version=3.12 20 | 21 | RUN apk add bash python-${version} py${version}-pip nodejs jq 22 | 23 | WORKDIR /app 24 | COPY --from=base --chown=nonroot:nonroot /usr/app/bin/openzeppelin-monitor /app/openzeppelin-monitor 25 | 26 | ENV METRICS_PORT=8081 27 | 28 | EXPOSE ${METRICS_PORT}/tcp 29 | 30 | # starting up 31 | ENTRYPOINT ["/app/openzeppelin-monitor"] 32 | -------------------------------------------------------------------------------- /Dockerfile.production: -------------------------------------------------------------------------------- 1 | # Base image 2 | FROM --platform=${BUILDPLATFORM} cgr.dev/chainguard/rust:latest-dev@sha256:faf49718aaa95c798ed1dfdf3e4edee2cdbc3790c8994705ca6ef35972128459 AS base 3 | 4 | WORKDIR /usr/app 5 | 6 | USER root 7 | RUN apk update && apk add openssl-dev 8 | 9 | COPY . . 10 | RUN --mount=type=cache,target=/usr/local/cargo/registry \ 11 | --mount=type=cache,target=/app/target \ 12 | cargo install --root /usr/app --path . --locked 13 | 14 | # Wolfi image 15 | FROM --platform=${BUILDPLATFORM} cgr.dev/chainguard/wolfi-base 16 | 17 | ARG version=3.12 18 | 19 | RUN apk add bash python-${version} py${version}-pip nodejs jq 20 | 21 | # removes apk and unneeded wolfi-base tools. 
22 | RUN apk del wolfi-base apk-tools 23 | 24 | WORKDIR /app 25 | COPY --from=base --chown=nonroot:nonroot /usr/app/bin/openzeppelin-monitor /app/openzeppelin-monitor 26 | 27 | ENV METRICS_PORT=8081 28 | 29 | EXPOSE ${METRICS_PORT}/tcp 30 | 31 | # starting up 32 | ENTRYPOINT ["/app/openzeppelin-monitor"] 33 | -------------------------------------------------------------------------------- /Makefile.toml: -------------------------------------------------------------------------------- 1 | [tasks.rust-antora] 2 | description = "Build Antora site and copy rust_docs" 3 | script = [ 4 | "cargo doc --target-dir docs/rust_docs --release --no-deps --quiet --locked", 5 | "bash scripts/rust_antora.sh" 6 | ] 7 | 8 | [tasks.docker-compose-up] 9 | description = "Run docker compose up according to the user defined settings" 10 | script = [ 11 | "chmod +x ./scripts/docker_compose.sh", 12 | "./scripts/docker_compose.sh up" 13 | ] 14 | 15 | [tasks.docker-compose-down] 16 | description = "Run docker compose down according to the user defined settings" 17 | script = [ 18 | "chmod +x ./scripts/docker_compose.sh", 19 | "./scripts/docker_compose.sh down" 20 | ] 21 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | Security vulnerabilities should be [disclosed](#reporting-a-vulnerability) to the [project maintainers](./CODEOWNERS), or alternatively by email to security@openzeppelin.com. 4 | 5 | ## Supported Versions 6 | 7 | The following versions are currently supported and receive security updates. Alpha, Beta and Release candidates will not receive security updates. 8 | 9 | Security patches will be released for the latest minor of a given major release. For example, if an issue is found in versions >=1.13.0 and the latest is 1.14.0, the patch will be released only in version 1.14.1. 
10 | 11 | Only critical severity bug fixes will be backported to past major releases. 12 | 13 | | Version | Supported | 14 | | --------- | ------------------ | 15 | | >= 0.1.x | :white_check_mark: | 16 | | <= 0.0.9 | :x: | 17 | 18 | ## Reporting a Vulnerability 19 | 20 | We're extremely grateful for security researchers and users that report vulnerabilities to us. 21 | All reports are thoroughly investigated by the project's security team. 22 | 23 | Vulnerabilities are reported privately via GitHub's [Security Advisories](https://docs.github.com/en/code-security/security-advisories) feature. 24 | Please use the following link to submit your vulnerability: [Report a vulnerability](https://github.com/openzeppelin/openzeppelin-monitor/security/advisories/new) 25 | 26 | Please see 27 | [Privately reporting a security vulnerability](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability) 28 | for more information on how to submit a vulnerability using GitHub's interface. 29 | 30 | ## Legal 31 | 32 | OpenZeppelin Monitor is made available under the GNU AGPL 3.0 License, which disclaims all warranties in relation to the project and which limits the liability of those that contribute and maintain the project, including OpenZeppelin. Your use of the project is also governed by the terms found at www.openzeppelin.com/tos (the "Terms"). As set out in the Terms, you are solely responsible for any use of OpenZeppelin Monitor and you assume all risks associated with any such use. This Security Policy in no way evidences or represents an on-going duty by any contributor, including OpenZeppelin, to correct any flaws or alert you to all or any of the potential risks of utilizing the project. 
33 | -------------------------------------------------------------------------------- /cmd/prometheus/dashboards/dashboard.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: 1 3 | 4 | providers: 5 | # an unique provider name 6 | - name: OpenZeppelin Monitor 7 | # org id. will default to orgId 1 if not specified 8 | orgId: 1 9 | # name of the dashboard folder. Required 10 | folder: '' 11 | # folder UID. will be automatically generated if not specified 12 | folderUid: '' 13 | # provider type. Required 14 | type: file 15 | # disable dashboard deletion 16 | disableDeletion: false 17 | # enable dashboard editing 18 | editable: true 19 | # how often Grafana will scan for changed dashboards 20 | updateIntervalSeconds: 10 21 | # allow updating provisioned dashboards from the UI 22 | allowUiUpdates: true 23 | options: 24 | # path to dashboard files on disk. Required 25 | path: /etc/grafana/provisioning/dashboards 26 | foldersFromFilesStructure: true 27 | -------------------------------------------------------------------------------- /cmd/prometheus/datasources/prometheus.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # config file version 3 | apiVersion: 1 4 | 5 | # list of datasources that should be deleted from the database 6 | deleteDatasources: 7 | - name: Prometheus 8 | orgId: 1 9 | 10 | # list of datasources to insert/update depending 11 | # whats available in the database 12 | datasources: 13 | # name of the datasource. Required 14 | - name: Prometheus 15 | # datasource type. Required 16 | type: prometheus 17 | # access mode. direct or proxy. Required 18 | access: proxy 19 | # org id. 
will default to orgId 1 if not specified 20 | orgId: 1 21 | # url 22 | url: http://prometheus:9090 23 | # database password, if used 24 | password: 25 | # database user, if used 26 | user: 27 | # database name, if used 28 | database: 29 | # enable/disable basic auth 30 | basicAuth: false 31 | # basic auth username, if used 32 | basicAuthUser: 33 | # basic auth password, if used 34 | basicAuthPassword: 35 | # enable/disable with credentials headers 36 | withCredentials: 37 | # mark as default datasource. Max one per org 38 | isDefault: true 39 | # fields that will be converted to json and stored in json_data 40 | jsonData: 41 | graphiteVersion: '1.1' 42 | tlsAuth: false 43 | tlsAuthWithCACert: false 44 | # json object of data that will be encrypted. 45 | secureJsonData: 46 | tlsCACert: '...' 47 | tlsClientCert: '...' 48 | tlsClientKey: '...' 49 | version: 1 50 | # allow users to edit datasources from the UI. 51 | editable: true 52 | -------------------------------------------------------------------------------- /cmd/prometheus/prometheus.yml: -------------------------------------------------------------------------------- 1 | --- 2 | global: 3 | scrape_interval: 10s 4 | scrape_timeout: 3s 5 | evaluation_interval: 5s 6 | 7 | scrape_configs: 8 | - job_name: monitor 9 | # Prometheus uses this api path to scrape metrics from the monitor container 10 | metrics_path: /metrics 11 | scheme: http 12 | static_configs: 13 | - targets: 14 | - monitor:8081 15 | -------------------------------------------------------------------------------- /committed.toml: -------------------------------------------------------------------------------- 1 | style="conventional" 2 | ignore_author_re="(dependabot|renovate)" 3 | merge_commit = false 4 | -------------------------------------------------------------------------------- /config/filters/.gitkeep: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/OpenZeppelin/openzeppelin-monitor/f02a13708b647f5451e4a6bc947f702886694865/config/filters/.gitkeep -------------------------------------------------------------------------------- /config/monitors/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenZeppelin/openzeppelin-monitor/f02a13708b647f5451e4a6bc947f702886694865/config/monitors/.gitkeep -------------------------------------------------------------------------------- /config/networks/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenZeppelin/openzeppelin-monitor/f02a13708b647f5451e4a6bc947f702886694865/config/networks/.gitkeep -------------------------------------------------------------------------------- /config/triggers/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenZeppelin/openzeppelin-monitor/f02a13708b647f5451e4a6bc947f702886694865/config/triggers/.gitkeep -------------------------------------------------------------------------------- /config/triggers/scripts/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenZeppelin/openzeppelin-monitor/f02a13708b647f5451e4a6bc947f702886694865/config/triggers/scripts/.gitkeep -------------------------------------------------------------------------------- /data/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenZeppelin/openzeppelin-monitor/f02a13708b647f5451e4a6bc947f702886694865/data/.gitkeep -------------------------------------------------------------------------------- /docker-compose.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Ports `8081` for metrics server. 
3 | 4 | services: 5 | monitor: 6 | image: openzeppelin/openzeppelin-monitor:latest 7 | build: 8 | context: . 9 | dockerfile: ${DOCKERFILE:-Dockerfile.development} 10 | environment: 11 | METRICS_PORT: ${METRICS_PORT:-8081} 12 | METRICS_ENABLED: ${METRICS_ENABLED:-false} 13 | # Options: trace, debug, info, warn, error 14 | # Default: info 15 | LOG_LEVEL: ${RUST_LOG:-info} 16 | # Options: stdout, file 17 | # Default: stdout 18 | LOG_MODE: ${LOG_MODE:-stdout} 19 | # Only used if LOG_MODE is file 20 | # Default: 1GB (1073741824 bytes) 21 | LOG_MAX_SIZE: ${LOG_MAX_SIZE:-1073741824} 22 | IN_DOCKER: 'true' 23 | restart: on-failure:5 24 | mem_swappiness: 0 25 | security_opt: 26 | - no-new-privileges 27 | volumes: 28 | - ./config:/app/config/:ro 29 | - ${MONITOR_DATA_DIR:-/dev/null}:/app/data 30 | - ${LOG_DATA_DIR:-./logs}:/app/logs 31 | networks: 32 | - monitor-network 33 | - metrics-network 34 | 35 | prometheus: 36 | image: prom/prometheus:v3.1.0 37 | security_opt: 38 | - no-new-privileges 39 | command: --log.level=warn --config.file=/etc/prometheus/prometheus.yml --storage.tsdb.path=/prometheus 40 | --storage.tsdb.retention.time=30d 41 | ports: 42 | - 9090:9090/tcp 43 | networks: 44 | - metrics-network 45 | - monitor-network 46 | volumes: 47 | - ./cmd/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml 48 | restart: on-failure:5 49 | profiles: 50 | - metrics 51 | 52 | grafana: 53 | image: grafana/grafana:11.5.1 54 | security_opt: 55 | - no-new-privileges 56 | ports: [3000:3000/tcp] 57 | networks: 58 | - metrics-network 59 | - monitor-network 60 | volumes: 61 | - ./cmd/prometheus/grafana.ini:/etc/grafana/grafana.ini 62 | - ./cmd/prometheus/datasources:/etc/grafana/provisioning/datasources 63 | - ./cmd/prometheus/dashboards:/etc/grafana/provisioning/dashboards 64 | restart: on-failure:5 65 | profiles: 66 | - metrics 67 | 68 | networks: 69 | metrics-network: 70 | internal: true 71 | monitor-network: 72 | driver: bridge 73 | 
-------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | # Antora Documentation 2 | 3 | ## Generate Documentation 4 | 5 | - To generate documentation locally, run the following command 6 | 7 | ```sh 8 | yarn docs:watch 9 | ``` 10 | 11 | - In separate terminal from root of the repo run: 12 | 13 | ```sh 14 | cargo make rust-antora 15 | ``` 16 | 17 | - You can view the site `localhost:8080` or other port if it's in use. 18 | -------------------------------------------------------------------------------- /docs/antora.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: monitor 3 | title: Monitor 4 | version: 0.2.x 5 | nav: [modules/ROOT/nav.adoc] 6 | -------------------------------------------------------------------------------- /docs/modules/ROOT/nav.adoc: -------------------------------------------------------------------------------- 1 | * xref:index.adoc[User Documentation] 2 | * xref:quickstart.adoc[Quickstart] 3 | * xref:structure.adoc[Project Structure] 4 | * xref:rpc.adoc[RPC Client] 5 | * xref:scripts.adoc[Custom scripts] 6 | * xref:error.adoc[Error Handling] 7 | * link:https://release-v0-2-0%2D%2Dopenzeppelin-monitor.netlify.app/openzeppelin_monitor/[Technical Rust Documentation^] 8 | -------------------------------------------------------------------------------- /docs/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "docs", 3 | "version": "0.0.0", 4 | "scripts": { 5 | "docs": "oz-docs -c .", 6 | "docs:watch": "npm run docs watch" 7 | }, 8 | "keywords": [], 9 | "author": "", 10 | "devDependencies": { 11 | "@openzeppelin/docs-utils": "^0.1.3" 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /examples/config/filters/evm_filter_block_number.js: 
-------------------------------------------------------------------------------- 1 | /** 2 | * EVM Block Number Filter 3 | * 4 | * This script filters monitor matches based on the block number of the transaction. 5 | * It demonstrates a simple filter that only allows transactions from even-numbered blocks. 6 | * 7 | * Input: JSON object containing: 8 | * - monitor_match: The monitor match data with transaction details 9 | * - args: Additional arguments passed to the script 10 | * 11 | * Output: 12 | * - Prints 'true' for transactions in even-numbered blocks 13 | * - Prints 'false' for transactions in odd-numbered blocks or invalid input 14 | * 15 | * Note: Block numbers are extracted from the EVM transaction data and converted 16 | * from hexadecimal to decimal before processing. 17 | */ 18 | try { 19 | let inputData = ''; 20 | // Read from stdin 21 | process.stdin.on('data', chunk => { 22 | inputData += chunk; 23 | }); 24 | 25 | process.stdin.on('end', () => { 26 | const data = JSON.parse(inputData); 27 | const monitorMatch = data.monitor_match; 28 | const args = data.args; 29 | 30 | // Extract block_number 31 | let blockNumber = null; 32 | if (monitorMatch.EVM) { 33 | const hexBlock = monitorMatch.EVM.transaction?.blockNumber; 34 | if (hexBlock) { 35 | // Convert hex string to integer 36 | blockNumber = parseInt(hexBlock, 16); 37 | } 38 | } 39 | 40 | if (blockNumber === null) { 41 | console.log('false'); 42 | return; 43 | } 44 | 45 | const result = blockNumber % 2 === 0; 46 | console.log(`Block number ${blockNumber} is ${result ? 
'even' : 'odd'}`); 47 | console.log(result.toString()); 48 | }); 49 | } catch (e) { 50 | console.log(`Error processing input: ${e}`); 51 | console.log('false'); 52 | } 53 | -------------------------------------------------------------------------------- /examples/config/filters/evm_filter_block_number.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | EVM Block Number Filter 4 | 5 | This script filters monitor matches based on the block number of the transaction. 6 | It demonstrates a simple filter that only allows transactions from even-numbered blocks. 7 | 8 | Input: JSON object containing: 9 | - monitor_match: The monitor match data with transaction details 10 | - args: Additional arguments passed to the script 11 | 12 | Output: 13 | - Prints 'true' for transactions in even-numbered blocks 14 | - Prints 'false' for transactions in odd-numbered blocks or invalid input 15 | 16 | Note: Block numbers are extracted from the EVM transaction data and converted 17 | from hexadecimal to decimal before processing. 
18 | """ 19 | import sys 20 | import json 21 | 22 | def main(): 23 | try: 24 | # Read input from stdin 25 | input_data = sys.stdin.read() 26 | if not input_data: 27 | print("No input JSON provided", flush=True) 28 | return False 29 | 30 | # Parse input JSON 31 | try: 32 | data = json.loads(input_data) 33 | monitor_match = data['monitor_match'] 34 | args = data['args'] 35 | except json.JSONDecodeError as e: 36 | print(f"Invalid JSON input: {e}", flush=True) 37 | return False 38 | 39 | # Extract block_number 40 | block_number = None 41 | if "EVM" in monitor_match: 42 | hex_block = monitor_match['EVM']['transaction'].get('blockNumber') 43 | if hex_block: 44 | # Convert hex string to integer 45 | block_number = int(hex_block, 16) 46 | 47 | if block_number is None: 48 | print("Block number is None") 49 | return False 50 | 51 | result = block_number % 2 == 0 52 | print(f"Block number {block_number} is {'even' if result else 'odd'}", flush=True) 53 | return result 54 | 55 | except Exception as e: 56 | print(f"Error processing input: {e}", flush=True) 57 | return False 58 | 59 | if __name__ == "__main__": 60 | result = main() 61 | # Print the final boolean result 62 | print(str(result).lower(), flush=True) 63 | -------------------------------------------------------------------------------- /examples/config/filters/evm_filter_block_number.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | ################################################################################ 3 | # EVM Block Number Filter 4 | # 5 | # This script filters monitor matches based on the block number of the transaction. 6 | # It demonstrates a simple filter that only allows transactions from even-numbered blocks. 
7 | # 8 | # Input: JSON object containing: 9 | # - monitor_match: The monitor match data with transaction details 10 | # - args: Additional arguments passed to the script 11 | # 12 | # Arguments: 13 | # --verbose: Enables detailed logging of the filtering process 14 | # 15 | # Output: 16 | # - Prints 'true' for transactions in even-numbered blocks 17 | # - Prints 'false' for transactions in odd-numbered blocks or invalid input 18 | # - Includes additional logging when verbose mode is enabled 19 | # 20 | # Note: Block numbers are extracted from the EVM transaction data and converted 21 | # from hexadecimal to decimal before processing. 22 | ################################################################################ 23 | 24 | # Enable error handling 25 | set -e 26 | 27 | main() { 28 | # Read JSON input from stdin 29 | input_json=$(cat) 30 | 31 | # Parse arguments from the input JSON and initialize verbose flag 32 | verbose=false 33 | args=$(echo "$input_json" | jq -r '.args[]? // empty') 34 | if [ ! 
-z "$args" ]; then 35 | while IFS= read -r arg; do 36 | if [ "$arg" = "--verbose" ]; then 37 | verbose=true 38 | echo "Verbose mode enabled" 39 | fi 40 | done <<< "$args" 41 | fi 42 | 43 | # Extract the monitor match data from the input 44 | monitor_data=$(echo "$input_json" | jq -r '.monitor_match') 45 | 46 | # Validate input 47 | if [ -z "$monitor_data" ]; then 48 | echo "No input JSON provided" 49 | echo "false" 50 | exit 1 51 | fi 52 | 53 | if [ "$verbose" = true ]; then 54 | echo "Input JSON received:" 55 | fi 56 | 57 | # Extract blockNumber from the EVM receipt or transaction 58 | block_number_hex=$(echo "$monitor_data" | jq -r '.EVM.transaction.blockNumber' || echo "") 59 | 60 | # Validate that block_number_hex is not empty 61 | if [ -z "$block_number_hex" ]; then 62 | echo "Invalid JSON or missing blockNumber" 63 | echo "false" 64 | exit 1 65 | fi 66 | 67 | # Remove 0x prefix if present and clean the string 68 | block_number_hex=$(echo "$block_number_hex" | tr -d '\n' | tr -d ' ') 69 | block_number_hex=${block_number_hex#0x} 70 | 71 | if [ "$verbose" = true ]; then 72 | echo "Extracted block number (hex): $block_number_hex" 73 | fi 74 | 75 | # Convert hex to decimal with error checking 76 | if ! 
block_number=$(printf "%d" $((16#${block_number_hex})) 2>/dev/null); then 77 | echo "Failed to convert hex to decimal" 78 | echo "false" 79 | exit 1 80 | fi 81 | 82 | if [ "$verbose" = true ]; then 83 | echo "Converted block number (decimal): $block_number" 84 | fi 85 | 86 | # Check if even or odd using modulo 87 | is_even=$((block_number % 2)) 88 | 89 | if [ $is_even -eq 0 ]; then 90 | echo "Block number $block_number is even" 91 | echo "Verbose mode: $verbose" 92 | echo "true" 93 | exit 0 94 | else 95 | echo "Block number $block_number is odd" 96 | echo "Verbose mode: $verbose" 97 | echo "false" 98 | exit 0 99 | fi 100 | } 101 | 102 | # Call main function 103 | main 104 | -------------------------------------------------------------------------------- /examples/config/filters/stellar_filter_block_number.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Stellar Block Number Filter 3 | * 4 | * This script filters monitor matches based on the block number of the transaction. 5 | * It demonstrates a simple filter that only allows transactions from even-numbered blocks. 
6 | * 7 | * Input: JSON object containing: 8 | * - monitor_match: The monitor match data with transaction details 9 | * - args: Additional arguments passed to the script 10 | * 11 | * Output: 12 | * - Prints 'true' for transactions in even-numbered blocks 13 | * - Prints 'false' for transactions in odd-numbered blocks or invalid input 14 | */ 15 | try { 16 | // Read from stdin 17 | let inputData = ''; 18 | process.stdin.on('data', chunk => { 19 | inputData += chunk; 20 | }); 21 | 22 | process.stdin.on('end', () => { 23 | const data = JSON.parse(inputData); 24 | const monitorMatch = data.monitor_match; 25 | const args = data.args; 26 | 27 | // Extract ledger sequence number 28 | let ledgerNumber = null; 29 | if (monitorMatch.Stellar) { 30 | ledgerNumber = monitorMatch.Stellar.ledger.sequence; 31 | } 32 | 33 | if (ledgerNumber === null) { 34 | console.log('false'); 35 | return; 36 | } 37 | 38 | const result = ledgerNumber % 2 === 0; 39 | console.log(result.toString()); 40 | }); 41 | 42 | } catch (e) { 43 | console.log(`Error processing input: ${e}`); 44 | console.log('false'); 45 | } 46 | -------------------------------------------------------------------------------- /examples/config/filters/stellar_filter_block_number.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Stellar Block Number Filter 4 | 5 | This script filters monitor matches based on the block number of the transaction. 6 | It demonstrates a simple filter that only allows transactions from even-numbered blocks. 
7 | 8 | Input: JSON object containing: 9 | - monitor_match: The monitor match data with transaction details 10 | - args: Additional arguments passed to the script 11 | 12 | Output: 13 | - Prints 'true' for transactions in even-numbered blocks 14 | - Prints 'false' for transactions in odd-numbered blocks or invalid input 15 | """ 16 | import sys 17 | import json 18 | import logging 19 | 20 | def main(): 21 | try: 22 | # Read input from stdin 23 | input_data = sys.stdin.read() 24 | if not input_data: 25 | print("No input JSON provided", flush=True) 26 | return False 27 | 28 | # Parse input JSON 29 | try: 30 | data = json.loads(input_data) 31 | monitor_match = data['monitor_match'] 32 | args = data['args'] 33 | except json.JSONDecodeError: 34 | print("Invalid JSON input", flush=True) 35 | return False 36 | 37 | # Extract ledger_number 38 | ledger_number = None 39 | if "Stellar" in monitor_match: 40 | ledger = monitor_match['Stellar']['ledger'].get('sequence') 41 | if ledger: 42 | ledger_number = int(ledger) 43 | 44 | if ledger_number is None: 45 | return False 46 | 47 | # Return True for even ledger numbers, False for odd 48 | result = ledger_number % 2 == 0 49 | return result 50 | 51 | except Exception as e: 52 | print(f"Error processing input: {e}", flush=True) 53 | return False 54 | 55 | if __name__ == "__main__": 56 | result = main() 57 | # Only print the final boolean result 58 | print(str(result).lower(), flush=True) 59 | -------------------------------------------------------------------------------- /examples/config/filters/stellar_filter_block_number.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ################################################################################ 3 | # Stellar Block Number Filter 4 | # 5 | # This script filters monitor matches based on the block number of the transaction. 6 | # It demonstrates a simple filter that only allows transactions from even-numbered blocks. 
7 | # 8 | # Input: JSON object containing: 9 | # - monitor_match: The monitor match data with transaction details 10 | # - args: Additional arguments passed to the script 11 | # 12 | # Arguments: 13 | # --verbose: Enables detailed logging of the filtering process 14 | # 15 | # Output: 16 | # - Prints 'true' for transactions in even-numbered blocks 17 | # - Prints 'false' for transactions in odd-numbered blocks or invalid input 18 | # - Includes additional logging when verbose mode is enabled 19 | # 20 | ################################################################################ 21 | 22 | # Enable error handling 23 | set -e 24 | 25 | main() { 26 | # Read JSON input from stdin 27 | input_json=$(cat) 28 | 29 | # Parse arguments from the input JSON and initialize verbose flag 30 | verbose=false 31 | args=$(echo "$input_json" | jq -r '.args[]? // empty') 32 | if [ ! -z "$args" ]; then 33 | while IFS= read -r arg; do 34 | if [ "$arg" = "--verbose" ]; then 35 | verbose=true 36 | echo "Verbose mode enabled" 37 | fi 38 | done <<< "$args" 39 | fi 40 | 41 | # Extract the monitor match data from the input 42 | monitor_data=$(echo "$input_json" | jq -r '.monitor_match') 43 | 44 | # Validate input 45 | if [ -z "$monitor_data" ]; then 46 | echo "No input JSON provided" 47 | echo "false" 48 | exit 1 49 | fi 50 | 51 | # Extract ledger Number 52 | ledger_number=$(echo "$monitor_data" | jq -r '.Stellar.ledger.sequence' || echo "") 53 | 54 | # Validate ledger number 55 | if [ -z "$ledger_number" ] || [ "$ledger_number" = "null" ]; then 56 | echo "Invalid JSON or missing sequence number" 57 | echo "false" 58 | exit 1 59 | fi 60 | 61 | if [ "$verbose" = true ]; then 62 | echo "Ledger number: $ledger_number" 63 | fi 64 | 65 | # Check if even or odd using modulo 66 | is_even=$((ledger_number % 2)) 67 | 68 | if [ $is_even -eq 0 ]; then 69 | echo "Ledger number $ledger_number is even" 70 | echo "Verbose mode: $verbose" 71 | echo "true" 72 | exit 0 73 | else 74 | echo "Ledger number 
$ledger_number is odd" 75 | echo "Verbose mode: $verbose" 76 | echo "false" 77 | exit 0 78 | fi 79 | } 80 | 81 | # Call main function 82 | main 83 | -------------------------------------------------------------------------------- /examples/config/monitors/evm_transfer_usdc.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "Large Transfer of USDC Token", 3 | "paused": false, 4 | "networks": ["ethereum_mainnet"], 5 | "addresses": [ 6 | { 7 | "address": "0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48", 8 | "contract_spec": [ 9 | { 10 | "anonymous": false, 11 | "inputs": [ 12 | { 13 | "indexed": true, 14 | "internalType": "address", 15 | "name": "from", 16 | "type": "address" 17 | }, 18 | { 19 | "indexed": true, 20 | "internalType": "address", 21 | "name": "to", 22 | "type": "address" 23 | }, 24 | { 25 | "indexed": false, 26 | "internalType": "uint256", 27 | "name": "value", 28 | "type": "uint256" 29 | } 30 | ], 31 | "name": "Transfer", 32 | "type": "event" 33 | }, 34 | { 35 | "inputs": [ 36 | { "internalType": "address", "name": "to", "type": "address" }, 37 | { "internalType": "uint256", "name": "value", "type": "uint256" } 38 | ], 39 | "name": "transfer", 40 | "outputs": [{ "internalType": "bool", "name": "", "type": "bool" }], 41 | "stateMutability": "nonpayable", 42 | "type": "function" 43 | } 44 | ] 45 | } 46 | ], 47 | "match_conditions": { 48 | "functions": [ 49 | { 50 | "signature": "transfer(address,uint256)", 51 | "expression": null 52 | } 53 | ], 54 | "events": [ 55 | { 56 | "signature": "Transfer(address,address,uint256)", 57 | "expression": "value > 10000000000" 58 | } 59 | ], 60 | "transactions": [ 61 | { 62 | "status": "Success", 63 | "expression": null 64 | } 65 | ] 66 | }, 67 | "trigger_conditions": [{ 68 | "script_path": "./config/filters/evm_filter_block_number.sh", 69 | "language": "Bash", 70 | "arguments": ["--verbose"], 71 | "timeout_ms": 1000 72 | }], 73 | "triggers": 
["evm_large_transfer_usdc_slack", "evm_large_transfer_usdc_email"] 74 | } 75 | -------------------------------------------------------------------------------- /examples/config/monitors/stellar_swap_dex.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "Large Swap By Dex", 3 | "paused": false, 4 | "networks": [ 5 | "stellar_mainnet" 6 | ], 7 | "addresses": [ 8 | { 9 | "address": "CA6PUJLBYKZKUEKLZJMKBZLEKP2OTHANDEOWSFF44FTSYLKQPIICCJBE", 10 | "contract_spec": [ 11 | { 12 | "function_v0": { 13 | "doc": "", 14 | "name": "swap", 15 | "inputs": [ 16 | { 17 | "doc": "", 18 | "name": "user", 19 | "type_": "address" 20 | }, 21 | { 22 | "doc": "", 23 | "name": "in_idx", 24 | "type_": "u32" 25 | }, 26 | { 27 | "doc": "", 28 | "name": "out_idx", 29 | "type_": "u32" 30 | }, 31 | { 32 | "doc": "", 33 | "name": "in_amount", 34 | "type_": "u128" 35 | }, 36 | { 37 | "doc": "", 38 | "name": "out_min", 39 | "type_": "u128" 40 | } 41 | ], 42 | "outputs": [ 43 | "u128" 44 | ] 45 | } 46 | } 47 | ] 48 | } 49 | ], 50 | "match_conditions": { 51 | "functions": [ 52 | { 53 | "signature": "swap(Address,U32,U32,U128,U128)", 54 | "expression": "out_min > 1000000000" 55 | } 56 | ], 57 | "events": [], 58 | "transactions": [ 59 | { 60 | "status": "Success", 61 | "expression": null 62 | } 63 | ] 64 | }, 65 | "trigger_conditions": [ 66 | { 67 | "script_path": "./config/filters/stellar_filter_block_number.sh", 68 | "language": "Bash", 69 | "timeout_ms": 1000 70 | } 71 | ], 72 | "triggers": [ 73 | "stellar_large_swap_by_dex_slack" 74 | ] 75 | } 76 | -------------------------------------------------------------------------------- /examples/config/networks/arbitrum_nova.json: -------------------------------------------------------------------------------- 1 | { 2 | "network_type": "EVM", 3 | "slug": "arbitrum_nova", 4 | "name": "Arbitrum Nova", 5 | "rpc_urls": [ 6 | { 7 | "type_": "rpc", 8 | "url": { 9 | "type": "plain", 10 | "value": 
"https://nova.arbitrum.io/rpc" 11 | }, 12 | "weight": 100 13 | } 14 | ], 15 | "chain_id": 42170, 16 | "block_time_ms": 250, 17 | "confirmation_blocks": 12, 18 | "cron_schedule": "0 */1 * * * *", 19 | "store_blocks": false 20 | } 21 | -------------------------------------------------------------------------------- /examples/config/networks/arbitrum_one.json: -------------------------------------------------------------------------------- 1 | { 2 | "network_type": "EVM", 3 | "slug": "arbitrum_one", 4 | "name": "Arbitrum One", 5 | "rpc_urls": [ 6 | { 7 | "type_": "rpc", 8 | "url": { 9 | "type": "plain", 10 | "value": "https://arb1.arbitrum.io/rpc" 11 | }, 12 | "weight": 100 13 | } 14 | ], 15 | "chain_id": 42161, 16 | "block_time_ms": 250, 17 | "confirmation_blocks": 12, 18 | "cron_schedule": "0 */1 * * * *", 19 | "store_blocks": false 20 | } 21 | -------------------------------------------------------------------------------- /examples/config/networks/arbitrum_sepolia.json: -------------------------------------------------------------------------------- 1 | { 2 | "network_type": "EVM", 3 | "slug": "arbitrum_sepolia", 4 | "name": "Arbitrum Sepolia", 5 | "rpc_urls": [ 6 | { 7 | "type_": "rpc", 8 | "url": { 9 | "type": "plain", 10 | "value": "https://sepolia-rollup.arbitrum.io/rpc" 11 | }, 12 | "weight": 100 13 | } 14 | ], 15 | "chain_id": 421614, 16 | "block_time_ms": 250, 17 | "confirmation_blocks": 12, 18 | "cron_schedule": "0 */1 * * * *", 19 | "store_blocks": false 20 | } 21 | -------------------------------------------------------------------------------- /examples/config/networks/base.json: -------------------------------------------------------------------------------- 1 | { 2 | "network_type": "EVM", 3 | "slug": "base", 4 | "name": "Base", 5 | "rpc_urls": [ 6 | { 7 | "type_": "rpc", 8 | "url": { 9 | "type": "plain", 10 | "value": "https://mainnet.base.org" 11 | }, 12 | "weight": 100 13 | } 14 | ], 15 | "chain_id": 8453, 16 | "block_time_ms": 2000, 17 | 
"confirmation_blocks": 12, 18 | "cron_schedule": "0 */1 * * * *", 19 | "max_past_blocks": 43, 20 | "store_blocks": false 21 | } 22 | -------------------------------------------------------------------------------- /examples/config/networks/base_sepolia.json: -------------------------------------------------------------------------------- 1 | { 2 | "network_type": "EVM", 3 | "slug": "base-sepolia", 4 | "name": "Base Sepolia", 5 | "rpc_urls": [ 6 | { 7 | "type_": "rpc", 8 | "url": { 9 | "type": "plain", 10 | "value": "https://sepolia.base.org" 11 | }, 12 | "weight": 100 13 | } 14 | ], 15 | "chain_id": 84532, 16 | "block_time_ms": 2000, 17 | "confirmation_blocks": 12, 18 | "cron_schedule": "0 */1 * * * *", 19 | "max_past_blocks": 43, 20 | "store_blocks": false 21 | } 22 | -------------------------------------------------------------------------------- /examples/config/networks/bsc_mainnet.json: -------------------------------------------------------------------------------- 1 | { 2 | "network_type": "EVM", 3 | "slug": "bsc", 4 | "name": "Binance BSC", 5 | "rpc_urls": [ 6 | { 7 | "type_": "rpc", 8 | "url": { 9 | "type": "plain", 10 | "value": "https://bsc-dataseed.bnbchain.org" 11 | }, 12 | "weight": 100 13 | } 14 | ], 15 | "chain_id": 56, 16 | "block_time_ms": 3000, 17 | "confirmation_blocks": 12, 18 | "cron_schedule": "0 */1 * * * *", 19 | "max_past_blocks": 33, 20 | "store_blocks": false 21 | } 22 | -------------------------------------------------------------------------------- /examples/config/networks/bsc_testnet.json: -------------------------------------------------------------------------------- 1 | { 2 | "network_type": "EVM", 3 | "slug": "bsctest", 4 | "name": "BSC Testnet", 5 | "rpc_urls": [ 6 | { 7 | "type_": "rpc", 8 | "url": { 9 | "type": "plain", 10 | "value": "https://bsc-testnet-dataseed.bnbchain.org" 11 | }, 12 | "weight": 100 13 | } 14 | ], 15 | "chain_id": 97, 16 | "block_time_ms": 3000, 17 | "confirmation_blocks": 12, 18 | "cron_schedule": "0 */1 
* * * *", 19 | "max_past_blocks": 33, 20 | "store_blocks": false 21 | } 22 | -------------------------------------------------------------------------------- /examples/config/networks/ethereum_mainnet.json: -------------------------------------------------------------------------------- 1 | { 2 | "network_type": "EVM", 3 | "slug": "ethereum_mainnet", 4 | "name": "Ethereum Mainnet", 5 | "rpc_urls": [ 6 | { 7 | "type_": "rpc", 8 | "url": { 9 | "type": "plain", 10 | "value": "https://eth.drpc.org" 11 | }, 12 | "weight": 100 13 | } 14 | ], 15 | "chain_id": 1, 16 | "block_time_ms": 12000, 17 | "confirmation_blocks": 12, 18 | "cron_schedule": "0 */1 * * * *", 19 | "max_past_blocks": 18, 20 | "store_blocks": false 21 | } 22 | -------------------------------------------------------------------------------- /examples/config/networks/ethereum_sepolia.json: -------------------------------------------------------------------------------- 1 | { 2 | "network_type": "EVM", 3 | "slug": "ethereum_sepolia", 4 | "name": "Ethereum Sepolia", 5 | "rpc_urls": [ 6 | { 7 | "type_": "rpc", 8 | "url": { 9 | "type": "plain", 10 | "value": "https://sepolia.drpc.org" 11 | }, 12 | "weight": 100 13 | } 14 | ], 15 | "chain_id": 11155111, 16 | "block_time_ms": 12000, 17 | "confirmation_blocks": 12, 18 | "cron_schedule": "0 */1 * * * *", 19 | "max_past_blocks": 18, 20 | "store_blocks": false 21 | } 22 | -------------------------------------------------------------------------------- /examples/config/networks/optimism_mainnet.json: -------------------------------------------------------------------------------- 1 | { 2 | "network_type": "EVM", 3 | "slug": "optimism_mainnet", 4 | "name": "OP Mainnet", 5 | "rpc_urls": [ 6 | { 7 | "type_": "rpc", 8 | "url": { 9 | "type": "plain", 10 | "value": "https://mainnet.optimism.io" 11 | }, 12 | "weight": 100 13 | } 14 | ], 15 | "chain_id": 10, 16 | "block_time_ms": 2000, 17 | "confirmation_blocks": 12, 18 | "cron_schedule": "0 */1 * * * *", 19 | "store_blocks": 
false 20 | } 21 | -------------------------------------------------------------------------------- /examples/config/networks/optimism_sepolia.json: -------------------------------------------------------------------------------- 1 | { 2 | "network_type": "EVM", 3 | "slug": "optimism_sepolia", 4 | "name": "OP Sepolia", 5 | "rpc_urls": [ 6 | { 7 | "type_": "rpc", 8 | "url": { 9 | "type": "plain", 10 | "value": "https://sepolia.optimism.io" 11 | }, 12 | "weight": 100 13 | } 14 | ], 15 | "chain_id": 11155420, 16 | "block_time_ms": 2000, 17 | "confirmation_blocks": 12, 18 | "cron_schedule": "0 */1 * * * *", 19 | "store_blocks": false 20 | } 21 | -------------------------------------------------------------------------------- /examples/config/networks/polygon_amoy.json: -------------------------------------------------------------------------------- 1 | { 2 | "network_type": "EVM", 3 | "slug": "polygon_amoy", 4 | "name": "Polygon Amoy", 5 | "rpc_urls": [ 6 | { 7 | "type_": "rpc", 8 | "url": { 9 | "type": "plain", 10 | "value": "https://rpc-amoy.polygon.technology/" 11 | }, 12 | "weight": 100 13 | } 14 | ], 15 | "chain_id": 80002, 16 | "block_time_ms": 2000, 17 | "confirmation_blocks": 12, 18 | "cron_schedule": "0 */1 * * * *", 19 | "store_blocks": false 20 | } 21 | -------------------------------------------------------------------------------- /examples/config/networks/polygon_mainnet.json: -------------------------------------------------------------------------------- 1 | { 2 | "network_type": "EVM", 3 | "slug": "polygon_mainnet", 4 | "name": "Polygon Mainnet", 5 | "rpc_urls": [ 6 | { 7 | "type_": "rpc", 8 | "url": { 9 | "type": "plain", 10 | "value": "https://polygon-rpc.com/" 11 | }, 12 | "weight": 100 13 | } 14 | ], 15 | "chain_id": 137, 16 | "block_time_ms": 2000, 17 | "confirmation_blocks": 12, 18 | "cron_schedule": "0 */1 * * * *", 19 | "store_blocks": false 20 | } 21 | -------------------------------------------------------------------------------- 
/examples/config/networks/stellar_mainnet.json: -------------------------------------------------------------------------------- 1 | { 2 | "network_type": "Stellar", 3 | "slug": "stellar_mainnet", 4 | "name": "Stellar Mainnet", 5 | "rpc_urls": [ 6 | { 7 | "type_": "rpc", 8 | "url": { 9 | "type": "plain", 10 | "value": "https://mainnet.sorobanrpc.com" 11 | }, 12 | "weight": 100 13 | } 14 | ], 15 | "network_passphrase": "Public Global Stellar Network ; September 2015", 16 | "block_time_ms": 5000, 17 | "confirmation_blocks": 1, 18 | "cron_schedule": "0 */1 * * * *", 19 | "max_past_blocks": 20, 20 | "store_blocks": false 21 | } 22 | -------------------------------------------------------------------------------- /examples/config/networks/stellar_testnet.json: -------------------------------------------------------------------------------- 1 | { 2 | "network_type": "Stellar", 3 | "slug": "stellar_testnet", 4 | "name": "Stellar Testnet", 5 | "rpc_urls": [ 6 | { 7 | "type_": "rpc", 8 | "url": { 9 | "type": "plain", 10 | "value": "https://soroban-testnet.stellar.org" 11 | }, 12 | "weight": 100 13 | } 14 | ], 15 | "network_passphrase": "Test SDF Network ; September 2015", 16 | "block_time_ms": 5000, 17 | "confirmation_blocks": 1, 18 | "cron_schedule": "0 */1 * * * *", 19 | "max_past_blocks": 20, 20 | "store_blocks": false 21 | } 22 | -------------------------------------------------------------------------------- /examples/config/networks/unichain_sepolia.json: -------------------------------------------------------------------------------- 1 | { 2 | "network_type": "EVM", 3 | "slug": "unichain_sepolia", 4 | "name": "Unichain Sepolia", 5 | "rpc_urls": [ 6 | { 7 | "type_": "rpc", 8 | "url": { 9 | "type": "plain", 10 | "value": "https://sepolia.unichain.org" 11 | }, 12 | "weight": 90 13 | } 14 | ], 15 | "chain_id": 1301, 16 | "block_time_ms": 1000, 17 | "confirmation_blocks": 1, 18 | "cron_schedule": "0 */1 * * * *", 19 | "max_past_blocks": 62, 20 | "store_blocks": false 21 | 
} 22 | -------------------------------------------------------------------------------- /examples/config/networks/zksync_era_mainnet.json: -------------------------------------------------------------------------------- 1 | { 2 | "network_type": "EVM", 3 | "slug": "zksync", 4 | "name": "ZkSync Era", 5 | "rpc_urls": [ 6 | { 7 | "type_": "rpc", 8 | "url": { 9 | "type": "plain", 10 | "value": "https://mainnet.era.zksync.io" 11 | }, 12 | "weight": 100 13 | } 14 | ], 15 | "chain_id": 324, 16 | "block_time_ms": 5000, 17 | "confirmation_blocks": 12, 18 | "cron_schedule": "0 */1 * * * *", 19 | "store_blocks": false 20 | } 21 | -------------------------------------------------------------------------------- /examples/config/networks/zksync_era_sepolia.json: -------------------------------------------------------------------------------- 1 | { 2 | "network_type": "EVM", 3 | "slug": "zksync_era_sepolia", 4 | "name": "ZkSync Era Sepolia", 5 | "rpc_urls": [ 6 | { 7 | "type_": "rpc", 8 | "url": { 9 | "type": "plain", 10 | "value": "https://sepolia.era.zksync.dev" 11 | }, 12 | "weight": 100 13 | } 14 | ], 15 | "chain_id": 300, 16 | "block_time_ms": 5000, 17 | "confirmation_blocks": 12, 18 | "cron_schedule": "0 */1 * * * *", 19 | "store_blocks": false 20 | } 21 | -------------------------------------------------------------------------------- /examples/config/triggers/discord_notifications.json: -------------------------------------------------------------------------------- 1 | { 2 | "evm_large_transfer_usdc_discord": { 3 | "name": "Large Transfer Discord Notification", 4 | "trigger_type": "discord", 5 | "config": { 6 | "discord_url": { 7 | "type": "plain", 8 | "value": "https://discord.com/api/webhooks/123-456-789" 9 | }, 10 | "message": { 11 | "title": "large_transfer_discord triggered", 12 | "body": "Large transfer of ${events.0.args.value} USDC from ${events.0.args.from} to ${events.0.args.to} | https://etherscan.io/tx/${transaction.hash}#eventlog" 13 | } 14 | } 15 | }, 16 | 
"evm_large_transfer_usdc_discord_markdown": { 17 | "name": "EVM Large Transfer USDC Discord Notification", 18 | "trigger_type": "discord", 19 | "config": { 20 | "discord_url": { 21 | "type": "plain", 22 | "value": "https://discord.com/api/webhooks/123-456-789" 23 | }, 24 | "message": { 25 | "title": "large_transfer_discord triggered", 26 | "body": "**Large transfer** of **${events.0.args.value} USDC** from `${events.0.args.from}` to `${events.0.args.to}`\n[View on Etherscan](https://etherscan.io/tx/${transaction.hash}#eventlog)" 27 | } 28 | } 29 | }, 30 | "stellar_large_swap_by_dex_discord": { 31 | "name": "Large Swap By Dex Discord Notification", 32 | "trigger_type": "discord", 33 | "config": { 34 | "discord_url": { 35 | "type": "plain", 36 | "value": "https://discord.com/api/webhooks/123-456-789" 37 | }, 38 | "message": { 39 | "title": "large_swap_by_dex_discord triggered", 40 | "body": "${monitor.name} triggered because of a large swap of ${functions.0.args.out_min} tokens | https://stellar.expert/explorer/public/tx/${transaction.hash}" 41 | } 42 | } 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /examples/config/triggers/email_notifications.json: -------------------------------------------------------------------------------- 1 | { 2 | "evm_large_transfer_usdc_email": { 3 | "name": "Large Transfer Email Notification", 4 | "trigger_type": "email", 5 | "config": { 6 | "host": "smtp.gmail.com", 7 | "port": 465, 8 | "username": { 9 | "type": "plain", 10 | "value": "your_email@gmail.com" 11 | }, 12 | "password": { 13 | "type": "plain", 14 | "value": "your_password" 15 | }, 16 | "message": { 17 | "title": "large_transfer_usdc_email triggered", 18 | "body": "Large transfer of ${events.0.args.value} USDC from ${events.0.args.from} to ${events.0.args.to} | https://etherscan.io/tx/${transaction.hash}#eventlog" 19 | }, 20 | "sender": "your_email@gmail.com", 21 | "recipients": [ 22 | "recipient1@example.com", 23 | 
"recipient2@example.com" 24 | ] 25 | } 26 | }, 27 | "evm_large_transfer_usdc_email_markdown": { 28 | "name": "Large Transfer Email Notification", 29 | "trigger_type": "email", 30 | "config": { 31 | "host": "smtp.gmail.com", 32 | "port": 465, 33 | "username": { 34 | "type": "plain", 35 | "value": "your_email@gmail.com" 36 | }, 37 | "password": { 38 | "type": "plain", 39 | "value": "your_password" 40 | }, 41 | "message": { 42 | "title": "large_transfer_usdc_email triggered", 43 | "body": "## Large Transfer Alert\n\n**Amount:** ${events.0.args.value} USDC\n\n**From:** ${events.0.args.from}\n\n**To:** ${events.0.args.to}\n\n[View transaction on Etherscan](https://etherscan.io/tx/${transaction.hash}#eventlog)" 44 | }, 45 | "sender": "your_email@gmail.com", 46 | "recipients": [ 47 | "recipient1@example.com", 48 | "recipient2@example.com" 49 | ] 50 | } 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /examples/config/triggers/script_notifications.json: -------------------------------------------------------------------------------- 1 | { 2 | "evm_large_transfer_usdc_script": { 3 | "name": "Large Transfer Action", 4 | "trigger_type": "script", 5 | "config": { 6 | "language": "Bash", 7 | "script_path": "./config/triggers/scripts/custom_notification.sh", 8 | "arguments": ["--verbose"], 9 | "timeout_ms": 1000 10 | } 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /examples/config/triggers/scripts/custom_notification.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Custom Notification Script 3 | * This script validates monitor match data and logs validation results to stderr. 4 | * 5 | * Input: JSON object containing: 6 | * - monitor_match: The monitor match data with transaction details 7 | * - args: Additional arguments passed to the script (optional) 8 | * 9 | * Note: Only stderr output is monitored. 
If the script returns a non-zero exit code, the error will be logged. 10 | */ 11 | try { 12 | let inputData = ''; 13 | // Read from stdin 14 | process.stdin.on('data', chunk => { 15 | inputData += chunk; 16 | }); 17 | 18 | process.stdin.on('end', () => { 19 | // Parse input JSON 20 | const data = JSON.parse(inputData); 21 | const monitorMatch = data.monitor_match; 22 | const args = data.args; 23 | 24 | // Log args if they exist 25 | if (args && args.length > 0) { 26 | console.log(`Args: ${JSON.stringify(args)}`); 27 | } 28 | 29 | // Validate monitor match data 30 | if (!monitorMatch) { 31 | console.log("No monitor match data provided"); 32 | return; 33 | } 34 | }); 35 | } catch (e) { 36 | console.log(`Error processing input: ${e}`); 37 | } 38 | -------------------------------------------------------------------------------- /examples/config/triggers/scripts/custom_notification.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Custom Notification Script 4 | This script validates monitor match data and logs validation results to stderr. 5 | 6 | Input: JSON object containing: 7 | - monitor_match: The monitor match data with transaction details 8 | - args: Additional arguments passed to the script (optional) 9 | 10 | Note: Only stderr output is monitored. If the script returns a non-zero exit code, the error will be logged. 
11 | """ 12 | import sys 13 | import json 14 | 15 | def main(): 16 | try: 17 | # Read input from stdin 18 | input_data = sys.stdin.read() 19 | if not input_data: 20 | print("No input JSON provided", flush=True) 21 | 22 | # Parse input JSON 23 | try: 24 | data = json.loads(input_data) 25 | monitor_match = data['monitor_match'] 26 | args = data['args'] 27 | if args: 28 | print(f"Args: {args}") 29 | except json.JSONDecodeError as e: 30 | print(f"Invalid JSON input: {e}", flush=True) 31 | 32 | 33 | except Exception as e: 34 | print(f"Error processing input: {e}", flush=True) 35 | 36 | if __name__ == "__main__": 37 | main() 38 | -------------------------------------------------------------------------------- /examples/config/triggers/scripts/custom_notification.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | ################################################################################ 3 | # Custom Notification Script 4 | # 5 | # This script validates JSON input and logs validation results to stderr. 6 | # 7 | # Input: JSON object containing: 8 | # - monitor_match: The monitor match data 9 | # - args: Additional arguments passed to the script (optional) 10 | # 11 | # Arguments: 12 | # --verbose: Enables detailed logging of the processing 13 | # 14 | # Note: Only stderr output is monitored. If the script returns a non-zero exit code, the error will be logged. 15 | ################################################################################ 16 | 17 | # Enable error handling 18 | set -e 19 | 20 | main() { 21 | # Read JSON input from stdin 22 | input_json=$(cat) 23 | 24 | # Parse arguments from the input JSON and initialize verbose flag 25 | verbose=false 26 | args=$(echo "$input_json" | jq -r '.args[]? // empty') 27 | if [ ! 
-z "$args" ]; then 28 | while IFS= read -r arg; do 29 | if [ "$arg" = "--verbose" ]; then 30 | verbose=true 31 | echo "Verbose mode enabled" 32 | fi 33 | done <<< "$args" 34 | fi 35 | 36 | # Extract the monitor match data from the input 37 | monitor_data=$(echo "$input_json" | jq -r '.monitor_match') 38 | 39 | # Validate input 40 | if [ -z "$input_json" ]; then 41 | echo "No input JSON provided" 42 | exit 1 43 | fi 44 | 45 | # Validate JSON structure 46 | if ! echo "$input_json" | jq . >/dev/null 2>&1; then 47 | echo "Invalid JSON input" 48 | exit 1 49 | fi 50 | 51 | if [ "$verbose" = true ]; then 52 | echo "Input JSON received:" 53 | echo "$input_json" | jq '.' 54 | echo "Monitor match data:" 55 | echo "$monitor_data" | jq '.' 56 | fi 57 | 58 | # Process args if they exist 59 | args_data=$(echo "$input_json" | jq -r '.args') 60 | if [ "$args_data" != "null" ]; then 61 | echo "Args: $args_data" 62 | fi 63 | 64 | # If we made it here, everything worked 65 | echo "Verbose mode: $verbose" 66 | 67 | } 68 | 69 | # Call main function 70 | main 71 | -------------------------------------------------------------------------------- /examples/config/triggers/slack_notifications.json: -------------------------------------------------------------------------------- 1 | { 2 | "evm_large_transfer_usdc_slack": { 3 | "name": "Large Transfer Slack Notification", 4 | "trigger_type": "slack", 5 | "config": { 6 | "slack_url": { 7 | "type": "plain", 8 | "value": "https://hooks.slack.com/services/A/B/C" 9 | }, 10 | "message": { 11 | "title": "large_transfer_slack triggered", 12 | "body": "Large transfer of ${events.0.args.value} USDC from ${events.0.args.from} to ${events.0.args.to} | https://etherscan.io/tx/${transaction.hash}#eventlog" 13 | } 14 | } 15 | }, 16 | "evm_large_transfer_usdc_slack_markdown": { 17 | "name": "Large Transfer Slack Notification", 18 | "trigger_type": "slack", 19 | "config": { 20 | "slack_url": { 21 | "type": "plain", 22 | "value": 
"https://hooks.slack.com/services/A/B/C" 23 | }, 24 | "message": { 25 | "title": "large_transfer_slack triggered", 26 | "body": "*USDC Transfer Details*\n*Amount:* ${events.0.args.value} USDC\n*From:* ${events.0.args.from}\n*To:* ${events.0.args.to}\n*Transaction:* <https://etherscan.io/tx/${transaction.hash}#eventlog|View on Etherscan>" 27 | } 28 | } 29 | }, 30 | "stellar_large_swap_by_dex_slack": { 31 | "name": "Large Swap By Dex Slack Notification", 32 | "trigger_type": "slack", 33 | "config": { 34 | "slack_url": { 35 | "type": "plain", 36 | "value": "https://hooks.slack.com/services/A/B/C" 37 | }, 38 | "message": { 39 | "title": "large_swap_by_dex_slack triggered", 40 | "body": "${monitor.name} triggered because of a large swap of ${functions.0.args.out_min} tokens | https://stellar.expert/explorer/public/tx/${transaction.hash}" 41 | } 42 | } 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /examples/config/triggers/telegram_notifications.json: -------------------------------------------------------------------------------- 1 | { 2 | "evm_large_transfer_usdc_telegram": { 3 | "name": "Large Transfer Telegram Notification", 4 | "trigger_type": "telegram", 5 | "config": { 6 | "token": { 7 | "type": "plain", 8 | "value": "1234567890:ABCDEFGHIJKLMNOPQRSTUVWXYZ" 9 | }, 10 | "chat_id": "9876543210", 11 | "disable_web_preview": true, 12 | "message": { 13 | "title": "large_transfer_telegram triggered", 14 | "body": "Large transfer of ${events.0.args.value} USDC from ${events.0.args.from} to ${events.0.args.to} | https://etherscan.io/tx/${transaction.hash}#eventlog" 15 | } 16 | } 17 | }, 18 | "evm_large_transfer_usdc_telegram_markdown": { 19 | "name": "Large Transfer Telegram Notification", 20 | "trigger_type": "telegram", 21 | "config": { 22 | "token": { 23 | "type": "plain", 24 | "value": "1234567890:ABCDEFGHIJKLMNOPQRSTUVWXYZ" 25 | }, 26 | "chat_id": "9876543210", 27 | "disable_web_preview": true, 28 | "message": { 29 | "title": "large_transfer_telegram triggered", 30 | "body": "*USDC 
Transfer Details*\n*Amount:* ${events.0.args.value} USDC\n*From:* ${events.0.args.from}\n*To:* ${events.0.args.to}\n*Transaction:* [View on Etherscan](https://etherscan.io/tx/${transaction.hash}#eventlog)" 31 | } 32 | } 33 | }, 34 | "stellar_large_swap_by_dex_telegram": { 35 | "name": "Large Swap By Dex Telegram Notification", 36 | "trigger_type": "telegram", 37 | "config": { 38 | "token": { 39 | "type": "plain", 40 | "value": "1234567890:ABCDEFGHIJKLMNOPQRSTUVWXYZ" 41 | }, 42 | "chat_id": "9876543210", 43 | "disable_web_preview": true, 44 | "message": { 45 | "title": "large_swap_by_dex_telegram triggered", 46 | "body": "${monitor.name} triggered because of a large swap of ${functions.0.args.out_min} tokens | https://stellar.expert/explorer/public/tx/${transaction.hash}" 47 | } 48 | } 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /examples/config/triggers/webhook_notifications.json: -------------------------------------------------------------------------------- 1 | { 2 | "evm_large_transfer_usdc_webhook": { 3 | "name": "Large Transfer Webhook Notification", 4 | "trigger_type": "webhook", 5 | "config": { 6 | "url": { 7 | "type": "plain", 8 | "value": "https://webhook.site/123-456-789" 9 | }, 10 | "method": "POST", 11 | "secret": { 12 | "type": "plain", 13 | "value": "some-secret" 14 | }, 15 | "headers": { 16 | "Content-Type": "application/json" 17 | }, 18 | "message": { 19 | "title": "large_transfer_webhook triggered", 20 | "body": "Large transfer of ${events.0.args.value} USDC from ${events.0.args.from} to ${events.0.args.to} | https://etherscan.io/tx/${transaction.hash}#eventlog" 21 | } 22 | } 23 | }, 24 | "evm_large_transfer_usdc_webhook_markdown": { 25 | "name": "Large Transfer Webhook Notification", 26 | "trigger_type": "webhook", 27 | "config": { 28 | "url": { 29 | "type": "plain", 30 | "value": "https://webhook.site/123-456-789" 31 | }, 32 | "method": "POST", 33 | "secret": { 34 | "type": "plain", 35 | 
"value": "some-secret" 36 | }, 37 | "headers": { 38 | "Content-Type": "application/json" 39 | }, 40 | "message": { 41 | "title": "large_transfer_webhook triggered", 42 | "body": "# USDC Transfer Alert\n\n**Large transfer detected**\n\n- **Amount:** ${events.0.args.value} USDC\n- **From:** `${events.0.args.from}`\n- **To:** `${events.0.args.to}`\n\n[View on Etherscan](https://etherscan.io/tx/${transaction.hash}#eventlog)" 43 | } 44 | } 45 | }, 46 | "stellar_large_swap_by_dex_webhook": { 47 | "name": "Large Swap By Dex Webhook Notification", 48 | "trigger_type": "webhook", 49 | "config": { 50 | "url": { 51 | "type": "plain", 52 | "value": "https://webhook.site/123-456-789" 53 | }, 54 | "method": "POST", 55 | "secret": { 56 | "type": "plain", 57 | "value": "some-secret" 58 | }, 59 | "headers": { 60 | "Content-Type": "application/json" 61 | }, 62 | "message": { 63 | "title": "large_swap_by_dex_webhook triggered", 64 | "body": "${monitor.name} triggered because of a large swap of ${functions.0.args.out_min} tokens | https://stellar.expert/explorer/public/tx/${transaction.hash}" 65 | } 66 | } 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /netlify.toml: -------------------------------------------------------------------------------- 1 | [build] 2 | base = "/" 3 | command = "rustup default stable && cargo install cargo-make --locked --force && cd docs && npm ci && npm run docs && cd .. 
&& cargo make rust-antora" 4 | publish = "docs/build/site" 5 | -------------------------------------------------------------------------------- /rust-toolchain.toml: -------------------------------------------------------------------------------- 1 | [toolchain] 2 | channel = "1.86.0" 3 | profile = "minimal" 4 | components = [ 5 | "rustc", 6 | "cargo", 7 | "rustfmt", 8 | "clippy", 9 | "rust-docs", 10 | "llvm-tools", 11 | "rust-src", 12 | ] 13 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | # Basic formatting 2 | max_width = 100 3 | tab_spaces = 4 4 | hard_tabs = true 5 | edition = "2024" 6 | 7 | # Code organization 8 | reorder_imports = true 9 | reorder_modules = true 10 | 11 | # Formatting preferences 12 | merge_derives = true 13 | 14 | # Function and control flow formatting 15 | fn_params_layout = "Tall" 16 | 17 | # Code style preferences 18 | use_try_shorthand = true 19 | use_field_init_shorthand = true 20 | -------------------------------------------------------------------------------- /scripts/docker_compose.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | if [ -f .env ]; then 3 | export $(grep -v '^#' .env | xargs) 4 | fi 5 | 6 | # Function to run docker compose up 7 | # If METRICS_ENABLED is true, run docker compose up with the metrics profile 8 | docker_compose_up() { 9 | if [ "$METRICS_ENABLED" = "true" ]; then 10 | docker compose --profile metrics up -d 11 | else 12 | docker compose up -d 13 | fi 14 | } 15 | 16 | # Function to run docker compose down 17 | # If METRICS_ENABLED is true, run docker compose down with the metrics profile 18 | docker_compose_down() { 19 | if [ "$METRICS_ENABLED" = "true" ]; then 20 | docker compose --profile metrics down 21 | else 22 | docker compose down 23 | fi 24 | } 25 | 26 | # Check command-line argument 27 | case "$1" in 28 | 
up) 29 | docker_compose_up 30 | ;; 31 | down) 32 | docker_compose_down 33 | ;; 34 | *) 35 | echo "Usage: $0 {up|down}" 36 | exit 1 37 | ;; 38 | esac 39 | -------------------------------------------------------------------------------- /scripts/rust_antora.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Exit immediately if a command exits with a non-zero status 4 | set -euo pipefail 5 | 6 | # Base directories 7 | REPO_ROOT="$PWD" 8 | DOCS_DIR="$REPO_ROOT/docs" 9 | NAME=$(grep '^name:' "$DOCS_DIR/antora.yml" | awk '{print $2}') 10 | VERSION=$(grep '^version:' "$DOCS_DIR/antora.yml" | awk '{print $2}') 11 | BUILD_DIR="$DOCS_DIR/build/site" 12 | RUST_DOCS_DIR="$DOCS_DIR/rust_docs" 13 | 14 | # Check if the target directory exists 15 | if [ ! -d "$BUILD_DIR" ]; then 16 | echo "Error: Build directory '$BUILD_DIR' not found." 17 | exit 1 18 | fi 19 | 20 | # Copy the Rust docs to the target directory 21 | if [ -d "$RUST_DOCS_DIR" ] && [ "$(ls -A "$RUST_DOCS_DIR")" ]; then 22 | echo "Copying '$RUST_DOCS_DIR' to '$BUILD_DIR'..." 23 | cp -r "$RUST_DOCS_DIR/doc/"* "$BUILD_DIR/" 24 | echo "Rust docs successfully copied to '$BUILD_DIR'." 25 | # Remove the original Rust docs directory 26 | echo "Removing original Rust docs directory '$RUST_DOCS_DIR'..." 27 | rm -rf "$RUST_DOCS_DIR" 28 | echo "Original Rust docs directory '$RUST_DOCS_DIR' removed." 29 | else 30 | echo "Source directory '$RUST_DOCS_DIR' does not exist or is empty." 
31 | fi 32 | -------------------------------------------------------------------------------- /scripts/validate_network_config.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | FOLDER_PATH="./config/networks" # Change to your folder path 4 | RET_CODE=0 5 | 6 | declare -a json_array 7 | declare -a summary_array 8 | 9 | # 10 | # $1 -> json schema for the network configuration (./config/networks/) 11 | # 12 | function test_rpcs { 13 | NETWORK_NAME=`echo ${1} | jq '.name'` 14 | NETWORK_TYPE=`echo ${1} | jq -r '.network_type // "EVM"' | tr '[:upper:]' '[:lower:]'` # Convert to lowercase using tr 15 | 16 | echo "Testing RPCs for ${NETWORK_NAME}" 17 | 18 | for u in `echo ${1} | jq '.rpc_urls[] | .url.value' | tr -d '"'` 19 | do 20 | URL=`echo ${u} | tr -d '"'` 21 | 22 | # Set the method based on network type 23 | case ${NETWORK_TYPE} in # Network type is already lowercase 24 | "evm") 25 | METHOD="net_version" 26 | ;; 27 | "stellar") 28 | METHOD="getNetwork" 29 | ;; 30 | "midnight") 31 | METHOD="system_chain" 32 | ;; 33 | *) 34 | METHOD="net_version" 35 | ;; 36 | esac 37 | 38 | # Store the response in a variable and check both HTTP status and JSON response 39 | RESPONSE=$(curl -s -w "\n%{http_code}" ${URL} -X POST -H "Content-Type: application/json" \ 40 | --data "{\"method\":\"${METHOD}\",\"params\":[],\"id\":1,\"jsonrpc\":\"2.0\"}") 41 | 42 | # Get HTTP status code (last line) 43 | HTTP_STATUS=$(echo "$RESPONSE" | tail -n1) 44 | # Get response body (all but last line) 45 | BODY=$(echo "$RESPONSE" | sed \$d) 46 | 47 | # Check both HTTP status and valid JSON response 48 | if [ $HTTP_STATUS -eq 200 ] && echo "$BODY" | jq empty > /dev/null 2>&1; then 49 | summary_array+=("✅ RPC ${URL} (${NETWORK_NAME}).") 50 | else 51 | summary_array+=("❌ RPC ${URL} (${NETWORK_NAME}).") 52 | RET_CODE=1 53 | fi 54 | done 55 | } 56 | 57 | # parsing arguments (if any) 58 | while getopts :hf: opt; do 59 | case ${opt} in 60 | h) 61 | echo 
"Usage: $0 [-h | -f ]" 62 | exit 0 63 | ;; 64 | f) 65 | FOLDER_PATH=${OPTARG} 66 | ;; 67 | :) 68 | echo "Option -${OPTARG} requires an argument" 69 | exit 1 70 | ;; 71 | esac 72 | done 73 | 74 | if [ -d "$FOLDER_PATH" ]; then 75 | for file in "$FOLDER_PATH"/*.json*; do 76 | if [ -f "$file" ]; then 77 | content=$(cat "$file") 78 | json_array+=("$content") 79 | fi 80 | done 81 | 82 | echo "Loaded ${#json_array[@]} JSON files from ${FOLDER_PATH}" 83 | 84 | for i in "${json_array[@]}" 85 | do 86 | test_rpcs "${i}" 87 | done 88 | else 89 | echo "Folder not found: $FOLDER_PATH" 90 | fi 91 | 92 | for i in "${summary_array[@]}" 93 | do 94 | echo ${i} 95 | done 96 | 97 | exit $RET_CODE 98 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! Blockchain monitoring and notification service. 2 | //! 3 | //! This library provides functionality for monitoring blockchain networks and triggering 4 | //! notifications based on configurable conditions. It includes: 5 | //! 6 | //! - Configuration management through JSON files 7 | //! - Blockchain network monitoring and event filtering 8 | //! - Customizable notification triggers and actions 9 | //! - Extensible repository and service architecture 10 | //! 11 | //! # Module Structure 12 | //! 13 | //! - `bootstrap`: Bootstraps the application 14 | //! - `models`: Data structures for configuration and blockchain data 15 | //! - `repositories`: Configuration storage and management 16 | //! - `services`: Core business logic and blockchain interaction 17 | //! 
- `utils`: Common utilities and helper functions 18 | 19 | pub mod bootstrap; 20 | pub mod models; 21 | pub mod repositories; 22 | pub mod services; 23 | pub mod utils; 24 | -------------------------------------------------------------------------------- /src/models/blockchain/evm/mod.rs: -------------------------------------------------------------------------------- 1 | //! Ethereum Virtual Machine (EVM) blockchain specific implementations. 2 | //! 3 | //! This module contains data structures and implementations specific to EVM-based 4 | //! blockchains, including blocks, transactions, and monitoring functionality. 5 | 6 | mod block; 7 | mod monitor; 8 | mod receipt; 9 | mod transaction; 10 | 11 | pub use block::Block as EVMBlock; 12 | pub use monitor::{ 13 | ContractSpec as EVMContractSpec, EVMMonitorMatch, MatchArguments as EVMMatchArguments, 14 | MatchParamEntry as EVMMatchParamEntry, MatchParamsMap as EVMMatchParamsMap, 15 | }; 16 | pub use receipt::{ 17 | BaseLog as EVMReceiptLog, BaseReceipt as EVMBaseReceipt, 18 | TransactionReceipt as EVMTransactionReceipt, 19 | }; 20 | pub use transaction::{BaseTransaction as EVMBaseTransaction, Transaction as EVMTransaction}; 21 | -------------------------------------------------------------------------------- /src/models/blockchain/mod.rs: -------------------------------------------------------------------------------- 1 | //! Blockchain-specific model implementations. 2 | //! 3 | //! This module contains type definitions and implementations for different 4 | //! blockchain platforms (EVM, Stellar, etc). Each submodule implements the 5 | //! platform-specific logic for blocks, transactions, and event monitoring. 
6 | 7 | use serde::{Deserialize, Serialize}; 8 | 9 | pub mod evm; 10 | pub mod stellar; 11 | 12 | /// Supported blockchain platform types 13 | #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)] 14 | #[serde(deny_unknown_fields)] 15 | pub enum BlockChainType { 16 | /// Ethereum Virtual Machine based chains 17 | EVM, 18 | /// Stellar blockchain 19 | Stellar, 20 | /// Midnight blockchain (not yet implemented) 21 | Midnight, 22 | /// Solana blockchain (not yet implemented) 23 | Solana, 24 | } 25 | 26 | /// Block data from different blockchain platforms 27 | #[derive(Debug, Clone, Serialize, Deserialize)] 28 | pub enum BlockType { 29 | /// EVM block and transaction data 30 | /// 31 | /// # Note 32 | /// Box is used here to equalize the enum variants 33 | EVM(Box<evm::EVMBlock>), 34 | /// Stellar ledger and transaction data 35 | /// 36 | /// # Note 37 | /// Box is used here to equalize the enum variants 38 | Stellar(Box<stellar::StellarBlock>), 39 | } 40 | 41 | impl BlockType { 42 | pub fn number(&self) -> Option<u64> { 43 | match self { 44 | BlockType::EVM(b) => b.number(), 45 | BlockType::Stellar(b) => b.number(), 46 | } 47 | } 48 | } 49 | 50 | /// Transaction data from different blockchain platforms 51 | #[derive(Debug, Clone, Serialize, Deserialize)] 52 | pub enum TransactionType { 53 | /// EVM transaction 54 | EVM(evm::EVMTransaction), 55 | /// Stellar transaction 56 | Stellar(Box<stellar::StellarTransaction>), 57 | } 58 | 59 | /// Contract spec from different blockchain platforms 60 | #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] 61 | #[serde(untagged)] 62 | pub enum ContractSpec { 63 | /// EVM contract spec 64 | EVM(evm::EVMContractSpec), 65 | /// Stellar contract spec 66 | Stellar(stellar::StellarContractSpec), 67 | } 68 | 69 | /// Monitor match results from different blockchain platforms 70 | #[derive(Debug, Clone, Serialize, Deserialize)] 71 | pub enum MonitorMatch { 72 | /// Matched conditions from EVM chains 73 | /// 74 | /// # Note 75 | /// Box is used here to equalize the enum variants 76 | 
EVM(Box<evm::EVMMonitorMatch>), 77 | /// Matched conditions from Stellar chains 78 | /// 79 | /// # Note 80 | /// Box is used here to equalize the enum variants 81 | Stellar(Box<stellar::StellarMonitorMatch>), 82 | } 83 | 84 | /// Structure to hold block processing results 85 | /// 86 | /// This is used to pass the results of block processing to the trigger handler 87 | #[derive(Debug, Clone, Serialize, Deserialize)] 88 | pub struct ProcessedBlock { 89 | pub block_number: u64, 90 | pub network_slug: String, 91 | pub processing_results: Vec<MonitorMatch>, 92 | } 93 | -------------------------------------------------------------------------------- /src/models/blockchain/stellar/event.rs: -------------------------------------------------------------------------------- 1 | //! Stellar contract event data structures. 2 | //! 3 | //! Note: These structures are based on the Stellar RPC implementation: 4 | //! <https://github.com/stellar/stellar-rpc> 5 | 6 | use serde::{Deserialize, Serialize}; 7 | 8 | /// Represents a contract event emitted during transaction execution 9 | /// 10 | /// This structure represents the response from the Stellar RPC endpoint 11 | /// and matches the format defined in the stellar-rpc repository. 
12 | #[derive(Deserialize, Serialize, Debug, Clone, Default)] 13 | pub struct Event { 14 | /// Type of the event 15 | #[serde(rename = "type")] 16 | pub event_type: String, 17 | 18 | /// Ledger sequence number containing this event 19 | pub ledger: u32, 20 | 21 | /// Timestamp when the ledger was closed 22 | #[serde(rename = "ledgerClosedAt")] 23 | pub ledger_closed_at: String, 24 | 25 | /// Contract address that emitted the event 26 | #[serde(rename = "contractId")] 27 | pub contract_id: String, 28 | 29 | /// Unique identifier for this event 30 | pub id: String, 31 | 32 | /// Deprecated: Use cursor at top level for pagination 33 | #[serde(rename = "pagingToken")] 34 | pub paging_token: String, 35 | 36 | /// Whether the event was emitted during a successful contract call 37 | #[serde(rename = "inSuccessfulContractCall")] 38 | pub in_successful_contract_call: bool, 39 | 40 | /// Transaction hash that generated this event 41 | #[serde(rename = "txHash")] 42 | pub transaction_hash: String, 43 | 44 | /// Base64-encoded list of ScVals representing the event topics 45 | #[serde(rename = "topic", skip_serializing_if = "Option::is_none")] 46 | pub topic_xdr: Option<Vec<String>>, 47 | 48 | /// Decoded JSON representation of the event topics 49 | #[serde(rename = "topicJson", skip_serializing_if = "Option::is_none")] 50 | pub topic_json: Option<Vec<serde_json::Value>>, 51 | 52 | /// Base64-encoded ScVal representing the event value 53 | #[serde(rename = "value", skip_serializing_if = "Option::is_none")] 54 | pub value_xdr: Option<String>, 55 | 56 | /// Decoded JSON representation of the event value 57 | #[serde(rename = "valueJson", skip_serializing_if = "Option::is_none")] 58 | pub value_json: Option<serde_json::Value>, 59 | } 60 | -------------------------------------------------------------------------------- /src/models/blockchain/stellar/mod.rs: -------------------------------------------------------------------------------- 1 | //! Stellar blockchain specific implementations. 2 | //! 3 | 
This module contains data structures and implementations specific to the 4 | //! Stellar blockchain, including blocks (ledgers), transactions, events, 5 | //! and monitoring functionality. 6 | 7 | mod block; 8 | mod event; 9 | mod monitor; 10 | mod transaction; 11 | 12 | pub use block::{Block as StellarBlock, LedgerInfo as StellarLedgerInfo}; 13 | pub use event::Event as StellarEvent; 14 | pub use monitor::{ 15 | ContractFunction as StellarContractFunction, ContractInput as StellarContractInput, 16 | ContractSpec as StellarContractSpec, DecodedParamEntry as StellarDecodedParamEntry, 17 | FormattedContractSpec as StellarFormattedContractSpec, MatchArguments as StellarMatchArguments, 18 | MatchParamEntry as StellarMatchParamEntry, MatchParamsMap as StellarMatchParamsMap, 19 | MonitorMatch as StellarMonitorMatch, ParsedOperationResult as StellarParsedOperationResult, 20 | }; 21 | pub use transaction::{ 22 | DecodedTransaction as StellarDecodedTransaction, Transaction as StellarTransaction, 23 | TransactionInfo as StellarTransactionInfo, 24 | }; 25 | -------------------------------------------------------------------------------- /src/models/config/mod.rs: -------------------------------------------------------------------------------- 1 | //! Configuration loading and validation. 2 | //! 3 | //! This module provides traits and implementations for loading and validating 4 | //! configuration files for networks, monitors, and triggers. 5 | 6 | #![allow(clippy::result_large_err)] 7 | 8 | use async_trait::async_trait; 9 | use std::path::Path; 10 | 11 | mod error; 12 | mod monitor_config; 13 | mod network_config; 14 | mod trigger_config; 15 | 16 | pub use error::ConfigError; 17 | 18 | /// Common interface for loading configuration files 19 | #[async_trait] 20 | pub trait ConfigLoader: Sized { 21 | /// Load all configuration files from a directory 22 | /// 23 | /// If no path is provided, uses the default config directory. 
24 | async fn load_all<T>(path: Option<&Path>) -> Result<T, ConfigError> 25 | where 26 | T: FromIterator<(String, Self)>; 27 | 28 | /// Load configuration from a specific file path 29 | async fn load_from_path(path: &Path) -> Result<Self, ConfigError>; 30 | 31 | /// Validate the configuration 32 | /// 33 | /// Returns Ok(()) if valid, or an error message if invalid. 34 | fn validate(&self) -> Result<(), error::ConfigError>; 35 | 36 | /// Validate safety of the protocol 37 | /// 38 | /// Returns if safe, or logs a warning message if unsafe. 39 | fn validate_protocol(&self); 40 | 41 | /// Check if a file is a JSON file based on extension 42 | fn is_json_file(path: &Path) -> bool { 43 | path.extension() 44 | .map(|ext| ext.to_string_lossy().to_lowercase() == "json") 45 | .unwrap_or(false) 46 | } 47 | 48 | /// Resolve all secrets in the configuration 49 | async fn resolve_secrets(&self) -> Result<Self, ConfigError>; 50 | 51 | /// Validate uniqueness of the configuration 52 | /// # Arguments 53 | /// * `instances` - The instances to validate uniqueness against 54 | /// * `current_instance` - The current instance to validate uniqueness for 55 | /// * `file_path` - The path to the file containing the current instance (for logging purposes) 56 | /// 57 | /// Returns Ok(()) if valid, or an error message if found duplicate names. 58 | fn validate_uniqueness( 59 | instances: &[&Self], 60 | current_instance: &Self, 61 | file_path: &str, 62 | ) -> Result<(), ConfigError>; 63 | } 64 | -------------------------------------------------------------------------------- /src/models/core/mod.rs: -------------------------------------------------------------------------------- 1 | //! Core domain models for the blockchain monitoring system. 2 | //! 3 | //! This module contains the fundamental data structures that represent: 4 | //! - Monitors: Configuration for watching blockchain activity 5 | //! - Networks: Blockchain network definitions and connection details 6 | 
- Triggers: Actions to take when monitored conditions are met 7 | 8 | mod monitor; 9 | mod network; 10 | mod trigger; 11 | 12 | pub use monitor::{ 13 | AddressWithSpec, EventCondition, FunctionCondition, MatchConditions, Monitor, ScriptLanguage, 14 | TransactionCondition, TransactionStatus, TriggerConditions, 15 | }; 16 | pub use network::{Network, RpcUrl}; 17 | pub use trigger::{NotificationMessage, Trigger, TriggerType, TriggerTypeConfig}; 18 | -------------------------------------------------------------------------------- /src/models/core/network.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | 3 | use crate::models::{BlockChainType, SecretValue}; 4 | 5 | /// Configuration for connecting to and interacting with a blockchain network. 6 | /// 7 | /// Defines connection details and operational parameters for a specific blockchain network, 8 | /// supporting both EVM and Stellar-based chains. 9 | #[derive(Debug, Clone, Deserialize, Serialize, PartialEq)] 10 | #[serde(deny_unknown_fields)] 11 | pub struct Network { 12 | /// Type of blockchain (EVM, Stellar, etc) 13 | pub network_type: BlockChainType, 14 | 15 | /// Unique identifier for this network 16 | pub slug: String, 17 | 18 | /// Human-readable name of the network 19 | pub name: String, 20 | 21 | /// List of RPC endpoints with their weights for load balancing 22 | pub rpc_urls: Vec<RpcUrl>, 23 | 24 | /// Chain ID for EVM networks 25 | pub chain_id: Option<u64>, 26 | 27 | /// Network passphrase for Stellar networks 28 | pub network_passphrase: Option<String>, 29 | 30 | /// Average block time in milliseconds 31 | pub block_time_ms: u64, 32 | 33 | /// Number of blocks needed for confirmation 34 | pub confirmation_blocks: u64, 35 | 36 | /// Cron expression for how often to check for new blocks 37 | pub cron_schedule: String, 38 | 39 | /// Maximum number of past blocks to process 40 | pub max_past_blocks: Option<u64>, 41 | 42 | /// Whether to store processed 
blocks 43 | pub store_blocks: Option<bool>, 44 | } 45 | 46 | /// RPC endpoint configuration with load balancing weight 47 | #[derive(Debug, Clone, Deserialize, Serialize, PartialEq)] 48 | #[serde(deny_unknown_fields)] 49 | pub struct RpcUrl { 50 | /// Type of RPC endpoint (e.g. "rpc") 51 | pub type_: String, 52 | 53 | /// URL of the RPC endpoint (can be a secret value) 54 | pub url: SecretValue, 55 | 56 | /// Weight for load balancing (0-100) 57 | pub weight: u32, 58 | } 59 | -------------------------------------------------------------------------------- /src/models/core/trigger.rs: -------------------------------------------------------------------------------- 1 | use crate::models::{core::ScriptLanguage, SecretValue}; 2 | use email_address::EmailAddress; 3 | use serde::{Deserialize, Serialize}; 4 | 5 | /// Configuration for actions to take when monitored conditions are met. 6 | #[derive(Debug, Clone, Deserialize, Serialize, PartialEq)] 7 | #[serde(deny_unknown_fields)] 8 | pub struct Trigger { 9 | /// Unique name identifying this trigger 10 | pub name: String, 11 | 12 | /// Type of trigger (Email, Slack, Webhook, Telegram, Discord, Script) 13 | pub trigger_type: TriggerType, 14 | 15 | /// Configuration specific to the trigger type 16 | pub config: TriggerTypeConfig, 17 | } 18 | 19 | /// Supported trigger action types 20 | #[derive(Debug, Clone, Deserialize, Serialize, PartialEq)] 21 | #[serde(rename_all = "lowercase")] 22 | #[serde(deny_unknown_fields)] 23 | pub enum TriggerType { 24 | /// Send notification to Slack 25 | Slack, 26 | /// Send notification to email 27 | Email, 28 | /// Make HTTP request to webhook 29 | Webhook, 30 | /// Send notification to Telegram 31 | Telegram, 32 | /// Send notification to Discord 33 | Discord, 34 | /// Execute local script 35 | Script, 36 | } 37 | 38 | /// Notification message fields 39 | #[derive(Debug, Clone, Deserialize, Serialize, PartialEq)] 40 | #[serde(deny_unknown_fields)] 41 | pub struct NotificationMessage { 42 | /// 
Notification title or subject 43 | pub title: String, 44 | /// Message template 45 | pub body: String, 46 | } 47 | 48 | /// Type-specific configuration for triggers 49 | #[derive(Debug, Clone, Deserialize, Serialize, PartialEq)] 50 | #[serde(deny_unknown_fields)] 51 | #[serde(untagged)] 52 | pub enum TriggerTypeConfig { 53 | /// Slack notification configuration 54 | Slack { 55 | /// Slack webhook URL 56 | slack_url: SecretValue, 57 | /// Notification message 58 | message: NotificationMessage, 59 | }, 60 | /// Email notification configuration 61 | Email { 62 | /// SMTP host 63 | host: String, 64 | /// SMTP port (default 465) 65 | port: Option<u16>, 66 | /// SMTP username 67 | username: SecretValue, 68 | /// SMTP password 69 | password: SecretValue, 70 | /// Notification message 71 | message: NotificationMessage, 72 | /// Email sender 73 | sender: EmailAddress, 74 | /// Email recipients 75 | recipients: Vec<EmailAddress>, 76 | }, 77 | /// Webhook configuration 78 | Webhook { 79 | /// Webhook endpoint URL 80 | url: SecretValue, 81 | /// HTTP method to use 82 | method: Option<String>, 83 | /// Secret 84 | secret: Option<SecretValue>, 85 | /// Optional HTTP headers 86 | headers: Option<std::collections::HashMap<String, String>>, 87 | /// Notification message 88 | message: NotificationMessage, 89 | }, 90 | /// Telegram notification configuration 91 | Telegram { 92 | /// Telegram bot token 93 | token: SecretValue, 94 | /// Telegram chat ID 95 | chat_id: String, 96 | /// Disable web preview 97 | disable_web_preview: Option<bool>, 98 | /// Notification message 99 | message: NotificationMessage, 100 | }, 101 | /// Discord notification configuration 102 | Discord { 103 | /// Discord webhook URL 104 | discord_url: SecretValue, 105 | /// Notification message 106 | message: NotificationMessage, 107 | }, 108 | /// Script execution configuration 109 | Script { 110 | /// Language of the script 111 | language: ScriptLanguage, 112 | /// Path to script file 113 | script_path: String, 114 | /// Command line arguments 115 | #[serde(default)] 116 | arguments: Option<Vec<String>>, 117 | 
/// Timeout in milliseconds 118 | timeout_ms: u32, 119 | }, 120 | } 121 | -------------------------------------------------------------------------------- /src/models/mod.rs: -------------------------------------------------------------------------------- 1 | //! Domain models and data structures for blockchain monitoring. 2 | //! 3 | //! This module contains all the core data structures used throughout the application: 4 | //! 5 | //! - `blockchain`: Platform-specific implementations for different blockchains (EVM, Stellar) 6 | //! - `config`: Configuration loading and validation 7 | //! - `core`: Core domain models (Monitor, Network, Trigger) 8 | //! - `security`: Security models (Secret) 9 | 10 | mod blockchain; 11 | mod config; 12 | mod core; 13 | mod security; 14 | 15 | // Re-export blockchain types 16 | pub use blockchain::{ 17 | BlockChainType, BlockType, ContractSpec, MonitorMatch, ProcessedBlock, TransactionType, 18 | }; 19 | 20 | pub use blockchain::evm::{ 21 | EVMBaseReceipt, EVMBaseTransaction, EVMBlock, EVMContractSpec, EVMMatchArguments, 22 | EVMMatchParamEntry, EVMMatchParamsMap, EVMMonitorMatch, EVMReceiptLog, EVMTransaction, 23 | EVMTransactionReceipt, 24 | }; 25 | 26 | pub use blockchain::stellar::{ 27 | StellarBlock, StellarContractFunction, StellarContractInput, StellarContractSpec, 28 | StellarDecodedParamEntry, StellarDecodedTransaction, StellarEvent, 29 | StellarFormattedContractSpec, StellarLedgerInfo, StellarMatchArguments, StellarMatchParamEntry, 30 | StellarMatchParamsMap, StellarMonitorMatch, StellarParsedOperationResult, StellarTransaction, 31 | StellarTransactionInfo, 32 | }; 33 | 34 | // Re-export core types 35 | pub use core::{ 36 | AddressWithSpec, EventCondition, FunctionCondition, MatchConditions, Monitor, Network, 37 | NotificationMessage, RpcUrl, ScriptLanguage, TransactionCondition, TransactionStatus, Trigger, 38 | TriggerConditions, TriggerType, TriggerTypeConfig, 39 | }; 40 | 41 | // Re-export config types 42 | pub use 
config::{ConfigError, ConfigLoader};

// Re-export security types
pub use security::{SecretString, SecretValue, SecurityError};

// ----------------------------------------------------------------------------
// /src/models/security/mod.rs
// ----------------------------------------------------------------------------
//! Security models
//!
//! This module contains the security models for the application.
//!
//! - `error`: Error types for security operations
//! - `secret`: Secret management and zeroization

mod error;
mod secret;

use std::env;

pub use error::{SecurityError, SecurityResult};
pub use secret::{SecretString, SecretValue};

/// Reads the environment variable `key`, mapping a lookup failure into a
/// boxed `SecurityError` parse error naming the missing variable.
// NOTE(review): the return's generic parameter was lost in extraction;
// `env::var` yields `String`, so `SecurityResult<String>` — confirm upstream.
pub fn get_env_var(key: &str) -> SecurityResult<String> {
    env::var(key).map_err(|e| {
        Box::new(SecurityError::parse_error(
            format!("Missing {} environment variable", key),
            Some(e.into()),
            None,
        ))
    })
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::env;

    #[test]
    fn test_get_env_var_success() {
        env::set_var("TEST_ENV_VAR", "test_value");
        let result = get_env_var("TEST_ENV_VAR");
        assert!(result.is_ok());
        assert_eq!(result.unwrap(), "test_value".to_string());
        env::remove_var("TEST_ENV_VAR");
    }

    #[test]
    fn test_get_env_var_missing() {
        let result = get_env_var("NON_EXISTING_ENV_VAR");
        assert!(result.is_err());
        assert!(result
            .err()
            .unwrap()
            .to_string()
            .contains("Missing NON_EXISTING_ENV_VAR environment variable"));
    }
}

// ----------------------------------------------------------------------------
// /src/repositories/mod.rs
// ----------------------------------------------------------------------------
//! Repository implementations for configuration management.
//!
//! This module provides traits and implementations for loading and managing
configuration data from the filesystem. Each repository type handles a specific 5 | //! configuration type and provides: 6 | //! 7 | //! - Loading configurations from JSON files 8 | //! - Validating configuration references between different types 9 | //! - Accessing configurations through a service layer 10 | //! 11 | //! Currently supported repositories: 12 | //! - Monitor: Loads and validates monitor configurations, ensuring referenced networks and triggers 13 | //! exist 14 | //! - Network: Loads network configurations defining blockchain connection details 15 | //! - Trigger: Loads trigger configurations defining actions to take when conditions match 16 | 17 | mod error; 18 | mod monitor; 19 | mod network; 20 | mod trigger; 21 | 22 | pub use error::RepositoryError; 23 | pub use monitor::{MonitorRepository, MonitorRepositoryTrait, MonitorService}; 24 | pub use network::{NetworkRepository, NetworkRepositoryTrait, NetworkService}; 25 | pub use trigger::{TriggerRepository, TriggerRepositoryTrait, TriggerService}; 26 | -------------------------------------------------------------------------------- /src/services/blockchain/client.rs: -------------------------------------------------------------------------------- 1 | //! Core blockchain client interface and traits. 2 | //! 3 | //! This module defines the common interface that all blockchain implementations 4 | //! must follow, ensuring consistent behavior across different blockchain types. 5 | 6 | use async_trait::async_trait; 7 | 8 | use crate::{ 9 | models::{BlockType, ContractSpec}, 10 | services::filter::BlockFilter, 11 | }; 12 | 13 | /// Defines the core interface for blockchain clients 14 | /// 15 | /// This trait must be implemented by all blockchain-specific clients to provide 16 | /// standardized access to blockchain data and operations. 
17 | #[async_trait] 18 | pub trait BlockChainClient: Send + Sync + Clone { 19 | /// Retrieves the latest block number from the blockchain 20 | /// 21 | /// # Returns 22 | /// * `Result` - The latest block number or an error 23 | async fn get_latest_block_number(&self) -> Result; 24 | 25 | /// Retrieves a range of blocks from the blockchain 26 | /// 27 | /// # Arguments 28 | /// * `start_block` - The starting block number 29 | /// * `end_block` - Optional ending block number. If None, only fetches start_block 30 | /// 31 | /// # Returns 32 | /// * `Result, anyhow::Error>` - Vector of blocks or an error 33 | /// 34 | /// # Note 35 | /// The implementation should handle cases where end_block is None by returning 36 | /// only the start_block data. 37 | async fn get_blocks( 38 | &self, 39 | start_block: u64, 40 | end_block: Option, 41 | ) -> Result, anyhow::Error>; 42 | 43 | /// Retrieves the contract spec for a given contract ID 44 | /// 45 | /// # Arguments 46 | /// * `contract_id` - The ID of the contract to retrieve the spec for 47 | /// 48 | /// # Returns 49 | /// * `Result` - The contract spec or an error 50 | async fn get_contract_spec(&self, _contract_id: &str) -> Result { 51 | Err(anyhow::anyhow!("get_contract_spec not implemented")) 52 | } 53 | } 54 | 55 | /// Defines the factory interface for creating block filters 56 | /// 57 | /// This trait must be implemented by all blockchain-specific clients to provide 58 | /// a way to create block filters. 59 | pub trait BlockFilterFactory { 60 | type Filter: BlockFilter + Send; 61 | fn filter() -> Self::Filter; 62 | } 63 | -------------------------------------------------------------------------------- /src/services/blockchain/clients/mod.rs: -------------------------------------------------------------------------------- 1 | //! Blockchain client implementations. 2 | //! 3 | //! Contains specific implementations for different blockchain types: 4 | //! - EVM client for Ethereum-compatible chains 5 | //! 
- Stellar client for Stellar network 6 | 7 | mod evm { 8 | pub mod client; 9 | } 10 | mod stellar { 11 | pub mod client; 12 | } 13 | 14 | pub use evm::client::{EvmClient, EvmClientTrait}; 15 | pub use stellar::client::{StellarClient, StellarClientTrait}; 16 | -------------------------------------------------------------------------------- /src/services/blockchain/mod.rs: -------------------------------------------------------------------------------- 1 | //! Blockchain client interfaces and implementations. 2 | //! 3 | //! Provides abstractions and concrete implementations for interacting with 4 | //! different blockchain networks. Includes: 5 | //! 6 | //! - Generic blockchain client trait 7 | //! - EVM and Stellar specific clients 8 | //! - Network transport implementations 9 | //! - Error handling for blockchain operations 10 | //! - Client pool for managing multiple clients 11 | 12 | mod client; 13 | mod clients; 14 | mod error; 15 | mod pool; 16 | mod transports; 17 | 18 | pub use client::{BlockChainClient, BlockFilterFactory}; 19 | pub use clients::{EvmClient, EvmClientTrait, StellarClient, StellarClientTrait}; 20 | pub use error::BlockChainError; 21 | pub use pool::{ClientPool, ClientPoolTrait}; 22 | pub use transports::{ 23 | BlockchainTransport, EVMTransportClient, EndpointManager, HttpTransportClient, 24 | RotatingTransport, StellarTransportClient, TransientErrorRetryStrategy, TransportError, 25 | }; 26 | -------------------------------------------------------------------------------- /src/services/blockchain/transports/mod.rs: -------------------------------------------------------------------------------- 1 | //! Network transport implementations for blockchain clients. 2 | //! 3 | //! Provides concrete implementations for different blockchain network protocols: 4 | //! 5 | //! 
- Generic HTTP transport for all chains 6 | 7 | mod evm { 8 | pub mod http; 9 | } 10 | mod stellar { 11 | pub mod http; 12 | } 13 | 14 | mod endpoint_manager; 15 | mod error; 16 | mod http; 17 | 18 | pub use endpoint_manager::EndpointManager; 19 | pub use error::TransportError; 20 | pub use evm::http::EVMTransportClient; 21 | pub use http::HttpTransportClient; 22 | pub use stellar::http::StellarTransportClient; 23 | 24 | use reqwest_middleware::ClientWithMiddleware; 25 | use reqwest_retry::{ 26 | default_on_request_failure, default_on_request_success, policies::ExponentialBackoff, 27 | Retryable, RetryableStrategy, 28 | }; 29 | use serde::Serialize; 30 | use serde_json::{json, Value}; 31 | 32 | /// HTTP status codes that trigger RPC endpoint rotation 33 | /// - 429: Too Many Requests - indicates rate limiting from the current endpoint 34 | pub const ROTATE_ON_ERROR_CODES: [u16; 1] = [429]; 35 | 36 | /// Base trait for all blockchain transport clients 37 | #[async_trait::async_trait] 38 | pub trait BlockchainTransport: Send + Sync { 39 | /// Get the current URL being used by the transport 40 | async fn get_current_url(&self) -> String; 41 | 42 | /// Send a raw request to the blockchain 43 | async fn send_raw_request

( 44 | &self, 45 | method: &str, 46 | params: Option

, 47 | ) -> Result 48 | where 49 | P: Into + Send + Clone + Serialize; 50 | 51 | /// Customizes the request for specific blockchain requirements 52 | async fn customize_request

(&self, method: &str, params: Option

) -> Value 53 | where 54 | P: Into + Send + Clone + Serialize, 55 | { 56 | // Default implementation for JSON-RPC 57 | json!({ 58 | "jsonrpc": "2.0", 59 | "id": 1, 60 | "method": method, 61 | "params": params.map(|p| p.into()) 62 | }) 63 | } 64 | 65 | /// Sets the retry policy for the transport 66 | fn set_retry_policy( 67 | &mut self, 68 | retry_policy: ExponentialBackoff, 69 | retry_strategy: Option, 70 | ) -> Result<(), anyhow::Error>; 71 | 72 | /// Update endpoint manager with a new client 73 | fn update_endpoint_manager_client( 74 | &mut self, 75 | client: ClientWithMiddleware, 76 | ) -> Result<(), anyhow::Error>; 77 | } 78 | 79 | /// Extension trait for transports that support URL rotation 80 | #[async_trait::async_trait] 81 | pub trait RotatingTransport: BlockchainTransport { 82 | /// Attempts to establish a connection with a new URL 83 | async fn try_connect(&self, url: &str) -> Result<(), anyhow::Error>; 84 | 85 | /// Updates the client with a new URL 86 | async fn update_client(&self, url: &str) -> Result<(), anyhow::Error>; 87 | } 88 | 89 | /// A default retry strategy that retries on requests based on the status code 90 | /// This can be used to customise the retry strategy 91 | pub struct TransientErrorRetryStrategy; 92 | impl RetryableStrategy for TransientErrorRetryStrategy { 93 | fn handle( 94 | &self, 95 | res: &Result, 96 | ) -> Option { 97 | match res { 98 | Ok(success) => default_on_request_success(success), 99 | Err(error) => default_on_request_failure(error), 100 | } 101 | } 102 | } 103 | -------------------------------------------------------------------------------- /src/services/blockwatcher/mod.rs: -------------------------------------------------------------------------------- 1 | //! Block watcher service implementation. 2 | //! 3 | //! This module provides functionality to watch and process blockchain blocks across 4 | //! different networks. It includes: 5 | //! - Block watching service for multiple networks 6 | //! 
- Block storage implementations 7 | //! - Error handling specific to block watching operations 8 | 9 | mod error; 10 | mod service; 11 | mod storage; 12 | mod tracker; 13 | 14 | pub use error::BlockWatcherError; 15 | pub use service::{ 16 | process_new_blocks, BlockWatcherService, JobSchedulerTrait, NetworkBlockWatcher, 17 | }; 18 | pub use storage::{BlockStorage, FileBlockStorage}; 19 | pub use tracker::{BlockTracker, BlockTrackerTrait}; 20 | -------------------------------------------------------------------------------- /src/services/filter/expression/evaluation.rs: -------------------------------------------------------------------------------- 1 | //! This module contains the `ConditionEvaluator` trait and the `EvaluationError` enum. 2 | //! The `ConditionEvaluator` trait defines methods for getting base parameters, comparing values, 3 | //! and getting the kind of a value from a JSON value. 4 | //! The `ConditionEvaluator` trait is implemented by specific evaluators that provide the logic 5 | //! for evaluating conditions based on the context of the chain. 6 | 7 | use super::error::EvaluationError; 8 | use crate::services::filter::expression::ast::{ComparisonOperator, LiteralValue}; 9 | 10 | /// The `ConditionEvaluator` trait defines methods for evaluating conditions in filter expressions. 
11 | pub trait ConditionEvaluator { 12 | /// Gets the raw string value and kind for a base variable name 13 | fn get_base_param(&self, name: &str) -> Result<(&str, &str), EvaluationError>; 14 | 15 | /// Performs the final comparison between the left resolved value (after all path traversal) and the literal value 16 | fn compare_final_values( 17 | &self, 18 | left_kind: &str, 19 | left_resolved_value: &str, 20 | operator: &ComparisonOperator, 21 | right_literal: &LiteralValue, 22 | ) -> Result; 23 | 24 | /// Gets the chain-specific kind of a value from a JSON value 25 | fn get_kind_from_json_value(&self, value: &serde_json::Value) -> String; 26 | } 27 | -------------------------------------------------------------------------------- /src/services/filter/expression/mod.rs: -------------------------------------------------------------------------------- 1 | //! Shared logic for parsing and evaluating expressions 2 | 3 | mod ast; 4 | mod error; 5 | mod evaluation; 6 | mod helpers; 7 | mod parsing; 8 | 9 | pub use ast::{ComparisonOperator, LiteralValue}; 10 | pub use error::EvaluationError; 11 | pub use evaluation::ConditionEvaluator; 12 | pub use helpers::{compare_ordered_values, evaluate}; 13 | pub use parsing::parse; 14 | -------------------------------------------------------------------------------- /src/services/filter/filters/mod.rs: -------------------------------------------------------------------------------- 1 | //! Block filtering implementations. 2 | //! 3 | //! Provides trait definition and implementations for filtering blocks 4 | //! across different blockchain types. Includes: 5 | //! - Generic BlockFilter trait 6 | //! - EVM-specific implementation 7 | //! 
- Stellar-specific implementation 8 | 9 | pub mod evm { 10 | pub mod evaluator; 11 | pub mod filter; 12 | pub mod helpers; 13 | } 14 | pub mod stellar { 15 | pub mod evaluator; 16 | pub mod filter; 17 | pub mod helpers; 18 | } 19 | 20 | use async_trait::async_trait; 21 | 22 | use crate::{ 23 | models::{BlockType, ContractSpec, Monitor, MonitorMatch, Network}, 24 | services::{blockchain::BlockFilterFactory, filter::error::FilterError}, 25 | }; 26 | pub use evm::filter::EVMBlockFilter; 27 | pub use stellar::filter::StellarBlockFilter; 28 | 29 | /// Trait for filtering blockchain data 30 | /// 31 | /// This trait must be implemented by all blockchain-specific clients to provide 32 | /// a way to filter blockchain data. 33 | #[async_trait] 34 | pub trait BlockFilter { 35 | type Client; 36 | async fn filter_block( 37 | &self, 38 | client: &Self::Client, 39 | network: &Network, 40 | block: &BlockType, 41 | monitors: &[Monitor], 42 | contract_specs: Option<&[(String, ContractSpec)]>, 43 | ) -> Result, FilterError>; 44 | } 45 | 46 | /// Service for filtering blockchain data 47 | /// 48 | /// This service provides a way to filter blockchain data based on a set of monitors. 
49 | pub struct FilterService {} 50 | 51 | impl FilterService { 52 | pub fn new() -> Self { 53 | FilterService {} 54 | } 55 | } 56 | 57 | impl Default for FilterService { 58 | fn default() -> Self { 59 | Self::new() 60 | } 61 | } 62 | 63 | impl FilterService { 64 | pub async fn filter_block>( 65 | &self, 66 | client: &T, 67 | network: &Network, 68 | block: &BlockType, 69 | monitors: &[Monitor], 70 | contract_specs: Option<&[(String, ContractSpec)]>, 71 | ) -> Result, FilterError> { 72 | let filter = T::filter(); 73 | filter 74 | .filter_block(client, network, block, monitors, contract_specs) 75 | .await 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /src/services/filter/mod.rs: -------------------------------------------------------------------------------- 1 | //! Transaction and event filtering functionality. 2 | //! 3 | //! Implements the core filtering logic for monitoring blockchain activity: 4 | //! - Block filtering for different chain types 5 | //! - Match handling and processing 6 | //! - Chain-specific helper functions 7 | 8 | mod error; 9 | mod expression; 10 | mod filter_match; 11 | mod filters; 12 | 13 | pub use error::FilterError; 14 | pub use filter_match::handle_match; 15 | 16 | pub use filters::{ 17 | evm::helpers as evm_helpers, stellar::helpers as stellar_helpers, BlockFilter, EVMBlockFilter, 18 | FilterService, StellarBlockFilter, 19 | }; 20 | -------------------------------------------------------------------------------- /src/services/mod.rs: -------------------------------------------------------------------------------- 1 | //! Core services implementing the business logic. 2 | //! 3 | //! This module contains the main service implementations: 4 | //! - `blockchain`: Blockchain client interfaces and implementations 5 | //! - `blockwatcher`: Block monitoring and processing 6 | //! - `filter`: Transaction and event filtering logic 7 | //! 
- `notification`: Alert and notification handling 8 | //! - `trigger`: Trigger evaluation and execution 9 | 10 | pub mod blockchain; 11 | pub mod blockwatcher; 12 | pub mod filter; 13 | pub mod notification; 14 | pub mod trigger; 15 | -------------------------------------------------------------------------------- /src/services/trigger/mod.rs: -------------------------------------------------------------------------------- 1 | //! Trigger service implementation. 2 | //! 3 | //! This module provides functionality to manage and execute triggers, 4 | //! which are configurable actions that can be initiated based on 5 | //! various conditions. 6 | 7 | mod error; 8 | mod script; 9 | mod service; 10 | 11 | pub use error::TriggerError; 12 | pub use script::{ 13 | process_script_output, validate_script_config, ScriptError, ScriptExecutor, 14 | ScriptExecutorFactory, 15 | }; 16 | pub use service::{TriggerExecutionService, TriggerExecutionServiceTrait}; 17 | -------------------------------------------------------------------------------- /src/services/trigger/script/factory.rs: -------------------------------------------------------------------------------- 1 | //! Trigger script factory implementation. 2 | //! 3 | //! This module provides functionality to create script executors based on the script language. 4 | 5 | use crate::{ 6 | models::ScriptLanguage, 7 | services::trigger::script::executor::{ 8 | BashScriptExecutor, JavaScriptScriptExecutor, PythonScriptExecutor, ScriptExecutor, 9 | }, 10 | }; 11 | 12 | /// Factory for creating script executors based on the script language. 13 | pub struct ScriptExecutorFactory; 14 | 15 | impl ScriptExecutorFactory { 16 | /// Creates a new script executor for the specified language and script path. 
17 | /// 18 | /// # Arguments 19 | /// 20 | /// * `language` - The programming language of the script 21 | /// * `script_content` - The content of the script 22 | /// 23 | /// # Returns 24 | /// 25 | /// Returns a boxed (Rust will allocate on the heap) trait object implementing the 26 | /// `ScriptExecutor` trait 27 | pub fn create(language: &ScriptLanguage, script_content: &str) -> Box { 28 | match language { 29 | ScriptLanguage::Python => Box::new(PythonScriptExecutor { 30 | script_content: script_content.to_string(), 31 | }), 32 | ScriptLanguage::JavaScript => Box::new(JavaScriptScriptExecutor { 33 | script_content: script_content.to_string(), 34 | }), 35 | ScriptLanguage::Bash => Box::new(BashScriptExecutor { 36 | script_content: script_content.to_string(), 37 | }), 38 | } 39 | } 40 | } 41 | 42 | #[cfg(test)] 43 | mod tests { 44 | use super::*; 45 | use crate::models::ScriptLanguage; 46 | 47 | #[test] 48 | fn test_create_python_executor() { 49 | let script = "print('Hello')"; 50 | let executor = ScriptExecutorFactory::create(&ScriptLanguage::Python, script); 51 | assert!( 52 | executor 53 | .as_any() 54 | .downcast_ref::() 55 | .unwrap() 56 | .script_content 57 | == script 58 | ); 59 | 60 | // Test with empty script 61 | let empty_script = ""; 62 | let executor = ScriptExecutorFactory::create(&ScriptLanguage::Python, empty_script); 63 | assert!(executor 64 | .as_any() 65 | .downcast_ref::() 66 | .unwrap() 67 | .script_content 68 | .is_empty()); 69 | } 70 | 71 | #[test] 72 | fn test_create_javascript_executor() { 73 | let script = "console.log('Hello')"; 74 | let executor = ScriptExecutorFactory::create(&ScriptLanguage::JavaScript, script); 75 | assert!( 76 | executor 77 | .as_any() 78 | .downcast_ref::() 79 | .unwrap() 80 | .script_content 81 | == script 82 | ); 83 | 84 | // Test with empty script 85 | let empty_script = ""; 86 | let executor = ScriptExecutorFactory::create(&ScriptLanguage::JavaScript, empty_script); 87 | assert!(executor 88 | .as_any() 89 | 
.downcast_ref::() 90 | .unwrap() 91 | .script_content 92 | .is_empty()); 93 | } 94 | 95 | #[test] 96 | fn test_create_bash_executor() { 97 | let script = "echo 'Hello'"; 98 | let executor = ScriptExecutorFactory::create(&ScriptLanguage::Bash, script); 99 | assert!( 100 | executor 101 | .as_any() 102 | .downcast_ref::() 103 | .unwrap() 104 | .script_content 105 | == script 106 | ); 107 | 108 | // Test with empty script 109 | let empty_script = ""; 110 | let executor = ScriptExecutorFactory::create(&ScriptLanguage::Bash, empty_script); 111 | assert!(executor 112 | .as_any() 113 | .downcast_ref::() 114 | .unwrap() 115 | .script_content 116 | .is_empty()); 117 | } 118 | } 119 | -------------------------------------------------------------------------------- /src/services/trigger/script/mod.rs: -------------------------------------------------------------------------------- 1 | //! Trigger script service implementation. 2 | //! 3 | //! This module provides functionality to manage and execute triggers, 4 | //! which are configurable actions that can be initiated based on 5 | //! various conditions. 6 | 7 | mod error; 8 | mod executor; 9 | mod factory; 10 | mod validation; 11 | pub use error::ScriptError; 12 | pub use executor::{process_script_output, ScriptExecutor}; 13 | pub use factory::ScriptExecutorFactory; 14 | pub use validation::validate_script_config; 15 | -------------------------------------------------------------------------------- /src/services/trigger/script/validation.rs: -------------------------------------------------------------------------------- 1 | //! Trigger script validation implementation. 2 | //! 3 | //! This module provides functionality to validate script configuration parameters. 
4 | 5 | use crate::models::{ConfigError, ScriptLanguage}; 6 | use std::path::Path; 7 | 8 | /// Validates script configuration parameters 9 | /// 10 | /// # Arguments 11 | /// * `script_path` - Path to the script file 12 | /// * `language` - The supported script language 13 | /// * `timeout_ms` - Timeout in milliseconds 14 | /// 15 | /// # Returns 16 | /// * `Ok(())` if validation passes 17 | /// * `Err(ConfigError)` if any validation fails 18 | #[allow(clippy::result_large_err)] 19 | pub fn validate_script_config( 20 | script_path: &str, 21 | language: &ScriptLanguage, 22 | timeout_ms: &u32, 23 | ) -> Result<(), ConfigError> { 24 | // Validate script path exists 25 | if !Path::new(script_path).exists() { 26 | return Err(ConfigError::validation_error( 27 | format!("Script path does not exist: {}", script_path), 28 | None, 29 | None, 30 | )); 31 | } 32 | 33 | let script_path_instance = Path::new(script_path); 34 | // Validate file extension matches language 35 | let extension = script_path_instance 36 | .extension() 37 | .and_then(|ext| ext.to_str()) 38 | .unwrap_or(""); 39 | 40 | let valid_extension = match language { 41 | ScriptLanguage::Python => extension == "py", 42 | ScriptLanguage::JavaScript => extension == "js", 43 | ScriptLanguage::Bash => extension == "sh", 44 | }; 45 | 46 | if !valid_extension { 47 | return Err(ConfigError::validation_error( 48 | format!( 49 | "Script file extension does not match specified language {:?}: {}", 50 | language, script_path 51 | ), 52 | None, 53 | None, 54 | )); 55 | } 56 | 57 | // Validate timeout 58 | if *timeout_ms == 0 { 59 | return Err(ConfigError::validation_error( 60 | "Timeout must be greater than 0".to_string(), 61 | None, 62 | None, 63 | )); 64 | } 65 | 66 | Ok(()) 67 | } 68 | 69 | #[cfg(test)] 70 | mod tests { 71 | use super::*; 72 | use std::fs; 73 | use tempfile::NamedTempFile; 74 | 75 | #[test] 76 | fn test_validate_script_config_valid_python() { 77 | let temp_file = NamedTempFile::new().unwrap(); 78 | let path 
= temp_file.path().to_str().unwrap().to_string(); 79 | let python_path = path + ".py"; 80 | fs::rename(temp_file.path(), &python_path).unwrap(); 81 | 82 | let result = validate_script_config(&python_path, &ScriptLanguage::Python, &1000); 83 | 84 | assert!(result.is_ok()); 85 | fs::remove_file(python_path).unwrap(); 86 | } 87 | 88 | #[test] 89 | fn test_validate_script_config_invalid_path() { 90 | let result = 91 | validate_script_config("nonexistent_script.py", &ScriptLanguage::Python, &1000); 92 | 93 | assert!(result.is_err()); 94 | if let Err(e) = result { 95 | assert!(e.to_string().contains("Script path does not exist")); 96 | } 97 | } 98 | 99 | #[test] 100 | fn test_validate_script_config_wrong_extension() { 101 | let temp_file = NamedTempFile::new().unwrap(); 102 | let path = temp_file.path().to_str().unwrap().to_string(); 103 | let wrong_path = path + ".py"; 104 | fs::rename(temp_file.path(), &wrong_path).unwrap(); 105 | 106 | let result = validate_script_config(&wrong_path, &ScriptLanguage::JavaScript, &1000); 107 | 108 | assert!(result.is_err()); 109 | if let Err(e) = result { 110 | assert!(e.to_string().contains("does not match specified language")); 111 | } 112 | fs::remove_file(wrong_path).unwrap(); 113 | } 114 | 115 | #[test] 116 | fn test_validate_script_config_zero_timeout() { 117 | let temp_file = NamedTempFile::new().unwrap(); 118 | let path = temp_file.path().to_str().unwrap().to_string(); 119 | let python_path = path + ".py"; 120 | fs::rename(temp_file.path(), &python_path).unwrap(); 121 | 122 | let result = validate_script_config(&python_path, &ScriptLanguage::Python, &0); 123 | 124 | assert!(result.is_err()); 125 | if let Err(e) = result { 126 | assert!(e.to_string().contains("Timeout must be greater than 0")); 127 | } 128 | fs::remove_file(python_path).unwrap(); 129 | } 130 | } 131 | -------------------------------------------------------------------------------- /src/utils/constants.rs: 
-------------------------------------------------------------------------------- 1 | pub const DOCUMENTATION_URL: &str = "https://docs.openzeppelin.com/monitor"; 2 | -------------------------------------------------------------------------------- /src/utils/cron_utils.rs: -------------------------------------------------------------------------------- 1 | //! Utility functions for working with cron schedules and time intervals 2 | //! 3 | //! This module provides helper functions for parsing and analyzing cron expressions, 4 | 5 | use chrono::Utc; 6 | use cron::Schedule; 7 | 8 | /// Calculates the time interval between two consecutive occurrences of a cron schedule 9 | /// 10 | /// This function takes a cron expression and determines how many milliseconds will elapse 11 | /// between two consecutive runs of the schedule. 12 | /// 13 | /// # Arguments 14 | /// 15 | /// * `cron_schedule` - A string slice containing a valid cron expression (e.g., "0 0 * * *") 16 | /// 17 | /// # Returns 18 | /// 19 | /// * `Some(i64)` - The number of milliseconds between consecutive schedule runs 20 | /// * `None` - If the cron expression is invalid or if two consecutive occurrences cannot be 21 | /// determined 22 | pub fn get_cron_interval_ms(cron_schedule: &str) -> Option { 23 | // Parse the cron schedule 24 | let schedule = match cron_schedule.parse::() { 25 | Ok(schedule) => schedule, 26 | Err(_) => return None, // Return None if the cron string is invalid 27 | }; 28 | 29 | // Get the current time 30 | let now = Utc::now(); 31 | 32 | // Get the next two occurrences of the schedule 33 | let mut occurrences = schedule.after(&now).take(2); 34 | 35 | if let (Some(first), Some(second)) = (occurrences.next(), occurrences.next()) { 36 | // Calculate the interval in milliseconds 37 | let interval_ms = (second - first).num_milliseconds(); 38 | Some(interval_ms) 39 | } else { 40 | None // Return None if we cannot find two occurrences 41 | } 42 | } 43 | 
// ----------------------------------------------------------------------------
// /src/utils/expression.rs
// ----------------------------------------------------------------------------
/// Splits an expression into a tuple of (left, operator, right)
///
/// Word operators (`contains`, `starts_with`, `ends_with`) only match on
/// identifier boundaries, so a field named e.g. `contains_field` is not
/// mistaken for the `contains` operator.
///
/// # Arguments
/// * `expr` - The expression to split
///
/// # Returns
/// An Option containing the split expression if successful, None otherwise
pub fn split_expression(expr: &str) -> Option<(&str, &str, &str)> {
    // Find the operator position while respecting quotes
    let mut in_quotes = false;
    let mut operator_start = None;
    let mut operator_end = None;

    // Ordered so two-character operators are tried before their one-character
    // prefixes (">=" before ">", "<=" before "<").
    let operators = [
        "==",
        "!=",
        ">=",
        "<=",
        ">",
        "<",
        "contains",
        "starts_with",
        "ends_with",
    ];

    // First pass - find operator position
    for (i, c) in expr.char_indices() {
        if c == '\'' || c == '"' {
            in_quotes = !in_quotes;
            continue;
        }

        if !in_quotes {
            // Check each operator
            for op in operators {
                if expr[i..].starts_with(op) {
                    // FIX: alphabetic operators must sit on identifier boundaries;
                    // previously "contains_field == 5" matched the "contains"
                    // operator at position 0 and split incorrectly.
                    let is_word_op =
                        op.chars().all(|ch| ch.is_ascii_alphabetic() || ch == '_');
                    if is_word_op {
                        let before_ok = !expr[..i]
                            .ends_with(|ch: char| ch.is_ascii_alphanumeric() || ch == '_');
                        let after = i + op.len();
                        let after_ok = !expr[after..]
                            .starts_with(|ch: char| ch.is_ascii_alphanumeric() || ch == '_');
                        if !(before_ok && after_ok) {
                            continue; // substring of a longer identifier, keep looking
                        }
                    }
                    operator_start = Some(i);
                    operator_end = Some(i + op.len());
                    break;
                }
            }
            if operator_start.is_some() {
                break;
            }
        }
    }

    // Split based on operator position
    if let (Some(op_start), Some(op_end)) = (operator_start, operator_end) {
        let left = expr[..op_start].trim();
        let operator = expr[op_start..op_end].trim();
        let right = expr[op_end..].trim();

        // Remove surrounding quotes from right side if present
        let right = right.trim_matches(|c| c == '\'' || c == '"');

        Some((left, operator, right))
    } else {
        None
    }
}

// /src/utils/macros/mod.rs — module docs: "Utility macros."
2 | 3 | /// Case-insensitive enum deserialization utilities 4 | pub mod deserialization; 5 | -------------------------------------------------------------------------------- /src/utils/metrics/README.md: -------------------------------------------------------------------------------- 1 | # Metrics 2 | 3 | ## Overview 4 | 5 | The metrics system provides monitoring capabilities for the OpenZeppelin Monitor application through Prometheus and Grafana integration. 6 | 7 | ## Architecture 8 | 9 | - A metrics server runs on port `8081` 10 | - Middleware intercepts requests across all endpoints 11 | - Metrics are exposed via the `/metrics` endpoint 12 | - Prometheus collects and stores the metrics data 13 | - Grafana provides visualization through customizable dashboards 14 | 15 | ## Access Points 16 | 17 | - Prometheus UI: `http://localhost:9090` 18 | - Grafana Dashboard: `http://localhost:3000` 19 | - Raw Metrics: `http://localhost:8081/metrics` 20 | -------------------------------------------------------------------------------- /src/utils/mod.rs: -------------------------------------------------------------------------------- 1 | //! Utility modules for common functionality. 2 | //! 3 | //! This module provides various utility functions and types that are used across 4 | //! the application. Currently includes: 5 | //! 6 | //! - constants: Constants for the application 7 | //! - cron_utils: Utilities for working with cron schedules and time intervals 8 | //! - expression: Utilities for working with cron expressions 9 | //! - logging: Logging utilities 10 | //! - macros: Macros for common functionality 11 | //! - metrics: Metrics utilities 12 | //! - monitor: Monitor utilities 13 | //! - parsing: Parsing utilities 14 | //! 
- tests: Test utilities 15 | 16 | mod cron_utils; 17 | mod expression; 18 | 19 | pub mod constants; 20 | pub mod logging; 21 | pub mod macros; 22 | pub mod metrics; 23 | pub mod monitor; 24 | pub mod parsing; 25 | pub mod tests; 26 | 27 | pub use constants::*; 28 | pub use cron_utils::*; 29 | pub use expression::*; 30 | pub use macros::*; 31 | pub use parsing::*; 32 | -------------------------------------------------------------------------------- /src/utils/monitor/error.rs: -------------------------------------------------------------------------------- 1 | //! Monitor execution error types and handling. 2 | //! 3 | //! Provides error types for monitor execution against a specific block, 4 | use crate::utils::logging::error::{ErrorContext, TraceableError}; 5 | use std::collections::HashMap; 6 | use thiserror::Error as ThisError; 7 | use uuid::Uuid; 8 | 9 | /// Represents possible errors during monitor execution 10 | #[derive(ThisError, Debug)] 11 | pub enum MonitorExecutionError { 12 | /// Errors related to not found errors 13 | #[error("Not found error: {0}")] 14 | NotFound(ErrorContext), 15 | 16 | /// Errors related to execution failures 17 | #[error("Execution error: {0}")] 18 | ExecutionError(ErrorContext), 19 | 20 | /// Other errors that don't fit into the categories above 21 | #[error(transparent)] 22 | Other(#[from] anyhow::Error), 23 | } 24 | 25 | impl MonitorExecutionError { 26 | // Not found error 27 | pub fn not_found( 28 | msg: impl Into, 29 | source: Option>, 30 | metadata: Option>, 31 | ) -> Self { 32 | Self::NotFound(ErrorContext::new_with_log(msg, source, metadata)) 33 | } 34 | 35 | // Execution error 36 | pub fn execution_error( 37 | msg: impl Into, 38 | source: Option>, 39 | metadata: Option>, 40 | ) -> Self { 41 | Self::ExecutionError(ErrorContext::new_with_log(msg, source, metadata)) 42 | } 43 | } 44 | 45 | impl TraceableError for MonitorExecutionError { 46 | fn trace_id(&self) -> String { 47 | match self { 48 | Self::NotFound(ctx) => 
ctx.trace_id.clone(), 49 | Self::ExecutionError(ctx) => ctx.trace_id.clone(), 50 | Self::Other(_) => Uuid::new_v4().to_string(), 51 | } 52 | } 53 | } 54 | 55 | #[cfg(test)] 56 | mod tests { 57 | use super::*; 58 | use std::io::{Error as IoError, ErrorKind}; 59 | 60 | #[test] 61 | fn test_not_found_error_formatting() { 62 | let error = MonitorExecutionError::not_found("test error", None, None); 63 | assert_eq!(error.to_string(), "Not found error: test error"); 64 | 65 | let source_error = IoError::new(ErrorKind::NotFound, "test source"); 66 | let error = MonitorExecutionError::not_found( 67 | "test error", 68 | Some(Box::new(source_error)), 69 | Some(HashMap::from([("key1".to_string(), "value1".to_string())])), 70 | ); 71 | assert_eq!( 72 | error.to_string(), 73 | "Not found error: test error [key1=value1]" 74 | ); 75 | } 76 | 77 | #[test] 78 | fn test_execution_error_formatting() { 79 | let error = MonitorExecutionError::execution_error("test error", None, None); 80 | assert_eq!(error.to_string(), "Execution error: test error"); 81 | 82 | let source_error = IoError::new(ErrorKind::NotFound, "test source"); 83 | let error = MonitorExecutionError::execution_error( 84 | "test error", 85 | Some(Box::new(source_error)), 86 | Some(HashMap::from([("key1".to_string(), "value1".to_string())])), 87 | ); 88 | assert_eq!( 89 | error.to_string(), 90 | "Execution error: test error [key1=value1]" 91 | ); 92 | } 93 | 94 | #[test] 95 | fn test_from_anyhow_error() { 96 | let anyhow_error = anyhow::anyhow!("test anyhow error"); 97 | let script_error: MonitorExecutionError = anyhow_error.into(); 98 | assert!(matches!(script_error, MonitorExecutionError::Other(_))); 99 | assert_eq!(script_error.to_string(), "test anyhow error"); 100 | } 101 | } 102 | -------------------------------------------------------------------------------- /src/utils/monitor/mod.rs: -------------------------------------------------------------------------------- 1 | //! 
Monitor utilities for executing and managing blockchain monitors. 2 | //! 3 | //! This module provides functionality for executing monitors against a specific block 4 | //! 5 | //! - execution: Monitor execution logic against a specific block 6 | //! - error: Error types for monitor execution 7 | 8 | mod error; 9 | pub use error::MonitorExecutionError; 10 | pub mod execution; 11 | -------------------------------------------------------------------------------- /src/utils/parsing.rs: -------------------------------------------------------------------------------- 1 | //! Parsing utilities 2 | //! 3 | //! This module provides utilities for parsing various types of data. 4 | 5 | use byte_unit::Byte; 6 | use std::str::FromStr; 7 | 8 | /// Parses a string argument into a `u64` value representing a file size. 9 | /// 10 | /// Accepts human-readable formats like "1GB", "500MB", "1024KB", etc. 11 | /// Returns an error if the format is invalid. 12 | pub fn parse_string_to_bytes_size(s: &str) -> Result { 13 | match Byte::from_str(s) { 14 | Ok(byte) => Ok(byte.as_u64()), 15 | Err(e) => Err(format!("Invalid size format: '{}'. Error: {}", s, e)), 16 | } 17 | } 18 | 19 | /// Normalizes a string by trimming whitespace and converting to lowercase. 20 | /// 21 | /// This is useful for case-insensitive comparisons and removing leading/trailing whitespace. 
22 | /// 23 | /// # Arguments 24 | /// * `input` - The string to normalize 25 | /// 26 | /// # Returns 27 | /// * `String` - The normalized string (trimmed and lowercase) 28 | pub fn normalize_string(input: &str) -> String { 29 | input.trim().to_lowercase() 30 | } 31 | 32 | #[cfg(test)] 33 | mod tests { 34 | use super::*; 35 | 36 | #[test] 37 | fn test_valid_size_formats() { 38 | let test_cases = vec![ 39 | ("1B", 1), 40 | ("1KB", 1000), 41 | ("1KiB", 1024), 42 | ("1MB", 1000 * 1000), 43 | ("1MiB", 1024 * 1024), 44 | ("1GB", 1000 * 1000 * 1000), 45 | ("1GiB", 1024 * 1024 * 1024), 46 | ("1.5GB", (1.5 * 1000.0 * 1000.0 * 1000.0) as u64), 47 | ("500MB", 500 * 1000 * 1000), 48 | ("0B", 0), 49 | ]; 50 | 51 | for (input, expected) in test_cases { 52 | let result = parse_string_to_bytes_size(input); 53 | assert!(result.is_ok(), "Failed to parse valid input: {}", input); 54 | assert_eq!( 55 | result.unwrap(), 56 | expected, 57 | "Incorrect parsing for input: {}", 58 | input 59 | ); 60 | } 61 | } 62 | 63 | #[test] 64 | fn test_invalid_size_formats() { 65 | let invalid_inputs = vec!["", "invalid", "GB", "-1GB", "1.5.5GB", "1GB2"]; 66 | 67 | for input in invalid_inputs { 68 | let result = parse_string_to_bytes_size(input); 69 | assert!( 70 | result.is_err(), 71 | "Expected error for invalid input: {}", 72 | input 73 | ); 74 | } 75 | } 76 | 77 | #[test] 78 | fn test_normalize_string() { 79 | let test_cases = vec![ 80 | ("Hello World", "hello world"), 81 | (" UPPERCASE ", "uppercase"), 82 | ("MixedCase", "mixedcase"), 83 | (" trim me ", "trim me"), 84 | ("", ""), 85 | (" ", ""), 86 | ("already lowercase", "already lowercase"), 87 | ]; 88 | 89 | for (input, expected) in test_cases { 90 | let result = normalize_string(input); 91 | println!("result: {}", result); 92 | println!("expected: {}", expected); 93 | assert_eq!(result, expected, "Failed to normalize: '{}'", input); 94 | } 95 | } 96 | } 97 | -------------------------------------------------------------------------------- 
/src/utils/tests/builders/evm/receipt.rs: -------------------------------------------------------------------------------- 1 | use crate::models::{EVMBaseReceipt, EVMReceiptLog, EVMTransactionReceipt}; 2 | use alloy::{ 3 | primitives::{Address, Bytes, LogData, B256, U256, U64}, 4 | rpc::types::Index, 5 | }; 6 | use std::str::FromStr; 7 | 8 | /// A builder for creating test EVM transaction receipts with default values. 9 | #[derive(Debug, Default)] 10 | pub struct ReceiptBuilder { 11 | transaction_hash: Option, 12 | status: Option, 13 | gas_used: Option, 14 | logs: Option>, 15 | from: Option

, 16 | to: Option
, 17 | contract_address: Option
, 18 | transaction_index: Option, 19 | } 20 | 21 | impl ReceiptBuilder { 22 | /// Creates a new ReceiptBuilder instance. 23 | pub fn new() -> Self { 24 | Self::default() 25 | } 26 | 27 | /// Sets the transaction hash of the receipt. 28 | pub fn transaction_hash(mut self, transaction_hash: B256) -> Self { 29 | self.transaction_hash = Some(transaction_hash); 30 | self 31 | } 32 | 33 | /// Sets the status of the transaction. Default is success. 34 | pub fn status(mut self, status: bool) -> Self { 35 | self.status = Some(status); 36 | self 37 | } 38 | 39 | /// Sets the gas used for the transaction. 40 | pub fn gas_used(mut self, gas_used: U256) -> Self { 41 | self.gas_used = Some(gas_used); 42 | self 43 | } 44 | 45 | /// Sets the transaction index in the block. 46 | pub fn transaction_index(mut self, transaction_index: usize) -> Self { 47 | self.transaction_index = Some(Index::from(transaction_index)); 48 | self 49 | } 50 | 51 | /// Sets the logs associated with the transaction. 52 | pub fn logs(mut self, logs: Vec) -> Self { 53 | self.logs = Some(logs); 54 | self 55 | } 56 | 57 | /// Sets the sender address of the transaction. 
58 | pub fn from(mut self, from: Address) -> Self { 59 | self.from = Some(from); 60 | self 61 | } 62 | 63 | /// Sets the recipient address of the transaction 64 | pub fn to(mut self, to: Address) -> Self { 65 | self.to = Some(to); 66 | self 67 | } 68 | 69 | /// Sets the contract address for contract creation transactions 70 | pub fn contract_address(mut self, contract_address: Address) -> Self { 71 | self.contract_address = Some(contract_address); 72 | self 73 | } 74 | 75 | /// Set log with specified value transfer event 76 | pub fn value(mut self, value: U256) -> Self { 77 | let event_signature = "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"; 78 | let contract_address = self.contract_address.unwrap_or_default(); 79 | let from_address = self.from.unwrap_or_default(); 80 | let to_address = self.to.unwrap_or_default(); 81 | let value_hex = format!("{:064x}", value); 82 | 83 | let alloy_log = alloy::primitives::Log { 84 | address: contract_address, 85 | data: LogData::new_unchecked( 86 | vec![ 87 | B256::from_str(event_signature).unwrap(), 88 | B256::from_slice(&[&[0u8; 12], from_address.as_slice()].concat()), 89 | B256::from_slice(&[&[0u8; 12], to_address.as_slice()].concat()), 90 | ], 91 | Bytes(hex::decode(value_hex).unwrap().into()), 92 | ), 93 | }; 94 | 95 | let base_log = EVMReceiptLog::from(alloy_log); 96 | self.logs = Some(vec![base_log]); 97 | self 98 | } 99 | 100 | /// Builds the TransactionReceipt instance. 
101 | pub fn build(self) -> EVMTransactionReceipt { 102 | let status_success = self.status.unwrap_or(true); 103 | let status_u64 = if status_success { 104 | U64::from(1) 105 | } else { 106 | U64::from(0) 107 | }; 108 | 109 | let base = EVMBaseReceipt { 110 | transaction_hash: self.transaction_hash.unwrap_or_default(), 111 | status: Some(status_u64), 112 | gas_used: self.gas_used, 113 | logs: self.logs.unwrap_or_default(), 114 | from: self.from.unwrap_or_default(), 115 | to: self.to, 116 | contract_address: self.contract_address, 117 | transaction_index: self.transaction_index.unwrap_or_default(), 118 | ..Default::default() 119 | }; 120 | 121 | EVMTransactionReceipt::from(base) 122 | } 123 | } 124 | -------------------------------------------------------------------------------- /src/utils/tests/builders/evm/transaction.rs: -------------------------------------------------------------------------------- 1 | use crate::models::{EVMBaseTransaction, EVMTransaction}; 2 | use alloy::{ 3 | primitives::{Address, Bytes, B256, U256}, 4 | rpc::types::Index, 5 | }; 6 | 7 | /// A builder for creating test EVM transactions with default values. 8 | #[derive(Debug, Default)] 9 | pub struct TransactionBuilder { 10 | hash: Option, 11 | from: Option
, 12 | to: Option
, 13 | value: Option, 14 | input: Option, 15 | gas_price: Option, 16 | max_fee_per_gas: Option, 17 | max_priority_fee_per_gas: Option, 18 | gas_limit: Option, 19 | nonce: Option, 20 | transaction_index: Option, 21 | } 22 | 23 | impl TransactionBuilder { 24 | /// Creates a new TransactionBuilder instance. 25 | pub fn new() -> Self { 26 | Self::default() 27 | } 28 | 29 | /// Sets the hash of the transaction. 30 | pub fn hash(mut self, hash: B256) -> Self { 31 | self.hash = Some(hash); 32 | self 33 | } 34 | 35 | /// Sets the sender address of the transaction. 36 | pub fn from(mut self, from: Address) -> Self { 37 | self.from = Some(from); 38 | self 39 | } 40 | 41 | /// Sets the recipient address of the transaction. 42 | pub fn to(mut self, to: Address) -> Self { 43 | self.to = Some(to); 44 | self 45 | } 46 | 47 | /// Sets the transaction value (amount sent). 48 | pub fn value(mut self, value: U256) -> Self { 49 | self.value = Some(value); 50 | self 51 | } 52 | 53 | /// Sets the transaction input data. 54 | pub fn input(mut self, input: Bytes) -> Self { 55 | self.input = Some(input); 56 | self 57 | } 58 | 59 | /// Sets the gas price for legacy transactions. 60 | pub fn gas_price(mut self, gas_price: U256) -> Self { 61 | self.gas_price = Some(gas_price); 62 | self 63 | } 64 | 65 | /// Sets the max fee per gas for EIP-1559 transactions. 66 | pub fn max_fee_per_gas(mut self, max_fee_per_gas: U256) -> Self { 67 | self.max_fee_per_gas = Some(max_fee_per_gas); 68 | self 69 | } 70 | 71 | /// Sets the max priority fee per gas for EIP-1559 transactions. 72 | pub fn max_priority_fee_per_gas(mut self, max_priority_fee_per_gas: U256) -> Self { 73 | self.max_priority_fee_per_gas = Some(max_priority_fee_per_gas); 74 | self 75 | } 76 | 77 | /// Sets the gas limit for the transaction. 78 | pub fn gas_limit(mut self, gas_limit: U256) -> Self { 79 | self.gas_limit = Some(gas_limit); 80 | self 81 | } 82 | 83 | /// Sets the nonce for the transaction. 
84 | pub fn nonce(mut self, nonce: U256) -> Self { 85 | self.nonce = Some(nonce); 86 | self 87 | } 88 | 89 | /// Sets the transaction index for the transaction. 90 | pub fn transaction_index(mut self, transaction_index: usize) -> Self { 91 | self.transaction_index = Some(Index(transaction_index)); 92 | self 93 | } 94 | 95 | /// Builds the Transaction instance. 96 | pub fn build(self) -> EVMTransaction { 97 | let default_gas_limit = U256::from(21000); 98 | 99 | let base_tx = EVMBaseTransaction { 100 | hash: self.hash.unwrap_or_default(), 101 | from: self.from, 102 | to: self.to, 103 | gas_price: self.gas_price, 104 | max_fee_per_gas: self.max_fee_per_gas, 105 | max_priority_fee_per_gas: self.max_priority_fee_per_gas, 106 | gas: self.gas_limit.unwrap_or(default_gas_limit), 107 | nonce: self.nonce.unwrap_or_default(), 108 | value: self.value.unwrap_or_default(), 109 | input: self.input.unwrap_or_default(), 110 | transaction_index: self.transaction_index, 111 | ..Default::default() 112 | }; 113 | 114 | EVMTransaction(base_tx) 115 | } 116 | } 117 | -------------------------------------------------------------------------------- /src/utils/tests/mod.rs: -------------------------------------------------------------------------------- 1 | //! Test helper utilities 2 | //! 3 | //! This module contains test helper utilities for the application. 4 | //! 5 | //! - `builders`: Test helper utilities for creating test instances of models 6 | 7 | pub mod builders { 8 | // Chain specific test helpers 9 | pub mod evm { 10 | pub mod monitor; 11 | pub mod receipt; 12 | pub mod transaction; 13 | } 14 | pub mod stellar { 15 | pub mod monitor; 16 | } 17 | 18 | // Chain agnostic test helpers 19 | pub mod network; 20 | pub mod trigger; 21 | } 22 | 23 | pub use builders::*; 24 | -------------------------------------------------------------------------------- /tests/integration.rs: -------------------------------------------------------------------------------- 1 | //! 
Integration tests for the OpenZeppelin Monitor. 2 | //! 3 | //! Contains tests for blockchain monitoring functionality across different 4 | //! chains (EVM and Stellar) and mock implementations for testing. 5 | 6 | mod integration { 7 | mod blockchain { 8 | mod pool; 9 | mod clients { 10 | mod evm { 11 | mod client; 12 | } 13 | mod stellar { 14 | mod client; 15 | } 16 | } 17 | mod transports { 18 | mod evm { 19 | mod http; 20 | mod transport; 21 | } 22 | mod stellar { 23 | mod http; 24 | mod transport; 25 | } 26 | mod endpoint_manager; 27 | mod http; 28 | } 29 | } 30 | mod bootstrap { 31 | mod main; 32 | } 33 | mod mocks; 34 | 35 | mod blockwatcher { 36 | mod service; 37 | } 38 | mod filters { 39 | pub mod common; 40 | mod evm { 41 | mod filter; 42 | } 43 | mod stellar { 44 | mod filter; 45 | } 46 | } 47 | mod notifications { 48 | mod discord; 49 | mod email; 50 | mod script; 51 | mod slack; 52 | mod telegram; 53 | mod webhook; 54 | } 55 | mod monitor { 56 | mod execution; 57 | } 58 | 59 | mod security { 60 | mod secret; 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /tests/integration/fixtures/evm/contract_spec.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "anonymous": false, 4 | "inputs": [ 5 | { 6 | "indexed": true, 7 | "internalType": "address", 8 | "name": "from", 9 | "type": "address" 10 | }, 11 | { 12 | "indexed": true, 13 | "internalType": "address", 14 | "name": "to", 15 | "type": "address" 16 | }, 17 | { 18 | "indexed": false, 19 | "internalType": "uint256", 20 | "name": "value", 21 | "type": "uint256" 22 | } 23 | ], 24 | "name": "Transfer", 25 | "type": "event" 26 | }, 27 | { 28 | "inputs": [ 29 | { 30 | "internalType": "address", 31 | "name": "to", 32 | "type": "address" 33 | }, 34 | { 35 | "internalType": "uint256", 36 | "name": "value", 37 | "type": "uint256" 38 | } 39 | ], 40 | "name": "transfer", 41 | "outputs": [ 42 | { 43 | "internalType": 
"bool", 44 | "name": "", 45 | "type": "bool" 46 | } 47 | ], 48 | "stateMutability": "nonpayable", 49 | "type": "function" 50 | }, 51 | { 52 | "inputs": [], 53 | "name": "increment", 54 | "outputs": [], 55 | "stateMutability": "nonpayable", 56 | "type": "function" 57 | } 58 | ] 59 | -------------------------------------------------------------------------------- /tests/integration/fixtures/evm/networks/network.json: -------------------------------------------------------------------------------- 1 | { 2 | "network_type": "EVM", 3 | "slug": "ethereum_mainnet", 4 | "name": "Ethereum Mainnet", 5 | "rpc_urls": [ 6 | { 7 | "url": { 8 | "type": "Plain", 9 | "value": "https://eth.drpc.org" 10 | }, 11 | "type_": "rpc", 12 | "weight": 100 13 | }, 14 | { 15 | "type_": "rpc", 16 | "url": { 17 | "type": "Plain", 18 | "value": "https://ethereum-rpc.publicnode.com" 19 | }, 20 | "weight": 90 21 | } 22 | ], 23 | "chain_id": 1, 24 | "block_time_ms": 12000, 25 | "confirmation_blocks": 12, 26 | "cron_schedule": "0 */1 * * * *", 27 | "max_past_blocks": 50, 28 | "store_blocks": true 29 | } 30 | -------------------------------------------------------------------------------- /tests/integration/fixtures/evm/triggers/trigger.json: -------------------------------------------------------------------------------- 1 | { 2 | "example_trigger_slack": { 3 | "name": "Example Trigger Slack Notification", 4 | "trigger_type": "slack", 5 | "config": { 6 | "slack_url": { 7 | "type": "plain", 8 | "value": "https://hooks.slack.com/services/AAA/BBB/CCC" 9 | }, 10 | "message": { 11 | "title": "example_trigger_slack triggered", 12 | "body": "Large transfer of ${events.0.args.value} USDC from ${events.0.args.from} to ${events.0.args.to} | https://etherscan.io/tx/${transaction.hash}#eventlog" 13 | } 14 | } 15 | }, 16 | "example_trigger_webhook": { 17 | "name": "Example Trigger Webhook Notification", 18 | "trigger_type": "webhook", 19 | "config": { 20 | "url": { 21 | "type": "plain", 22 | "value": 
// Reads a monitor-match JSON document from stdin and prints "true" when the
// EVM block number is even, "false" otherwise.
//
// Fix: the original wrapped only the handler *registration* in try/catch, so
// exceptions thrown asynchronously inside the 'end' callback (e.g. JSON.parse
// on malformed input) escaped the catch and crashed without printing 'false'.
// The try/catch now lives inside the callback where the work happens.
let inputData = '';

// Accumulate stdin chunks until the stream ends.
process.stdin.on('data', chunk => {
  inputData += chunk;
});

process.stdin.on('end', () => {
  try {
    const data = JSON.parse(inputData);
    const monitorMatch = data.monitor_match;
    const args = data.args;

    // Extract block_number
    let blockNumber = null;
    if (monitorMatch.EVM) {
      const hexBlock = monitorMatch.EVM.transaction?.blockNumber;
      if (hexBlock) {
        // Convert hex string to integer
        blockNumber = parseInt(hexBlock, 16);
        console.log(`BLOCK NUMBER INTEGER ==>: ${blockNumber}`);
      }
    }

    if (blockNumber === null) {
      console.log("Block number is None");
      console.log('false');
      return;
    }

    const result = blockNumber % 2 === 0;
    console.log(`Block number ${blockNumber} is ${result ? 'even' : 'odd'}`);
    console.log(result.toString());
  } catch (e) {
    console.log(`Error processing input: ${e}`);
    console.log('false');
  }
});
'odd'}", flush=True) 37 | logging.info(f"Block number {block_number} is {'even' if result else 'odd'}") 38 | return result 39 | 40 | except Exception as e: 41 | print(f"Error processing input: {e}", flush=True) 42 | return False 43 | 44 | if __name__ == "__main__": 45 | result = main() 46 | # Print the final boolean result 47 | print(str(result).lower(), flush=True) 48 | -------------------------------------------------------------------------------- /tests/integration/fixtures/filters/evm_filter_block_number.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Enable error handling 4 | set -e 5 | 6 | main() { 7 | verbose=false 8 | 9 | # Read JSON input from stdin 10 | input_json=$(cat) 11 | 12 | # Parse arguments from the input JSON 13 | args=$(echo "$input_json" | jq -r '.args // empty') 14 | if [ ! -z "$args" ]; then 15 | if [[ $args == *"--verbose"* ]]; then 16 | verbose=true 17 | echo "Verbose mode enabled" 18 | fi 19 | fi 20 | 21 | # Extract the monitor match data from the input 22 | monitor_data=$(echo "$input_json" | jq -r '.monitor_match') 23 | 24 | # Validate input 25 | if [ -z "$monitor_data" ]; then 26 | echo "No input JSON provided" 27 | echo "false" 28 | exit 1 29 | fi 30 | 31 | if [ "$verbose" = true ]; then 32 | echo "Input JSON received:" 33 | fi 34 | 35 | # Extract blockNumber from the EVM receipt or transaction 36 | block_number_hex=$(echo "$monitor_data" | jq -r '.EVM.transaction.blockNumber' || echo "") 37 | 38 | # Validate that block_number_hex is not empty 39 | if [ -z "$block_number_hex" ]; then 40 | echo "Invalid JSON or missing blockNumber" 41 | echo "false" 42 | exit 1 43 | fi 44 | 45 | # Remove 0x prefix if present and clean the string 46 | block_number_hex=$(echo "$block_number_hex" | tr -d '\n' | tr -d ' ') 47 | block_number_hex=${block_number_hex#0x} 48 | 49 | if [ "$verbose" = true ]; then 50 | echo "Extracted block number (hex): $block_number_hex" 51 | fi 52 | 53 | # 
Convert hex to decimal with error checking 54 | if ! block_number=$(printf "%d" $((16#${block_number_hex})) 2>/dev/null); then 55 | echo "Failed to convert hex to decimal" 56 | echo "false" 57 | exit 1 58 | fi 59 | 60 | if [ "$verbose" = true ]; then 61 | echo "Converted block number (decimal): $block_number" 62 | fi 63 | 64 | # Check if even or odd using modulo 65 | is_even=$((block_number % 2)) 66 | 67 | if [ $is_even -eq 0 ]; then 68 | echo "Block number $block_number is even" 69 | echo "Verbose mode: $verbose" 70 | echo "true" 71 | exit 0 72 | else 73 | echo "Block number $block_number is odd" 74 | echo "Verbose mode: $verbose" 75 | echo "false" 76 | exit 0 77 | fi 78 | } 79 | 80 | # Call main function 81 | main 82 | -------------------------------------------------------------------------------- /tests/integration/fixtures/filters/evm_filter_by_arguments.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import sys 3 | import json 4 | import logging 5 | 6 | def main(): 7 | try: 8 | # Read input from stdin 9 | input_data = sys.stdin.read() 10 | if not input_data: 11 | print("No input JSON provided", flush=True) 12 | return False 13 | 14 | # Parse input JSON 15 | try: 16 | data = json.loads(input_data) 17 | args = data['args'] 18 | except json.JSONDecodeError as e: 19 | print(f"Invalid JSON input: {e}", flush=True) 20 | return False 21 | 22 | # Check if --verbose is in args 23 | result = '--verbose' in args 24 | print(f"Verbose mode is {'enabled' if result else 'disabled'}", flush=True) 25 | logging.info(f"Verbose mode is {'enabled' if result else 'disabled'}") 26 | return result 27 | 28 | except Exception as e: 29 | print(f"Error processing input: {e}", flush=True) 30 | return False 31 | 32 | if __name__ == "__main__": 33 | result = main() 34 | # Print the final boolean result 35 | print(str(result).lower(), flush=True) 36 | 
// Reads a monitor-match JSON document from stdin and prints "true" when the
// Stellar ledger sequence number is even, "false" otherwise.
//
// Fix: the original wrapped only the handler *registration* in try/catch, so
// exceptions thrown asynchronously inside the 'end' callback (e.g. JSON.parse
// on malformed input) escaped the catch and crashed without printing 'false'.
// The try/catch now lives inside the callback where the work happens.
let inputData = '';

// Accumulate stdin chunks until the stream ends.
process.stdin.on('data', chunk => {
  inputData += chunk;
});

process.stdin.on('end', () => {
  try {
    const data = JSON.parse(inputData);
    const monitorMatch = data.monitor_match;
    const args = data.args;

    // Extract ledger sequence number
    let ledgerNumber = null;
    if (monitorMatch.Stellar) {
      ledgerNumber = monitorMatch.Stellar.ledger.sequence;
    }

    if (ledgerNumber === null) {
      console.log("Ledger number is None");
      console.log('false');
      return;
    }

    const result = ledgerNumber % 2 === 0;
    console.log(`Ledger number ${ledgerNumber} is ${result ? 'even' : 'odd'}`);
    console.log(result.toString());
  } catch (e) {
    console.log(`Error processing input: ${e}`);
    console.log('false');
  }
});
if ledger: 28 | ledger_number = int(ledger) 29 | 30 | if ledger_number is None: 31 | print("Ledger number is None", flush=True) 32 | return False 33 | 34 | # Return True for even ledger numbers, False for odd 35 | result = ledger_number % 2 == 0 36 | print(f"Ledger number {ledger_number} is {'even' if result else 'odd'}", flush=True) 37 | return result 38 | 39 | except Exception as e: 40 | print(f"Error processing input: {e}", flush=True) 41 | return False 42 | 43 | if __name__ == "__main__": 44 | result = main() 45 | # Only print the final boolean result 46 | print(str(result).lower(), flush=True) 47 | -------------------------------------------------------------------------------- /tests/integration/fixtures/filters/stellar_filter_block_number.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Enable error handling 4 | set -e 5 | 6 | main() { 7 | verbose=false 8 | 9 | # Read JSON input from stdin 10 | input_json=$(cat) 11 | 12 | # Parse arguments from the input JSON 13 | args=$(echo "$input_json" | jq -r '.args // empty') 14 | if [ ! 
-z "$args" ]; then 15 | if [[ $args == *"--verbose"* ]]; then 16 | verbose=true 17 | echo "Verbose mode enabled" 18 | fi 19 | fi 20 | 21 | # Validate input 22 | if [ -z "$input_json" ]; then 23 | echo "No input JSON provided" 24 | echo "false" 25 | exit 1 26 | fi 27 | 28 | if [ "$verbose" = true ]; then 29 | echo "Input JSON received:" 30 | fi 31 | 32 | # Extract ledger number from the nested monitor_match.Stellar structure 33 | ledger_number=$(echo "$input_json" | jq -r '.monitor_match.Stellar.ledger.sequence // empty') 34 | 35 | # Validate ledger number 36 | if [ -z "$ledger_number" ]; then 37 | echo "Invalid JSON or missing sequence number" 38 | echo "false" 39 | exit 1 40 | fi 41 | 42 | # Remove any whitespace 43 | ledger_number=$(echo "$ledger_number" | tr -d '\n' | tr -d ' ') 44 | 45 | # Check if even or odd using modulo 46 | is_even=$((ledger_number % 2)) 47 | 48 | if [ $is_even -eq 0 ]; then 49 | echo "Ledger number $ledger_number is even" 50 | echo "Verbose mode: $verbose" 51 | echo "true" 52 | exit 0 53 | else 54 | echo "Ledger number $ledger_number is odd" 55 | echo "Verbose mode: $verbose" 56 | echo "false" 57 | exit 0 58 | fi 59 | } 60 | 61 | # Call main function without arguments, input will be read from stdin 62 | main 63 | -------------------------------------------------------------------------------- /tests/integration/fixtures/stellar/contract_spec.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "function_v0": { 4 | "doc": "", 5 | "name": "transfer", 6 | "inputs": [ 7 | { 8 | "doc": "", 9 | "name": "from", 10 | "type_": "address" 11 | }, 12 | { 13 | "doc": "", 14 | "name": "to", 15 | "type_": "address" 16 | }, 17 | { 18 | "doc": "", 19 | "name": "amount", 20 | "type_": "i128" 21 | } 22 | ], 23 | "outputs": [] 24 | } 25 | }, 26 | { 27 | "function_v0": { 28 | "doc": "", 29 | "name": "upsert_data", 30 | "inputs": [ 31 | { 32 | "doc": "", 33 | "name": "data", 34 | "type_": { 35 | "map": { 36 | 
"key_type": "string", 37 | "value_type": "string" 38 | } 39 | } 40 | } 41 | ], 42 | "outputs": [] 43 | } 44 | }, 45 | { 46 | "function_v0": { 47 | "doc": "", 48 | "name": "increment", 49 | "inputs": [], 50 | "outputs": [] 51 | } 52 | } 53 | ] 54 | -------------------------------------------------------------------------------- /tests/integration/fixtures/stellar/events.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "type": "contract", 4 | "ledger": 317598, 5 | "ledgerClosedAt": "2024-12-29T02:50:10Z", 6 | "contractId": "CC5WP4L2CXUBZXZY3ZHK2XURV4H7VS6GKYF7K7WIHQSMEUDJYQ2E5TLK", 7 | "id": "0001364073023291392-0000000001", 8 | "pagingToken": "0001364073023291392-0000000001", 9 | "inSuccessfulContractCall": true, 10 | "txHash": "5a7bf196f1db3ab56089de59985bbf5a6c3e0e6a4672cd91e01680b0fff260d8", 11 | "topic": [ 12 | "AAAADwAAAA9jb250cmFjdF9jYWxsZWQA", 13 | "AAAAEgAAAAAAAAAACMEAtVPau/0s+2y4o3aWt1MAtjmdqWNzPmy6MRVcdfo=", 14 | "AAAADgAAAAlnYW5hY2hlLTAAAAA=", 15 | "AAAADgAAACoweDY4QjkzMDQ1ZmU3RDg3OTRhN2NBRjMyN2U3Zjg1NUNENkNkMDNCQjgAAA==", 16 | "AAAADQAAACAaemkIzyqB6sH3VVev7iSjYHderf04InYUVZQLYhCsdg==" 17 | ], 18 | "value": "AAAADQAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAiY2FsbCBmcm9tIHN0ZWxsYXIgYXQgMTczNTQ0MDYwNjk3NwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==" 19 | }, 20 | { 21 | "type": "contract", 22 | "ledger": 317598, 23 | "ledgerClosedAt": "2024-12-29T02:50:10Z", 24 | "contractId": "CBIELTK6YBZJU5UP2WWQEUCYKLPU6AUNZ2BQ4WWFEIE3USCIHMXQDAMA", 25 | "id": "0001364073023295488-0000000001", 26 | "pagingToken": "0001364073023295488-0000000001", 27 | "inSuccessfulContractCall": true, 28 | "txHash": "2c89fc3311bc275415ed6a764c77d7b0349cb9f4ce37fd2bbfc6604920811503", 29 | "topic": [ 30 | "AAAADwAAAAh0cmFuc2Zlcg==", 31 | "AAAAEgAAAAAAAAAAy70KCGxcPZNLYI2aDqy07iSWPZBxWKf2ABAJbf8Yq2w=", 32 | "AAAAEgAAAAG/hhWYyzRyedtM+ibMoT1uLM6/ETE++QYJ48uVszTb7Q==", 33 | 
"AAAADgAAAD1VU0RDOkdCQkQ0N0lGNkxXSzdQN01ERVZTQ1dSN0RQVVdWM05ZM0RUUUVWRkw0TkFUNEFRSDNaTExGTEE1AAAA" 34 | ], 35 | "value": "AAAACgAAAAAAAAAAAAAAAAAACMA=" 36 | } 37 | ] 38 | -------------------------------------------------------------------------------- /tests/integration/fixtures/stellar/monitors/monitor.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "Large Transfer of USDC Token", 3 | "paused": false, 4 | "networks": [ 5 | "stellar_testnet" 6 | ], 7 | "addresses": [ 8 | { 9 | "address": "CAVLP5DH2GJPZMVO7IJY4CVOD5MWEFTJFVPD2YY2FQXOQHRGHK4D6HLP" 10 | }, 11 | { 12 | "address": "CBIELTK6YBZJU5UP2WWQEUCYKLPU6AUNZ2BQ4WWFEIE3USCIHMXQDAMA" 13 | }, 14 | { 15 | "address": "CDLZFC3SYJYDZT7K67VZ75HPJVIEUVNIXF47ZG2FB2RMQQVU2HHGCYSC" 16 | }, 17 | { 18 | "address": "CB7ZA2CJ4EECSV62PJSSV6DVYK25IRGRKWIB5354TGANJGXRUFK4LNO3" 19 | }, 20 | { 21 | "address": "CBWRWC2IFNRXKAW2HG5473V5U25OMUKVIE3BFZBIWOOD3VLEIBUIOQG6" 22 | }, 23 | { 24 | "address": "CDMZ6LU66KEMLKI3EJBIGXTZ4KZ2CRTSHZETMY3QQZBWRKVKB5EIOHTX" 25 | } 26 | ], 27 | "match_conditions": { 28 | "functions": [ 29 | { 30 | "signature": "transfer(Address,Address,I128)", 31 | "expression": "amount > 1000" 32 | }, 33 | { 34 | "signature": "upsert_data(Map)", 35 | "expression": "data.myKey1 >= 1234 AND data.myKey2 == 'Hello, world!'" 36 | } 37 | ], 38 | "events": [ 39 | { 40 | "signature": "transfer(Address,Address,String,I128)", 41 | "expression": "0 == GDF32CQINROD3E2LMCGZUDVMWTXCJFR5SBYVRJ7WAAIAS3P7DCVWZEFY AND 3 >= 2240" 42 | } 43 | ], 44 | "transactions": [ 45 | { 46 | "status": "Success", 47 | "expression": "value >= 0" 48 | } 49 | ] 50 | }, 51 | "trigger_conditions": [], 52 | "triggers": [ 53 | "example_trigger_slack" 54 | ] 55 | } 56 | -------------------------------------------------------------------------------- /tests/integration/fixtures/stellar/networks/network.json: -------------------------------------------------------------------------------- 1 | { 2 | 
"network_type": "Stellar", 3 | "slug": "stellar_testnet", 4 | "name": "Stellar Testnet", 5 | "rpc_urls": [ 6 | { 7 | "type_": "rpc", 8 | "url": { 9 | "type": "plain", 10 | "value": "https://soroban-testnet.stellar.org" 11 | }, 12 | "weight": 100 13 | } 14 | ], 15 | "network_passphrase": "Test SDF Network ; September 2015", 16 | "block_time_ms": 5000, 17 | "confirmation_blocks": 2, 18 | "cron_schedule": "0 */1 * * * *", 19 | "max_past_blocks": 50, 20 | "store_blocks": true 21 | } 22 | -------------------------------------------------------------------------------- /tests/integration/fixtures/stellar/triggers/trigger.json: -------------------------------------------------------------------------------- 1 | { 2 | "example_trigger_slack": { 3 | "name": "Example Trigger Slack Notification", 4 | "trigger_type": "slack", 5 | "config": { 6 | "slack_url": { 7 | "type": "plain", 8 | "value": "https://hooks.slack.com/services/AAA/BBB/CCC" 9 | }, 10 | "message": { 11 | "title": "example_trigger_slack triggered", 12 | "body": "${monitor.name} triggered because of a large transfer of ${functions.0.args.amount} USDC to ${functions.0.args.to} | https://stellar.expert/explorer/testnet/tx/${transaction.hash}" 13 | } 14 | } 15 | }, 16 | "example_trigger_webhook": { 17 | "name": "Example Trigger Webhook Notification", 18 | "trigger_type": "webhook", 19 | "config": { 20 | "url": { 21 | "type": "plain", 22 | "value": "https://webhook.site/123" 23 | }, 24 | "method": "POST", 25 | "secret": { 26 | "type": "plain", 27 | "value": "some-secret" 28 | }, 29 | "headers": { 30 | "Content-Type": "application/json" 31 | }, 32 | "message": { 33 | "title": "example_trigger_webhook triggered", 34 | "body": "${monitor.name} triggered because someone called the ${functions.0.signature} function with value ${functions.0.args.amount} | https://stellar.expert/explorer/testnet/tx/${transaction.hash}" 35 | } 36 | } 37 | }, 38 | "example_trigger_discord": { 39 | "name": "Example Trigger Discord 
Notification", 40 | "trigger_type": "discord", 41 | "config": { 42 | "discord_url": { 43 | "type": "plain", 44 | "value": "https://discord.com/api/webhooks/123/123" 45 | }, 46 | "message": { 47 | "title": "example_trigger_discord triggered", 48 | "body": "${monitor.name} triggered because someone called the ${functions.0.signature} function with value ${functions.0.args.amount} | https://stellar.expert/explorer/testnet/tx/${transaction.hash}" 49 | } 50 | } 51 | }, 52 | "example_trigger_telegram": { 53 | "name": "Example Trigger Telegram Notification", 54 | "trigger_type": "telegram", 55 | "config": { 56 | "token": { 57 | "type": "plain", 58 | "value": "123" 59 | }, 60 | "chat_id": "123", 61 | "disable_web_preview": true, 62 | "message": { 63 | "title": "example_trigger_telegram triggered", 64 | "body": "${monitor.name} triggered because someone called the ${functions.0.signature} function with value ${functions.0.args.amount} | https://stellar.expert/explorer/testnet/tx/${transaction.hash}" 65 | } 66 | } 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /tests/integration/mocks/mod.rs: -------------------------------------------------------------------------------- 1 | //! Mock implementations for testing purposes. 2 | //! 3 | //! This module contains mock implementations of various traits used throughout 4 | //! the application, primarily for testing. It includes mocks for: 5 | //! - Blockchain clients (EVM and Stellar) 6 | //! - Repository interfaces 7 | //! 8 | //! The mocks are implemented using the `mockall` crate. 
9 | 10 | mod clients; 11 | mod logging; 12 | mod models; 13 | mod repositories; 14 | mod services; 15 | mod transports; 16 | #[allow(unused_imports)] 17 | pub use clients::*; 18 | #[allow(unused_imports)] 19 | pub use logging::*; 20 | #[allow(unused_imports)] 21 | pub use models::*; 22 | #[allow(unused_imports)] 23 | pub use repositories::*; 24 | #[allow(unused_imports)] 25 | pub use services::*; 26 | #[allow(unused_imports)] 27 | pub use transports::*; 28 | -------------------------------------------------------------------------------- /tests/integration/mocks/services.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | use mockall::mock; 3 | use std::collections::HashMap; 4 | 5 | use openzeppelin_monitor::{ 6 | models::{BlockType, Monitor, MonitorMatch, Network, ScriptLanguage}, 7 | repositories::{TriggerRepositoryTrait, TriggerService}, 8 | services::{ 9 | blockchain::BlockFilterFactory, 10 | blockwatcher::{BlockStorage, BlockTrackerTrait, JobSchedulerTrait}, 11 | filter::FilterError, 12 | notification::NotificationService, 13 | trigger::{TriggerError, TriggerExecutionServiceTrait}, 14 | }, 15 | }; 16 | 17 | mock! { 18 | pub TriggerExecutionService { 19 | pub fn new(trigger_service: TriggerService, notification_service: NotificationService) -> Self; 20 | } 21 | 22 | #[async_trait] 23 | impl TriggerExecutionServiceTrait for TriggerExecutionService { 24 | async fn execute( 25 | &self, 26 | trigger_slugs: &[String], 27 | variables: HashMap, 28 | monitor_match: &MonitorMatch, 29 | trigger_scripts: &HashMap, 30 | ) -> Result<(), TriggerError>; 31 | async fn load_scripts(&self, monitors: &[Monitor]) -> Result, TriggerError>; 32 | } 33 | } 34 | 35 | mock! 
{ 36 | pub FilterService { 37 | pub fn new() -> Self; 38 | 39 | pub async fn filter_block + Send + Sync + 'static>( 40 | &self, 41 | client: &T, 42 | network: &Network, 43 | block: &BlockType, 44 | monitors: &[Monitor], 45 | ) -> Result, FilterError>; 46 | } 47 | } 48 | 49 | mock! { 50 | pub BlockStorage {} 51 | #[async_trait] 52 | impl BlockStorage for BlockStorage { 53 | async fn save_missed_block(&self, network_slug: &str, block_number: u64) -> Result<(), anyhow::Error>; 54 | async fn save_last_processed_block(&self, network_slug: &str, block_number: u64) -> Result<(), anyhow::Error>; 55 | async fn get_last_processed_block(&self, network_slug: &str) -> Result, anyhow::Error>; 56 | async fn save_blocks(&self, network_slug: &str, blocks: &[BlockType]) -> Result<(), anyhow::Error>; 57 | async fn delete_blocks(&self, network_slug: &str) -> Result<(), anyhow::Error>; 58 | } 59 | 60 | impl Clone for BlockStorage { 61 | fn clone(&self) -> Self { 62 | self.clone() 63 | } 64 | } 65 | } 66 | 67 | mock! { 68 | pub BlockTracker {} 69 | 70 | #[async_trait] 71 | impl BlockTrackerTrait for BlockTracker { 72 | fn new(history_size: usize, storage: Option >) -> Self; 73 | async fn record_block(&self, network: &Network, block_number: u64) -> Result<(), anyhow::Error>; 74 | async fn get_last_block(&self, network_slug: &str) -> Option; 75 | } 76 | } 77 | 78 | mock! 
{ 79 | pub JobScheduler {} 80 | 81 | #[async_trait] 82 | impl JobSchedulerTrait for JobScheduler { 83 | async fn new() -> Result> { 84 | Ok(Self::default()) 85 | } 86 | 87 | async fn add(&self, _job: tokio_cron_scheduler::Job) -> Result<(), Box> { 88 | Ok(()) 89 | } 90 | 91 | async fn start(&self) -> Result<(), Box> { 92 | Ok(()) 93 | } 94 | 95 | async fn shutdown(&mut self) -> Result<(), Box> { 96 | Ok(()) 97 | } 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /tests/integration/mocks/transports.rs: -------------------------------------------------------------------------------- 1 | use mockall::mock; 2 | use reqwest_middleware::ClientWithMiddleware; 3 | use reqwest_retry::policies::ExponentialBackoff; 4 | use serde_json::Value; 5 | 6 | use openzeppelin_monitor::services::blockchain::{ 7 | BlockchainTransport, RotatingTransport, TransientErrorRetryStrategy, TransportError, 8 | }; 9 | 10 | // Mock implementation of a EVM transport client. 11 | // Used for testing Ethereum compatible blockchain interactions. 12 | // Provides functionality to simulate raw JSON-RPC request handling. 13 | mock! { 14 | pub EVMTransportClient { 15 | pub async fn send_raw_request(&self, method: &str, params: Option>) -> Result; 16 | pub async fn get_current_url(&self) -> String; 17 | } 18 | 19 | impl Clone for EVMTransportClient { 20 | fn clone(&self) -> Self; 21 | } 22 | } 23 | 24 | #[async_trait::async_trait] 25 | impl BlockchainTransport for MockEVMTransportClient { 26 | async fn get_current_url(&self) -> String { 27 | self.get_current_url().await 28 | } 29 | 30 | async fn send_raw_request

( 31 | &self, 32 | method: &str, 33 | params: Option

, 34 | ) -> Result 35 | where 36 | P: Into + Send + Clone, 37 | { 38 | let params_value = params.map(|p| p.into()); 39 | self.send_raw_request(method, params_value.and_then(|v| v.as_array().cloned())) 40 | .await 41 | } 42 | 43 | fn set_retry_policy( 44 | &mut self, 45 | _: ExponentialBackoff, 46 | _: Option, 47 | ) -> Result<(), anyhow::Error> { 48 | Ok(()) 49 | } 50 | 51 | fn update_endpoint_manager_client( 52 | &mut self, 53 | _: ClientWithMiddleware, 54 | ) -> Result<(), anyhow::Error> { 55 | Ok(()) 56 | } 57 | } 58 | 59 | #[async_trait::async_trait] 60 | impl RotatingTransport for MockEVMTransportClient { 61 | async fn try_connect(&self, _url: &str) -> Result<(), anyhow::Error> { 62 | Ok(()) 63 | } 64 | 65 | async fn update_client(&self, _url: &str) -> Result<(), anyhow::Error> { 66 | Ok(()) 67 | } 68 | } 69 | 70 | // Mock implementation of a Stellar transport client. 71 | // Used for testing Stellar blockchain interactions. 72 | // Provides functionality to simulate raw JSON-RPC request handling. 73 | mock! { 74 | pub StellarTransportClient { 75 | pub async fn send_raw_request(&self, method: &str, params: Option) -> Result; 76 | pub async fn get_current_url(&self) -> String; 77 | } 78 | 79 | impl Clone for StellarTransportClient { 80 | fn clone(&self) -> Self; 81 | } 82 | } 83 | 84 | #[async_trait::async_trait] 85 | impl BlockchainTransport for MockStellarTransportClient { 86 | async fn get_current_url(&self) -> String { 87 | self.get_current_url().await 88 | } 89 | 90 | async fn send_raw_request

( 91 | &self, 92 | method: &str, 93 | params: Option

, 94 | ) -> Result 95 | where 96 | P: Into + Send + Clone, 97 | { 98 | self.send_raw_request(method, params.map(|p| p.into())) 99 | .await 100 | } 101 | 102 | fn set_retry_policy( 103 | &mut self, 104 | _: ExponentialBackoff, 105 | _: Option, 106 | ) -> Result<(), anyhow::Error> { 107 | Ok(()) 108 | } 109 | 110 | fn update_endpoint_manager_client( 111 | &mut self, 112 | _: ClientWithMiddleware, 113 | ) -> Result<(), anyhow::Error> { 114 | Ok(()) 115 | } 116 | } 117 | 118 | #[async_trait::async_trait] 119 | impl RotatingTransport for MockStellarTransportClient { 120 | async fn try_connect(&self, _url: &str) -> Result<(), anyhow::Error> { 121 | Ok(()) 122 | } 123 | 124 | async fn update_client(&self, _url: &str) -> Result<(), anyhow::Error> { 125 | Ok(()) 126 | } 127 | } 128 | -------------------------------------------------------------------------------- /tests/properties.rs: -------------------------------------------------------------------------------- 1 | //! PBT tests for the OpenZeppelin Monitor. 2 | //! 3 | //! Contains tests for blockchain monitoring functionality across different 4 | //! chains (EVM and Stellar) and mock implementations for testing. 5 | 6 | mod properties { 7 | mod filters { 8 | mod evm { 9 | mod filter; 10 | } 11 | mod stellar { 12 | mod filter; 13 | } 14 | } 15 | mod notifications { 16 | mod discord; 17 | mod email; 18 | mod slack; 19 | mod telegram; 20 | mod webhook; 21 | } 22 | mod repositories { 23 | mod monitor; 24 | mod network; 25 | mod trigger; 26 | } 27 | mod triggers { 28 | mod script; 29 | } 30 | mod utils { 31 | mod logging; 32 | } 33 | mod strategies; 34 | } 35 | -------------------------------------------------------------------------------- /tests/properties/notifications/discord.rs: -------------------------------------------------------------------------------- 1 | //! Property-based tests for Discord notifications. 2 | //! 3 | //! 
These tests verify the behavior of the Discord notification system using property testing, 4 | //! focusing on template variable substitution, message formatting consistency, and edge cases. 5 | //! The tests ensure that the Discord notification system handles template variables correctly 6 | //! and produces consistent, well-formed output across various input combinations. 7 | 8 | use openzeppelin_monitor::services::notification::DiscordNotifier; 9 | use proptest::{prelude::*, test_runner::Config}; 10 | use std::collections::HashMap; 11 | 12 | /// Generates a strategy for creating HashMaps containing template variable key-value pairs. 13 | /// Keys are alphanumeric strings of length 1-10, values are alphanumeric strings (with spaces) of 14 | /// length 1-20. 15 | fn template_variables_strategy() -> impl Strategy> { 16 | prop::collection::hash_map("[a-zA-Z0-9_]{1,10}", "[a-zA-Z0-9 ]{1,20}", 1..5) 17 | } 18 | 19 | proptest! { 20 | #![proptest_config(Config { 21 | failure_persistence: None, 22 | ..Config::default() 23 | })] 24 | 25 | /// Tests that template formatting is idempotent - applying the same variables multiple times 26 | /// should produce identical results. 
27 | /// 28 | /// # Properties tested 29 | /// - Multiple calls to format_message with the same variables should return identical results 30 | /// - Template can contain alphanumeric characters, spaces, $, {, }, and _ 31 | #[test] 32 | fn test_notification_template_idempotency( 33 | template in "[a-zA-Z0-9 ${}_]{1,100}", 34 | vars in template_variables_strategy() 35 | ) { 36 | let notifier = DiscordNotifier::new( 37 | "https://discord.com/test".to_string(), 38 | "Test".to_string(), 39 | template.clone() 40 | ).unwrap(); 41 | 42 | let first_pass = notifier.format_message(&vars); 43 | let second_pass = notifier.format_message(&vars); 44 | 45 | prop_assert_eq!(first_pass, second_pass); 46 | } 47 | 48 | /// Tests that variable substitution handles variable boundaries correctly and doesn't result 49 | /// in partial or malformed substitutions. 50 | /// 51 | /// # Properties tested 52 | /// - Templates containing ${variable} patterns are processed correctly 53 | /// - No partial substitution artifacts (${, }) remain in the output 54 | #[test] 55 | fn test_notification_variable_boundaries( 56 | template in "[a-zA-Z0-9 ]{0,50}\\$\\{[a-z_]+\\}[a-zA-Z0-9 ]{0,50}", 57 | vars in template_variables_strategy() 58 | ) { 59 | let notifier = DiscordNotifier::new( 60 | "https://discord.com/test".to_string(), 61 | "Test".to_string(), 62 | template.clone() 63 | ).unwrap(); 64 | 65 | let formatted = notifier.format_message(&vars); 66 | 67 | // Verify no partial variable substitutions occurred 68 | prop_assert!(!formatted.contains("${{")); 69 | prop_assert!(!formatted.contains("}}")); 70 | } 71 | 72 | /// Tests that templates with no matching variables remain unchanged. 
73 | /// 74 | /// # Properties tested 75 | /// - Template remains identical when processed with an empty variables map 76 | /// - The formatted message follows the expected Discord format: "*Title*\n\ntemplate" 77 | #[test] 78 | fn test_notification_empty_variables( 79 | template in "[a-zA-Z0-9 ${}_]{1,100}" 80 | ) { 81 | let notifier = DiscordNotifier::new( 82 | "https://discord.com/test".to_string(), 83 | "Test".to_string(), 84 | template.clone() 85 | ).unwrap(); 86 | 87 | let empty_vars = HashMap::new(); 88 | let formatted = notifier.format_message(&empty_vars); 89 | 90 | // Template should remain unchanged when no variables are provided 91 | prop_assert_eq!(formatted, format!("*Test*\n\n{}", template)); 92 | } 93 | } 94 | -------------------------------------------------------------------------------- /tests/properties/notifications/slack.rs: -------------------------------------------------------------------------------- 1 | //! Property-based tests for Slack notifications. 2 | //! 3 | //! These tests verify the behavior of the Slack notification system using property testing, 4 | //! focusing on template variable substitution, message formatting consistency, and edge cases. 5 | //! The tests ensure that the Slack notification system handles template variables correctly 6 | //! and produces consistent, well-formed output across various input combinations. 7 | 8 | use openzeppelin_monitor::services::notification::SlackNotifier; 9 | use proptest::{prelude::*, test_runner::Config}; 10 | use std::collections::HashMap; 11 | 12 | /// Generates a strategy for creating HashMaps containing template variable key-value pairs. 13 | /// Keys are alphanumeric strings of length 1-10, values are alphanumeric strings (with spaces) of 14 | /// length 1-20. 15 | fn template_variables_strategy() -> impl Strategy> { 16 | prop::collection::hash_map("[a-zA-Z0-9_]{1,10}", "[a-zA-Z0-9 ]{1,20}", 1..5) 17 | } 18 | 19 | proptest! 
{ 20 | #![proptest_config(Config { 21 | failure_persistence: None, 22 | ..Config::default() 23 | })] 24 | 25 | /// Tests that template formatting is idempotent - applying the same variables multiple times 26 | /// should produce identical results. 27 | /// 28 | /// # Properties tested 29 | /// - Multiple calls to format_message with the same variables should return identical results 30 | /// - Template can contain alphanumeric characters, spaces, $, {, }, and _ 31 | #[test] 32 | fn test_notification_template_idempotency( 33 | template in "[a-zA-Z0-9 ${}_]{1,100}", 34 | vars in template_variables_strategy() 35 | ) { 36 | let notifier = SlackNotifier::new( 37 | "https://hooks.slack.com/test".to_string(), 38 | "Test".to_string(), 39 | template.clone() 40 | ).unwrap(); 41 | 42 | let first_pass = notifier.format_message(&vars); 43 | let second_pass = notifier.format_message(&vars); 44 | 45 | prop_assert_eq!(first_pass, second_pass); 46 | } 47 | 48 | /// Tests that variable substitution handles variable boundaries correctly and doesn't result 49 | /// in partial or malformed substitutions. 50 | /// 51 | /// # Properties tested 52 | /// - Templates containing ${variable} patterns are processed correctly 53 | /// - No partial substitution artifacts (${, }) remain in the output 54 | #[test] 55 | fn test_notification_variable_boundaries( 56 | template in "[a-zA-Z0-9 ]{0,50}\\$\\{[a-z_]+\\}[a-zA-Z0-9 ]{0,50}", 57 | vars in template_variables_strategy() 58 | ) { 59 | let notifier = SlackNotifier::new( 60 | "https://hooks.slack.com/test".to_string(), 61 | "Test".to_string(), 62 | template.clone() 63 | ).unwrap(); 64 | 65 | let formatted = notifier.format_message(&vars); 66 | 67 | // Verify no partial variable substitutions occurred 68 | prop_assert!(!formatted.contains("${{")); 69 | prop_assert!(!formatted.contains("}}")); 70 | } 71 | 72 | /// Tests that templates with no matching variables remain unchanged. 
73 | /// 74 | /// # Properties tested 75 | /// - Template remains identical when processed with an empty variables map 76 | /// - The formatted message follows the expected Slack format: "*Title*\n\ntemplate" 77 | #[test] 78 | fn test_notification_empty_variables( 79 | template in "[a-zA-Z0-9 ${}_]{1,100}" 80 | ) { 81 | let notifier = SlackNotifier::new( 82 | "https://hooks.slack.com/test".to_string(), 83 | "Test".to_string(), 84 | template.clone() 85 | ).unwrap(); 86 | 87 | let empty_vars = HashMap::new(); 88 | let formatted = notifier.format_message(&empty_vars); 89 | 90 | // Template should remain unchanged when no variables are provided 91 | prop_assert_eq!(formatted, format!("*Test*\n\n{}", template)); 92 | } 93 | } 94 | -------------------------------------------------------------------------------- /tests/properties/notifications/telegram.rs: -------------------------------------------------------------------------------- 1 | //! Property-based tests for Telegram notifications. 2 | //! 3 | //! These tests verify the behavior of the Telegram notification system using property testing, 4 | //! focusing on template variable substitution, message formatting consistency, and edge cases. 5 | //! The tests ensure that the Telegram notification system handles template variables correctly 6 | //! and produces consistent, well-formed output across various input combinations. 7 | 8 | use openzeppelin_monitor::services::notification::TelegramNotifier; 9 | use proptest::{prelude::*, test_runner::Config}; 10 | use std::collections::HashMap; 11 | 12 | /// Generates a strategy for creating HashMaps containing template variable key-value pairs. 13 | /// Keys are alphanumeric strings of length 1-10, values are alphanumeric strings (with spaces) of 14 | /// length 1-20. 15 | fn template_variables_strategy() -> impl Strategy> { 16 | prop::collection::hash_map("[a-zA-Z0-9_]{1,10}", "[a-zA-Z0-9 ]{1,20}", 1..5) 17 | } 18 | 19 | proptest! 
{ 20 | #![proptest_config(Config { 21 | failure_persistence: None, 22 | ..Config::default() 23 | })] 24 | 25 | /// Tests that template formatting is idempotent - applying the same variables multiple times 26 | /// should produce identical results. 27 | /// 28 | /// # Properties tested 29 | /// - Multiple calls to format_message with the same variables should return identical results 30 | /// - Template can contain alphanumeric characters, spaces, $, {, }, and _ 31 | #[test] 32 | fn test_notification_template_idempotency( 33 | template in "[a-zA-Z0-9 ${}_]{1,100}", 34 | vars in template_variables_strategy() 35 | ) { 36 | let notifier = TelegramNotifier::new( 37 | Some("https://telegram.com/test".to_string()), 38 | "".to_string(), 39 | "".to_string(), 40 | None, 41 | "Test".to_string(), 42 | template.clone(), 43 | ).unwrap(); 44 | 45 | let first_pass = notifier.format_message(&vars); 46 | let second_pass = notifier.format_message(&vars); 47 | 48 | prop_assert_eq!(first_pass, second_pass); 49 | } 50 | 51 | /// Tests that variable substitution handles variable boundaries correctly and doesn't result 52 | /// in partial or malformed substitutions. 
53 | /// 54 | /// # Properties tested 55 | /// - Templates containing ${variable} patterns are processed correctly 56 | /// - No partial substitution artifacts (${, }) remain in the output 57 | #[test] 58 | fn test_notification_variable_boundaries( 59 | template in "[a-zA-Z0-9 ]{0,50}\\$\\{[a-z_]+\\}[a-zA-Z0-9 ]{0,50}", 60 | vars in template_variables_strategy() 61 | ) { 62 | let notifier = TelegramNotifier::new( 63 | Some( "https://telegram.com/test".to_string()), 64 | "".to_string(), 65 | "".to_string(), 66 | None, 67 | "Test".to_string(), 68 | 69 | template.clone(), 70 | ).unwrap(); 71 | 72 | let formatted = notifier.format_message(&vars); 73 | 74 | // Verify no partial variable substitutions occurred 75 | prop_assert!(!formatted.contains("${{")); 76 | prop_assert!(!formatted.contains("}}")); 77 | } 78 | 79 | /// Tests that templates with no matching variables remain unchanged. 80 | /// 81 | /// # Properties tested 82 | /// - Template remains identical when processed with an empty variables map 83 | /// - The formatted message follows the expected Telegram format: "*Title* \n\ntemplate" 84 | #[test] 85 | fn test_notification_empty_variables( 86 | template in "[a-zA-Z0-9 ${}_]{1,100}" 87 | ) { 88 | let notifier = TelegramNotifier::new( 89 | Some("https://telegram.com/test".to_string()), 90 | "".to_string(), 91 | "".to_string(), 92 | None, 93 | "Test".to_string(), 94 | template.clone(), 95 | ).unwrap(); 96 | 97 | let empty_vars = HashMap::new(); 98 | let formatted = notifier.format_message(&empty_vars); 99 | let escaped = TelegramNotifier::escape_markdown_v2(&template); 100 | 101 | // Template should remain unchanged when no variables are provided 102 | prop_assert_eq!(formatted, format!("*Test* \n\n{}", escaped)); 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /tests/properties/repositories/network.rs: -------------------------------------------------------------------------------- 1 | use 
crate::properties::strategies::network_strategy; 2 | 3 | use openzeppelin_monitor::{ 4 | models::{ConfigLoader, SecretString, SecretValue}, 5 | repositories::{NetworkRepository, NetworkRepositoryTrait}, 6 | }; 7 | use proptest::{prelude::*, test_runner::Config}; 8 | 9 | const MIN_TEST_CASES: usize = 1; 10 | const MAX_TEST_CASES: usize = 10; 11 | 12 | proptest! { 13 | #![proptest_config(Config { 14 | failure_persistence: None, 15 | ..Config::default() 16 | })] 17 | 18 | // Data Consistency & Round-trip Tests 19 | #[test] 20 | fn test_roundtrip( 21 | networks in proptest::collection::hash_map( 22 | "[a-z0-9_]{1,10}", 23 | network_strategy(), 24 | MIN_TEST_CASES..MAX_TEST_CASES 25 | ) 26 | ){ 27 | // Simulate saving and reloading from a repository 28 | let repo = NetworkRepository { networks: networks.clone() }; 29 | let reloaded_networks = repo.get_all(); 30 | 31 | prop_assert_eq!(networks, reloaded_networks); // Ensure roundtrip consistency 32 | } 33 | 34 | // Query Operations Tests 35 | #[test] 36 | fn test_query_operations( 37 | networks in proptest::collection::hash_map( 38 | "[a-z0-9_]{1,10}", 39 | network_strategy(), 40 | MIN_TEST_CASES..MAX_TEST_CASES 41 | ) 42 | ) { 43 | let repo = NetworkRepository { networks: networks.clone() }; 44 | 45 | // Test get by slug 46 | for (slug, network) in &networks { 47 | let retrieved = repo.get(slug); 48 | prop_assert_eq!(Some(network), retrieved.as_ref()); 49 | } 50 | 51 | // Test get_all consistency 52 | let all_networks = repo.get_all(); 53 | prop_assert_eq!(networks, all_networks); 54 | 55 | // Test non-existent slug 56 | prop_assert_eq!(None, repo.get("non_existent_slug")); 57 | } 58 | 59 | // Empty/Null Handling Tests 60 | #[test] 61 | fn test_empty_repository( 62 | _networks in proptest::collection::hash_map( 63 | "[a-zA-Z0-9_]{1,10}", 64 | network_strategy(), 65 | MIN_TEST_CASES..MAX_TEST_CASES 66 | ) 67 | ) { 68 | let empty_repo = NetworkRepository { networks: std::collections::HashMap::new() }; 69 | // Test empty 
repository operations 70 | prop_assert!(empty_repo.get_all().is_empty()); 71 | prop_assert_eq!(None, empty_repo.get("any_id")); 72 | } 73 | 74 | // Configuration Validation Tests 75 | #[test] 76 | fn test_config_validation( 77 | networks in proptest::collection::vec( 78 | network_strategy(), 79 | MIN_TEST_CASES..MAX_TEST_CASES 80 | ) 81 | ) { 82 | for network in networks { 83 | // Valid network should pass validation 84 | prop_assert!(network.validate().is_ok()); 85 | 86 | // Test invalid cases 87 | let mut invalid_network = network.clone(); 88 | invalid_network.block_time_ms = 50; // Too low block time 89 | prop_assert!(invalid_network.validate().is_err()); 90 | 91 | invalid_network = network.clone(); 92 | invalid_network.confirmation_blocks = 0; // Invalid confirmation blocks 93 | prop_assert!(invalid_network.validate().is_err()); 94 | 95 | invalid_network = network.clone(); 96 | invalid_network.rpc_urls[0].url = SecretValue::Plain(SecretString::new("invalid-url".to_string())); // Invalid RPC URL 97 | prop_assert!(invalid_network.validate().is_err()); 98 | 99 | invalid_network = network.clone(); 100 | invalid_network.slug = "INVALID_SLUG".to_string(); // Invalid slug with uppercase 101 | prop_assert!(invalid_network.validate().is_err()); 102 | 103 | invalid_network = network.clone(); 104 | invalid_network.name = "".to_string(); // Empty name 105 | prop_assert!(invalid_network.validate().is_err()); 106 | 107 | invalid_network = network.clone(); 108 | invalid_network.cron_schedule = "0 */1 * * *".to_string(); // Invalid cron schedule 109 | prop_assert!(invalid_network.validate().is_err()); 110 | } 111 | } 112 | } 113 | -------------------------------------------------------------------------------- /tests/properties/utils/logging.rs: -------------------------------------------------------------------------------- 1 | //! Property-based tests for logging. 2 | //! 3 | //! These tests verify the behavior of the `compute_rolled_file_path` function, 4 | //! 
focusing on template variable substitution and output consistency. 5 | //! The tests ensure that the logging system handles template variables correctly 6 | //! and produces consistent, well-formed output across various input combinations. 7 | //! 8 | //! Refer to `src/utils/logging/mod.rs` for more details. 9 | use openzeppelin_monitor::utils::logging::compute_rolled_file_path; 10 | use proptest::{prelude::*, test_runner::Config}; 11 | 12 | proptest! { 13 | // Set the number of cases to 1000 14 | #![proptest_config(Config { 15 | cases: 1000, ..Config::default() 16 | })] 17 | 18 | /// Property test for compute_rolled_file_path when base ends with ".log" 19 | #[test] 20 | fn prop_compute_rolled_file_path_with_log_suffix( 21 | base in ".*[^.]", 22 | // ensuring non-empty ending character in date 23 | date in "[0-9]{4}-[0-9]{2}-[0-9]{2}" 24 | ) { 25 | let base_with_log = format!("{}{}.log", base, ""); 26 | let result = compute_rolled_file_path(&base_with_log, &date, 1); 27 | let expected = format!("{}-{}.{}.log", base_with_log.strip_suffix(".log").unwrap(), date, 1); 28 | prop_assert_eq!(result, expected); 29 | } 30 | 31 | /// Property test for compute_rolled_file_path when base does not end with ".log" 32 | #[test] 33 | fn prop_compute_rolled_file_path_without_log_suffix( 34 | base in ".*", 35 | date in "[0-9]{4}-[0-9]{2}-[0-9]{2}" 36 | ) { 37 | // Ensure base does not end with ".log" 38 | let base_non_log = if base.ends_with(".log") 39 | { 40 | format!("{}x", base) 41 | } else { 42 | base 43 | }; 44 | let result = compute_rolled_file_path(&base_non_log, &date,1); 45 | let expected = format!("{}-{}.{}.log", base_non_log, date, 1); 46 | prop_assert_eq!(result, expected); 47 | } 48 | } 49 | --------------------------------------------------------------------------------