├── .github ├── CODEOWNERS ├── ISSUE_TEMPLATE │ ├── blank.md │ ├── config.yml │ ├── bug_report.yml │ └── feature.yml ├── dependabot.yml ├── workflows │ ├── pull-request.yml │ ├── release.yml │ ├── lint-pr.yml │ ├── linters.yml │ ├── rust.yml │ └── docker-build.yml ├── PULL_REQUEST_TEMPLATE.md └── labels.yml ├── .env.example ├── docs ├── images │ └── logo.jpeg ├── SECURITY.md ├── CONTRIBUTING.md └── CODE_OF_CONDUCT.md ├── Makefile ├── .markdownlint.json ├── rust-toolchain.toml ├── taplo ├── taplo.toml └── README.md ├── .gitignore ├── src ├── types │ ├── mod.rs │ ├── asset.rs │ ├── account.rs │ └── position.rs ├── utils │ ├── constants.rs │ ├── conversions.rs │ ├── mod.rs │ ├── services.rs │ └── ekubo.rs ├── storages │ ├── mod.rs │ └── json.rs ├── cli │ ├── account.rs │ └── mod.rs ├── main.rs ├── services │ ├── mod.rs │ ├── oracle.rs │ ├── monitoring.rs │ └── indexer.rs └── config.rs ├── .dockerignore ├── Dockerfile ├── LICENSE ├── Cargo.toml ├── config.yaml └── README.md /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @astraly-labs -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/blank.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: New blank issue 3 | about: New blank issue 4 | --- 5 | -------------------------------------------------------------------------------- /.env.example: -------------------------------------------------------------------------------- 1 | # Indexing 2 | APIBARA_API_KEY= 3 | 4 | # Liquidator 5 | ACCOUNT_ADDRESS= 6 | PRIVATE_KEY= 7 | -------------------------------------------------------------------------------- /docs/images/logo.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/astraly-labs/vesu-liquidator/HEAD/docs/images/logo.jpeg 
-------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | format: 2 | cargo fmt -- --check 3 | cargo clippy --no-deps -- -D warnings 4 | cargo clippy --tests --no-deps -- -D warnings 5 | -------------------------------------------------------------------------------- /.markdownlint.json: -------------------------------------------------------------------------------- 1 | { 2 | "MD033": false, 3 | "MD041": false, 4 | "MD045": false, 5 | "MD003": false, 6 | "MD013": { 7 | "code_blocks": false 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /rust-toolchain.toml: -------------------------------------------------------------------------------- 1 | [toolchain] 2 | channel = "1.87.0" 3 | components = ["rustfmt", "clippy", "rust-analyzer"] 4 | targets = ["wasm32-unknown-unknown"] 5 | profile = "minimal" 6 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "cargo" 4 | directory: "/" 5 | schedule: 6 | interval: "monthly" # can be `daily` or `monthly` also 7 | open-pull-requests-limit: 10 8 | -------------------------------------------------------------------------------- /taplo/taplo.toml: -------------------------------------------------------------------------------- 1 | include = ["**/*.toml"] 2 | exclude = ["**/bad.toml"] 3 | 4 | [formatting] 5 | align_entries = false 6 | 7 | [[rule]] 8 | keys = ["dependencies"] 9 | 10 | [rule.formatting] 11 | reorder_keys = true 12 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | .idea 3 | .env 4 | db/ 5 | infra/db/password.txt 6 | data.json 
7 | 8 | # Vscode settings 9 | .vscode 10 | 11 | # Vscode workspaces 12 | *.code-workspace 13 | 14 | # Indexer 15 | indexer.log 16 | run_indexer.sh 17 | src/bindings 18 | 19 | # Data 20 | vesu.json 21 | -------------------------------------------------------------------------------- /src/types/mod.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use starknet::providers::{JsonRpcClient, jsonrpc::HttpTransport}; 4 | 5 | pub mod account; 6 | pub mod asset; 7 | pub mod position; 8 | 9 | pub type StarknetSingleOwnerAccount = Arc< 10 | starknet::accounts::SingleOwnerAccount< 11 | Arc>, 12 | starknet::signers::LocalWallet, 13 | >, 14 | >; 15 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: true 2 | contact_links: 3 | - name: Support & Troubleshooting with Madara Community 4 | url: https://t.me/madara 5 | about: | 6 | For general problems with Madara or related technologies, please ask 7 | any questions in our community! We highly encourage everyone to also share their understanding by answering questions for others.
8 | -------------------------------------------------------------------------------- /.github/workflows/pull-request.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Workflow - Pull Request 3 | 4 | on: 5 | workflow_dispatch: 6 | pull_request: 7 | branches: [main] 8 | push: 9 | branches: [main] 10 | 11 | jobs: 12 | linters: 13 | name: Run linters 14 | uses: ./.github/workflows/linters.yml 15 | 16 | rust: 17 | name: Build, Format, Clippy, Machete, LLVM-Cov, Nextest 18 | uses: ./.github/workflows/rust.yml 19 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Workflow - Release 3 | 4 | on: 5 | workflow_dispatch: 6 | release: 7 | types: [published] 8 | 9 | jobs: 10 | docker_release_build: 11 | name: Docker release build 12 | uses: ./.github/workflows/docker-build.yml 13 | with: 14 | release_tag_name: ${{ github.event.release.tag_name }} 15 | docker_context: . 
16 | package_name: ghcr.io/astraly-labs/vesu-liquidator 17 | -------------------------------------------------------------------------------- /src/utils/constants.rs: -------------------------------------------------------------------------------- 1 | use cainome::cairo_serde::U256; 2 | 3 | use crate::bindings::liquidate::I129; 4 | 5 | // Decimals are always 18 for vesu response 6 | pub const VESU_RESPONSE_DECIMALS: i64 = 18; 7 | pub const MAX_RETRIES_VERIFY_TX_FINALITY: usize = 10; 8 | pub const INTERVAL_CHECK_TX_FINALITY: u64 = 3; 9 | 10 | pub const U256_ZERO: U256 = U256 { low: 0, high: 0 }; 11 | pub const I129_ZERO: I129 = I129 { 12 | mag: 0, 13 | sign: false, 14 | }; 15 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | # Exclude the Dockerfile itself 2 | Dockerfile 3 | 4 | # Exclude any Docker-related files 5 | .dockerignore 6 | docker-compose.yml 7 | 8 | # Exclude version control system files 9 | .git 10 | .gitignore 11 | 12 | # Exclude any large data files, if present 13 | *.data 14 | *.csv 15 | *.log 16 | 17 | # Exclude any other unnecessary files or directories 18 | node_modules 19 | *.md 20 | *.txt 21 | 22 | # Keep only necessary configuration files 23 | !config.yaml 24 | -------------------------------------------------------------------------------- /.github/workflows/lint-pr.yml: -------------------------------------------------------------------------------- 1 | name: "Task - Lint PR" 2 | 3 | on: 4 | pull_request_target: 5 | types: 6 | - opened 7 | - edited 8 | - synchronize 9 | - reopened 10 | 11 | permissions: 12 | pull-requests: read 13 | 14 | jobs: 15 | main: 16 | name: Validate PR title 17 | runs-on: ubuntu-latest 18 | steps: 19 | - uses: amannn/action-semantic-pull-request@v5 20 | env: 21 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 22 | 
-------------------------------------------------------------------------------- /.github/workflows/linters.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Task - Linters 3 | 4 | on: 5 | workflow_dispatch: 6 | workflow_call: 7 | 8 | jobs: 9 | prettier: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v3 13 | - name: Run prettier 14 | run: |- 15 | npx prettier --check . 16 | 17 | toml-lint: 18 | runs-on: ubuntu-latest 19 | steps: 20 | - name: Checkout toml files 21 | uses: actions/checkout@v3 22 | - name: Run toml check 23 | run: npx @taplo/cli fmt --config ./taplo/taplo.toml --check 24 | -------------------------------------------------------------------------------- /docs/SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | 100% security of the bot cannot be assured. It is provided "as is" without any warranty. Use at your own risk. 4 | 5 | ## Reporting a Vulnerability 6 | 7 | If there are any vulnerabilities in **Vesu-Liquidator**, don't hesitate to _report them_. 8 | 9 | 1. Use any of the [private contact addresses](https://github.com/astraly-labs/Vesu-liquidator#support). 10 | 2. Describe the vulnerability. 11 | 12 | If you have a fix, that is most welcome -- please attach or summarize it in your message! 13 | 14 | 3. We will evaluate the vulnerability and, if necessary, release a fix or mitigating steps to address it. We will contact you to let you know the outcome, and will credit you in the report. 15 | 16 | Please **do not disclose the vulnerability publicly** until a fix is released! 17 | 18 | 4. Once we have either a) published a fix, or b) declined to address the vulnerability for whatever reason, you are free to publicly disclose it.
19 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM lukemathwalker/cargo-chef:latest-rust-1.87.0 AS chef 2 | WORKDIR /app/ 3 | 4 | FROM chef AS planner 5 | COPY . . 6 | RUN cargo chef prepare --recipe-path recipe.json 7 | 8 | FROM chef AS builder 9 | RUN apt-get update && \ 10 | apt-get install -y pkg-config protobuf-compiler libprotobuf-dev libssl-dev 11 | COPY --from=planner /app/recipe.json recipe.json 12 | # Build dependencies - this is the caching Docker layer! 13 | RUN cargo chef cook --release --recipe-path recipe.json 14 | # Build application 15 | COPY . . 16 | RUN cargo build --release 17 | 18 | # We do not need the Rust toolchain to run the binary! 19 | FROM ubuntu:24.04 AS runtime 20 | RUN apt-get update && \ 21 | apt-get install -y tini ca-certificates && \ 22 | apt-get autoremove -y && \ 23 | apt-get clean && \ 24 | rm -rf /var/lib/apt/lists/* 25 | WORKDIR /app/ 26 | COPY --from=builder /app/target/release/vesu-liquidator /usr/local/bin 27 | COPY --from=builder /app/config.yaml . 
28 | 29 | ENTRYPOINT ["tini", "--", "vesu-liquidator"] 30 | CMD ["--help"] 31 | -------------------------------------------------------------------------------- /src/storages/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod json; 2 | 3 | use std::collections::HashMap; 4 | 5 | use anyhow::Result; 6 | use dashmap::DashMap; 7 | 8 | use crate::types::position::{self, Position}; 9 | 10 | #[derive(serde::Serialize, Default)] 11 | struct StoredData { 12 | last_block_indexed: u64, 13 | positions: HashMap, 14 | } 15 | 16 | impl StoredData { 17 | pub fn new(last_block_indexed: u64, positions: HashMap) -> Self { 18 | StoredData { 19 | last_block_indexed, 20 | positions, 21 | } 22 | } 23 | pub fn as_tuple(&self) -> (u64, HashMap) { 24 | (self.last_block_indexed, self.positions.clone()) 25 | } 26 | } 27 | 28 | #[async_trait::async_trait] 29 | pub trait Storage: Send + Sync { 30 | async fn load(&mut self) -> Result<(u64, HashMap)>; 31 | async fn save( 32 | &mut self, 33 | positions: &DashMap, 34 | last_block_indexed: u64, 35 | ) -> Result<()>; 36 | fn get_positions(&self) -> HashMap; 37 | } 38 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.yml: -------------------------------------------------------------------------------- 1 | name: Bug Report 2 | description: Let us know about an issue you experienced with this software 3 | labels: [I2-bug, I10-unconfirmed] 4 | 5 | body: 6 | - type: checkboxes 7 | attributes: 8 | label: Is there an existing issue? 9 | description: Please search to see if an issue already exists and leave a comment that you also experienced this issue or add your specifics that are related to an existing issue. 10 | options: 11 | - label: I have searched the existing issues 12 | required: true 13 | - type: textarea 14 | id: bug 15 | attributes: 16 | label: Description of bug 17 | description: What seems to be the problem? 
18 | # placeholder: Describe the problem. 19 | validations: 20 | required: true 21 | - type: textarea 22 | id: steps 23 | attributes: 24 | label: Steps to reproduce 25 | description: Provide the steps that led to the discovery of the issue. 26 | # placeholder: Describe what you were doing so we can reproduce the problem. 27 | validations: 28 | required: false 29 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) [2025] [Astraly Labs] 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /taplo/README.md: -------------------------------------------------------------------------------- 1 | # Taplo 2 | 3 | [Taplo](https://github.com/tamasfe/taplo) is a TOML validator and formatter. 
It 4 | provides a command-line interface (CLI) for working with TOML files. 5 | 6 | ## Installation 7 | 8 | You can install Taplo using either cargo or Yarn or NPM. 9 | 10 | ### Cargo 11 | 12 | ```bash 13 | cargo install taplo-cli --locked 14 | ``` 15 | 16 | ### Yarn 17 | 18 | ```bash 19 | yarn global add @taplo/cli 20 | ``` 21 | 22 | ### NPM 23 | 24 | ```bash 25 | npm install -g @taplo/cli 26 | ``` 27 | 28 | ### Usage 29 | 30 | To check your TOML files for formatting issues, use the following command: 31 | 32 | ```bash 33 | npx @taplo/cli fmt --config taplo.toml --check 34 | ``` 35 | 36 | To format all TOML files in your project, use the following command: 37 | 38 | ```bash 39 | npx @taplo/cli fmt --config taplo.toml 40 | ``` 41 | 42 | This command will automatically format the TOML files, ensuring consistent and 43 | readable formatting. 44 | 45 | ### Configuration 46 | 47 | Taplo allows you to customize the formatting rules by adding configuration 48 | options. You can find the available options and how to use them 49 | [here](https://taplo.tamasfe.dev/configuration/formatter-options.html). 50 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | # Pull Request type 4 | 5 | 6 | 7 | Please add the labels corresponding to the type of changes your PR introduces: 8 | 9 | - Bugfix 10 | - Feature 11 | - Code style update (formatting, renaming) 12 | - Refactoring (no functional changes, no API changes) 13 | - Build-related changes 14 | - Documentation content changes 15 | - Testing 16 | - Other (please describe): 17 | 18 | ## What is the current behavior? 19 | 20 | 21 | 22 | Resolves: #NA 23 | 24 | ## What is the new behavior? 25 | 26 | ## 27 | 28 | ## Does this introduce a breaking change? 
29 | 30 | 31 | 32 | 33 | ## Other information 34 | 35 | 36 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature.yml: -------------------------------------------------------------------------------- 1 | name: Feature Request 2 | description: Submit your requests and suggestions 3 | labels: [I5-enhancement] 4 | body: 5 | - type: checkboxes 6 | id: existing 7 | attributes: 8 | label: Is there an existing issue? 9 | description: Please search to see if an issue already exists and leave a comment that you also experienced this issue or add your specifics that are related to an existing issue. 10 | options: 11 | - label: I have searched the existing issues 12 | required: true 13 | - type: textarea 14 | id: motivation 15 | attributes: 16 | label: Motivation 17 | description: Please give context as to what led you to file this issue. 18 | validations: 19 | required: false 20 | - type: textarea 21 | id: request 22 | attributes: 23 | label: Request 24 | description: Please describe what is needed. 25 | validations: 26 | required: true 27 | - type: textarea 28 | id: solution 29 | attributes: 30 | label: Solution 31 | description: If possible, please describe what a solution could be. 32 | validations: 33 | required: false 34 | - type: dropdown 35 | id: help 36 | attributes: 37 | label: Are you willing to help with this request? 38 | multiple: true 39 | options: 40 | - Yes! 41 | - No.
42 | - Maybe (please elaborate above) 43 | validations: 44 | required: true 45 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "vesu-liquidator" 3 | version = "0.5.2" 4 | edition = "2024" 5 | license = "MIT" 6 | homepage = "https://www.vesu.xyz/" 7 | repository = "https://github.com/astraly-labs/Vesu-liquidator" 8 | description = "Liquidator bot for the Vesu Protocol" 9 | readme = "README.md" 10 | keywords = ["vesu", "liquidator", "bot", "starknet"] 11 | 12 | [dependencies] 13 | anyhow = "1.0" 14 | async-trait = "0.1" 15 | bigdecimal = { version = "0.4", features = ["serde"] } 16 | cainome = { git = "https://github.com/cartridge-gg/cainome", rev = "cb41794", features = [ 17 | "abigen-rs", 18 | ] } 19 | clap = { version = "4.5", features = ["derive", "env"] } 20 | colored = "2.1.0" 21 | dashmap = "6.1.0" 22 | dotenvy = "0.15.7" 23 | futures-util = "0.3.30" 24 | lazy_static = "1.5.0" 25 | reqwest = { version = "0.12", features = ["json"] } 26 | serde = "1.0" 27 | serde_json = "1.0" 28 | serde_yaml = "0.9" 29 | starknet = { version = "0.17.0" } 30 | strum = { version = "0.26", features = ["derive"] } 31 | tokio = { version = "1.40", features = ["full"] } 32 | tracing = "0.1" 33 | tracing-subscriber = { version = "0.3", features = [ 34 | "env-filter", 35 | "local-time", 36 | ] } 37 | url = "2.5" 38 | 39 | apibara-core = { git = "https://github.com/apibara/dna", rev = "9caa385" } 40 | apibara-sdk = { git = "https://github.com/apibara/dna", rev = "9caa385" } 41 | 42 | [build-dependencies] 43 | cainome = { git = "https://github.com/cartridge-gg/cainome", rev = "cb41794", features = [ 44 | "abigen-rs", 45 | ] } 46 | -------------------------------------------------------------------------------- /src/cli/account.rs: -------------------------------------------------------------------------------- 1 | use 
std::{path::PathBuf, str::FromStr}; 2 | 3 | use anyhow::{Result, anyhow}; 4 | use clap::Args; 5 | use starknet::core::types::Felt; 6 | 7 | fn parse_felt(s: &str) -> Result { 8 | Felt::from_str(s).map_err(|_| anyhow!("Could not convert {s} to Felt")) 9 | } 10 | 11 | #[derive(Clone, Debug, Args)] 12 | pub struct AccountParams { 13 | /// Account address of the liquidator account 14 | #[clap(long, value_parser = parse_felt, value_name = "LIQUIDATOR ACCOUNT ADDRESS", env = "ACCOUNT_ADDRESS")] 15 | pub account_address: Felt, 16 | 17 | /// Private key of the liquidator account 18 | #[clap(long, value_parser = parse_felt, value_name = "LIQUIDATOR PRIVATE KEY", env = "PRIVATE_KEY")] 19 | pub private_key: Option, 20 | 21 | /// Keystore path for the liquidator account 22 | #[clap(long, value_name = "LIQUIDATOR KEYSTORE")] 23 | pub keystore_path: Option, 24 | 25 | /// Keystore password for the liquidator account 26 | #[clap(long, value_name = "LIQUIDATOR KEYSTORE PASSWORD")] 27 | pub keystore_password: Option, 28 | } 29 | 30 | impl AccountParams { 31 | pub fn validate(&self) -> Result<()> { 32 | match ( 33 | &self.private_key, 34 | &self.keystore_path, 35 | &self.keystore_password, 36 | ) { 37 | (Some(_), None, None) => Ok(()), 38 | (None, Some(_), Some(_)) => Ok(()), 39 | _ => Err(anyhow!( 40 | "Missing liquidator account key. Use either (--private-key) or (--keystore-path + --keystore-password)." 
41 | )), 42 | } 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /.github/workflows/rust.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Task - Format, Clippy, Machete, LLVM-Cov, Nextest 3 | 4 | on: 5 | workflow_dispatch: 6 | workflow_call: 7 | 8 | jobs: 9 | rust_build: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v3 13 | 14 | # selecting a toolchain either by action or manual `rustup` calls should happen 15 | # before the plugin, as the cache uses the current rustc version as its cache key 16 | - run: rustup show 17 | 18 | - uses: Swatinem/rust-cache@v2 19 | 20 | - name: Install protobuf compiler 21 | run: | 22 | sudo apt-get update 23 | sudo apt-get install -y protobuf-compiler libprotobuf-dev 24 | protoc --version 25 | 26 | - name: Build the project 27 | run: | 28 | cargo build --release 29 | 30 | - name: Format and clippy 31 | run: | 32 | cargo fmt -- --check 33 | cargo clippy --no-deps -- -D warnings 34 | cargo clippy --tests --no-deps -- -D warnings 35 | 36 | - name: Check for unused dependencies 37 | uses: bnjbvr/cargo-machete@main 38 | 39 | - uses: taiki-e/install-action@cargo-llvm-cov 40 | - uses: taiki-e/install-action@nextest 41 | 42 | - name: Clean workspace 43 | run: | 44 | cargo llvm-cov clean --workspace 45 | 46 | - name: Run llvm-cov 47 | run: | 48 | cargo llvm-cov nextest --release --lcov --output-path lcov.info 49 | 50 | - name: Upload coverage to codecov.io 51 | uses: codecov/codecov-action@v3 52 | with: 53 | files: lcov.info 54 | fail_ci_if_error: false 55 | -------------------------------------------------------------------------------- /src/utils/conversions.rs: -------------------------------------------------------------------------------- 1 | use apibara_core::starknet::v1alpha2::FieldElement; 2 | use bigdecimal::BigDecimal; 3 | use bigdecimal::num_bigint::BigInt; 4 | use starknet::core::types::{Felt, U256}; 5 
| 6 | /// Converts an hexadecimal string with decimals to BigDecimal. 7 | pub fn hex_str_to_big_decimal(hex_price: &str, decimals: i64) -> BigDecimal { 8 | let cleaned_hex = hex_price.trim_start_matches("0x"); 9 | let price_bigint = BigInt::parse_bytes(cleaned_hex.as_bytes(), 16).unwrap(); 10 | BigDecimal::new(price_bigint, decimals) 11 | } 12 | 13 | /// Converts a Felt element from starknet-rs to a FieldElement from Apibara-core. 14 | pub fn felt_as_apibara_field(value: &Felt) -> FieldElement { 15 | FieldElement::from_bytes(&value.to_bytes_be()) 16 | } 17 | 18 | /// Converts an Apibara core FieldElement into a Felt from starknet-rs. 19 | pub fn apibara_field_as_felt(value: &FieldElement) -> Felt { 20 | Felt::from_bytes_be(&value.to_bytes()) 21 | } 22 | 23 | /// Converts a BigDecimal to a U256. 24 | pub fn big_decimal_to_u256(value: BigDecimal) -> U256 { 25 | U256::from(big_decimal_to_felt(value)) 26 | } 27 | 28 | pub fn big_decimal_to_felt(value: BigDecimal) -> Felt { 29 | let (amount, _): (BigInt, _) = value.as_bigint_and_exponent(); 30 | Felt::from(amount.clone()) 31 | } 32 | 33 | #[cfg(test)] 34 | mod test { 35 | use std::str::FromStr; 36 | 37 | use bigdecimal::{BigDecimal, num_bigint::BigInt}; 38 | 39 | use crate::utils::conversions::hex_str_to_big_decimal; 40 | 41 | #[test] 42 | fn test_hex_str_to_decimal() { 43 | assert_eq!( 44 | hex_str_to_big_decimal("0x100000000000", 3), 45 | BigDecimal::new(BigInt::from_str("17592186044416").unwrap(), 3) 46 | ); 47 | assert_eq!( 48 | hex_str_to_big_decimal("100000000000", 3), 49 | BigDecimal::new(BigInt::from_str("17592186044416").unwrap(), 3) 50 | ); 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | #[rustfmt::skip] 2 | pub mod bindings; 3 | pub mod cli; 4 | pub mod config; 5 | pub mod services; 6 | pub mod storages; 7 | pub mod types; 8 | pub mod utils; 9 | 10 | use 
std::sync::Arc; 11 | 12 | use anyhow::Result; 13 | use clap::Parser; 14 | use starknet::{ 15 | core::types::Felt, 16 | providers::{JsonRpcClient, jsonrpc::HttpTransport}, 17 | }; 18 | 19 | use cli::{NetworkName, RunCmd}; 20 | use config::Config; 21 | use services::start_all_services; 22 | use types::account::StarknetAccount; 23 | use utils::setup_tracing; 24 | 25 | #[tokio::main] 26 | async fn main() -> Result<()> { 27 | let _ = dotenvy::dotenv(); 28 | setup_tracing(); 29 | 30 | let mut run_cmd = RunCmd::parse(); 31 | run_cmd.validate()?; 32 | 33 | print_app_title(run_cmd.account_params.account_address, run_cmd.network); 34 | 35 | let rpc_url = run_cmd.rpc_url.clone(); 36 | let rpc_client = Arc::new(JsonRpcClient::new(HttpTransport::new(rpc_url))); 37 | let account = StarknetAccount::from_cli(rpc_client.clone(), run_cmd.clone())?; 38 | 39 | let config = Config::from_cli(&run_cmd)?; 40 | start_all_services(config, rpc_client, account, run_cmd).await 41 | } 42 | 43 | /// Prints information about the bot parameters. 
44 | fn print_app_title(account_address: Felt, network: NetworkName) { 45 | println!("\n 46 | ██╗ ██╗███████╗███████╗██╗ ██╗ ██╗ ██╗ ██████╗ ██╗ ██╗██╗██████╗ █████╗ ████████╗ ██████╗ ██████╗ 47 | ██║ ██║██╔════╝██╔════╝██║ ██║ ██║ ██║██╔═══██╗██║ ██║██║██╔══██╗██╔══██╗╚══██╔══╝██╔═══██╗██╔══██╗ 48 | ██║ ██║█████╗ ███████╗██║ ██║ ██║ ██║██║ ██║██║ ██║██║██║ ██║███████║ ██║ ██║ ██║██████╔╝ 49 | ╚██╗ ██╔╝██╔══╝ ╚════██║██║ ██║ ██║ ██║██║▄▄ ██║██║ ██║██║██║ ██║██╔══██║ ██║ ██║ ██║██╔══██╗ 50 | ╚████╔╝ ███████╗███████║╚██████╔╝ ███████╗██║╚██████╔╝╚██████╔╝██║██████╔╝██║ ██║ ██║ ╚██████╔╝██║ ██║ 51 | ╚═══╝ ╚══════╝╚══════╝ ╚═════╝ ╚══════╝╚═╝ ╚══▀▀═╝ ╚═════╝ ╚═╝╚═════╝ ╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═╝ 52 | 53 | 🤖 Liquidator 👉 0x{:x} 54 | 🎯 On {}", account_address, network); 55 | } 56 | -------------------------------------------------------------------------------- /src/utils/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod constants; 2 | pub mod conversions; 3 | pub mod ekubo; 4 | pub mod services; 5 | 6 | use std::{ 7 | sync::Arc, 8 | time::{Duration, SystemTime}, 9 | }; 10 | 11 | use anyhow::bail; 12 | use starknet::{ 13 | core::types::{ExecutionResult, Felt, StarknetError}, 14 | providers::{JsonRpcClient, Provider, ProviderError, jsonrpc::HttpTransport}, 15 | }; 16 | 17 | pub fn setup_tracing() { 18 | tracing_subscriber::fmt() 19 | .with_max_level(tracing::Level::INFO) 20 | .compact() 21 | .with_file(false) 22 | .with_line_number(false) 23 | .with_thread_ids(false) 24 | .with_target(false) 25 | .init(); 26 | } 27 | 28 | pub async fn wait_for_tx( 29 | rpc_client: &Arc>, 30 | tx_hash: Felt, 31 | ) -> anyhow::Result<()> { 32 | const WAIT_FOR_TX_TIMEOUT: Duration = Duration::from_secs(15); 33 | const CHECK_INTERVAL: Duration = Duration::from_secs(1); 34 | 35 | let start = SystemTime::now(); 36 | 37 | loop { 38 | if start.elapsed().unwrap() >= WAIT_FOR_TX_TIMEOUT { 39 | bail!("Timeout while waiting for transaction 
{tx_hash:#064x}"); 40 | } 41 | 42 | match rpc_client.get_transaction_receipt(tx_hash).await { 43 | Ok(tx) => match tx.receipt.execution_result() { 44 | ExecutionResult::Succeeded => { 45 | return Ok(()); 46 | } 47 | ExecutionResult::Reverted { reason } => { 48 | bail!(format!( 49 | "Transaction {tx_hash:#064x} has been rejected/reverted: {reason}" 50 | )); 51 | } 52 | }, 53 | Err(ProviderError::StarknetError(StarknetError::TransactionHashNotFound)) => { 54 | tracing::debug!("Waiting for transaction {tx_hash:#064x} to show up"); 55 | tokio::time::sleep(CHECK_INTERVAL).await; 56 | } 57 | Err(err) => { 58 | bail!("Error while waiting for transaction {tx_hash:#064x}: {err:?}"); 59 | } 60 | } 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /src/types/asset.rs: -------------------------------------------------------------------------------- 1 | use bigdecimal::BigDecimal; 2 | use serde::{Deserialize, Serialize}; 3 | use starknet::core::types::Felt; 4 | 5 | use crate::config::Config; 6 | 7 | #[derive(Default, Clone, Hash, Eq, PartialEq, Debug, Serialize, Deserialize)] 8 | pub struct Asset { 9 | pub name: String, 10 | pub address: Felt, 11 | pub amount: BigDecimal, 12 | pub decimals: i64, 13 | } 14 | 15 | impl Asset { 16 | pub fn from_address(config: &Config, address: Felt) -> Option { 17 | let name = config.get_asset_ticker_for_address(&address); 18 | let decimals = config.get_decimal_for_address(&address); 19 | 20 | match (name, decimals) { 21 | (Some(name), Some(decimals)) => Some(Self::new(name, address, decimals)), 22 | _ => None, 23 | } 24 | } 25 | 26 | pub fn new(name: String, address: Felt, decimals: i64) -> Self { 27 | Self { 28 | name, 29 | address, 30 | amount: BigDecimal::from(0), 31 | decimals, 32 | } 33 | } 34 | } 35 | 36 | #[cfg(test)] 37 | mod tests { 38 | use std::path::PathBuf; 39 | 40 | use bigdecimal::BigDecimal; 41 | use starknet::core::types::Felt; 42 | 43 | use crate::{cli::NetworkName, 
config::Config}; 44 | 45 | use super::Asset; 46 | 47 | #[test] 48 | fn test_asset_from_address() { 49 | let config = Config::new( 50 | NetworkName::Mainnet, 51 | crate::config::LiquidationMode::Full, 52 | &PathBuf::from("./config.yaml"), 53 | ) 54 | .unwrap(); 55 | let asset = Asset::from_address( 56 | &config, 57 | Felt::from_hex("0x049d36570d4e46f48e99674bd3fcc84644ddd6b96f7c741b1562b82f9e004dc7") 58 | .unwrap(), 59 | ) 60 | .unwrap(); 61 | assert_eq!(asset.name, "ETH"); 62 | assert_eq!( 63 | asset.address, 64 | Felt::from_hex("0x049d36570d4e46f48e99674bd3fcc84644ddd6b96f7c741b1562b82f9e004dc7") 65 | .unwrap() 66 | ); 67 | assert_eq!(asset.amount, BigDecimal::from(0)); 68 | assert_eq!(asset.decimals, 18); 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /docs/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | When contributing to this repository, please first discuss the change you wish to make via issue, email, or any other method with the owners of this repository before making a change. 4 | Please note we have a [code of conduct](CODE_OF_CONDUCT.md), please follow it in all your interactions with the project. 5 | 6 | ## Development environment setup 7 | 8 | > **[?]** 9 | > Proceed to describe how to setup local development environment. 10 | > e.g: 11 | 12 | To set up a development environment, please follow these steps: 13 | 14 | 1. Clone the repo 15 | 16 | ```sh 17 | git clone https://github.com/astraly-labs/Vesu-liquidator 18 | ``` 19 | 20 | 2. Install dependencies 21 | 22 | In order to run the liquidator, you need the protoc Protocol Buffers compiler, along with Protocol Buffers resource files. 23 | 24 | ##### Ubuntu 25 | 26 | ```sh 27 | sudo apt update && sudo apt upgrade -y 28 | sudo apt install -y protobuf-compiler libprotobuf-dev 29 | ``` 30 | 31 | ##### macOS 32 | 33 | Assuming Homebrew is already installed. 
Create a `.env` file based on `.env.example` and fill in the keys.
Open a Pull Request 66 | -------------------------------------------------------------------------------- /src/services/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod indexer; 2 | pub mod monitoring; 3 | pub mod oracle; 4 | 5 | use std::{cmp, sync::Arc}; 6 | 7 | use anyhow::Result; 8 | use starknet::providers::{JsonRpcClient, jsonrpc::HttpTransport}; 9 | use tokio::sync::mpsc::unbounded_channel; 10 | 11 | use oracle::{LatestOraclePrices, OracleService}; 12 | 13 | use crate::{ 14 | cli::RunCmd, 15 | config::Config, 16 | services::{indexer::IndexerService, monitoring::MonitoringService}, 17 | storages::{Storage, json::JsonStorage}, 18 | types::{account::StarknetAccount, position::Position}, 19 | utils::services::{Service, ServiceGroup}, 20 | }; 21 | 22 | /// Starts all the services needed by the Liquidator Bot. 23 | /// This include: 24 | /// - the indexer service, that indexes blocks & send positions, 25 | /// - the monitoring service, that monitors & liquidates positions. 
26 | pub async fn start_all_services( 27 | config: Config, 28 | rpc_client: Arc>, 29 | account: StarknetAccount, 30 | run_cmd: RunCmd, 31 | ) -> Result<()> { 32 | let (positions_sender, position_receiver) = unbounded_channel::<(u64, Position)>(); 33 | 34 | // TODO: Add new methods of storage (s3, postgres, sqlite) and be able to define them in CLI 35 | let mut storage = JsonStorage::new( 36 | run_cmd 37 | .storage_path 38 | .unwrap_or_default() 39 | .as_path() 40 | .to_str() 41 | .unwrap_or_default(), 42 | ); 43 | let (last_block_indexed, _) = storage.load().await?; 44 | 45 | let starting_block = cmp::max(run_cmd.starting_block, last_block_indexed); 46 | println!(" 🥡 Starting from block {}\n\n", starting_block); 47 | 48 | let indexer_service = IndexerService::new( 49 | config.clone(), 50 | run_cmd.apibara_api_key.unwrap(), 51 | positions_sender, 52 | starting_block, 53 | ); 54 | let latest_oracle_prices = LatestOraclePrices::from_config(&config); 55 | let oracle_service = OracleService::new( 56 | config.pragma_oracle_address, 57 | rpc_client.clone(), 58 | latest_oracle_prices.clone(), 59 | ); 60 | let monitoring_service = MonitoringService::new( 61 | config, 62 | rpc_client, 63 | account, 64 | position_receiver, 65 | latest_oracle_prices, 66 | Box::new(storage), 67 | ); 68 | 69 | ServiceGroup::default() 70 | .with(indexer_service) 71 | .with(oracle_service) 72 | .with(monitoring_service) 73 | .start_and_drive_to_end() 74 | .await?; 75 | 76 | Ok(()) 77 | } 78 | -------------------------------------------------------------------------------- /.github/labels.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "breaking-change" 3 | color: ee0701 4 | description: "A change that changes the API or breaks backward compatibility for users." 5 | - name: "bugfix" 6 | color: ee0701 7 | description: 8 | "Inconsistencies or issues which will cause a problem for users or 9 | implementors." 
10 | - name: "documentation" 11 | color: 0052cc 12 | description: "Solely about the documentation of the project." 13 | - name: "enhancement" 14 | color: 1d76db 15 | description: "Enhancement of the code, not introducing new features." 16 | - name: "refactor" 17 | color: 1d76db 18 | description: 19 | "Updating the code with simpler, easier to understand or more efficient 20 | syntax or methods, but not introducing new features." 21 | - name: "performance" 22 | color: 1d76db 23 | description: "Improving performance of the project, not introducing new features." 24 | - name: "new-feature" 25 | color: 0e8a16 26 | description: "New features or options." 27 | - name: "maintenance" 28 | color: 2af79e 29 | description: "Generic maintenance tasks." 30 | - name: "ci" 31 | color: 1d76db 32 | description: "Work that improves the continuous integration." 33 | - name: "dependencies" 34 | color: 1d76db 35 | description: "Change in project dependencies." 36 | 37 | - name: "in-progress" 38 | color: fbca04 39 | description: "Issue is currently being worked on by a developer." 40 | - name: "stale" 41 | color: fef2c0 42 | description: "No activity for quite some time." 43 | - name: "no-stale" 44 | color: fef2c0 45 | description: "This is exempt from the stale bot." 46 | 47 | - name: "security" 48 | color: ee0701 49 | description: "Addressing a vulnerability or security risk in this project." 50 | - name: "incomplete" 51 | color: fef2c0 52 | description: "Missing information." 53 | - name: "invalid" 54 | color: fef2c0 55 | description: "This is off-topic, spam, or otherwise doesn't apply to this project." 56 | 57 | - name: "beginner-friendly" 58 | color: 0e8a16 59 | description: "Good first issue for people wanting to contribute to this project." 60 | - name: "help-wanted" 61 | color: 0e8a16 62 | description: "We need some extra helping hands or expertise in order to resolve this!" 
63 | 64 | - name: "priority-critical" 65 | color: ee0701 66 | description: "Must be addressed as soon as possible." 67 | - name: "priority-high" 68 | color: b60205 69 | description: 70 | "After critical issues are fixed, these should be dealt with before any 71 | further issues." 72 | - name: "priority-medium" 73 | color: 0e8a16 74 | description: "This issue may be useful, and needs some attention." 75 | - name: "priority-low" 76 | color: e4ea8a 77 | description: "Nice addition, maybe... someday..." 78 | 79 | - name: "major" 80 | color: b60205 81 | description: "This PR causes a major bump in the version number." 82 | - name: "minor" 83 | color: 0e8a16 84 | description: "This PR causes a minor bump in the version number." 85 | -------------------------------------------------------------------------------- /src/utils/services.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Context; 2 | use std::panic; 3 | use tokio::task::JoinSet; 4 | 5 | /// Source: 6 | /// https://github.com/madara-alliance/madara/blob/main/crates/primitives/utils/src/service.rs 7 | /// - 8 | /// The app is divided into services, with each service having a different responsability within the app. 9 | /// 10 | /// This trait enables launching nested services and groups. 
11 | #[async_trait::async_trait] 12 | pub trait Service: 'static + Send + Sync { 13 | async fn start(&mut self, _join_set: &mut JoinSet>) -> anyhow::Result<()> { 14 | Ok(()) 15 | } 16 | 17 | async fn start_and_drive_to_end(mut self) -> anyhow::Result<()> 18 | where 19 | Self: Sized, 20 | { 21 | let mut join_set = JoinSet::new(); 22 | self.start(&mut join_set) 23 | .await 24 | .context("Starting service")?; 25 | drive_joinset(join_set).await 26 | } 27 | } 28 | 29 | pub struct ServiceGroup { 30 | services: Vec>, 31 | join_set: Option>>, 32 | } 33 | 34 | impl Default for ServiceGroup { 35 | fn default() -> Self { 36 | Self { 37 | services: vec![], 38 | join_set: Some(Default::default()), 39 | } 40 | } 41 | } 42 | 43 | impl ServiceGroup { 44 | pub fn new(services: Vec>) -> Self { 45 | Self { 46 | services, 47 | join_set: Some(Default::default()), 48 | } 49 | } 50 | 51 | /// Add a new service to the service group. 52 | pub fn push(&mut self, value: impl Service) { 53 | if self.join_set.is_none() { 54 | panic!("Cannot add services to a group that has been started.") 55 | } 56 | self.services.push(Box::new(value)); 57 | } 58 | 59 | pub fn with(mut self, value: impl Service) -> Self { 60 | self.push(value); 61 | self 62 | } 63 | } 64 | 65 | #[async_trait::async_trait] 66 | impl Service for ServiceGroup { 67 | async fn start(&mut self, join_set: &mut JoinSet>) -> anyhow::Result<()> { 68 | // drive the join set as a nested task 69 | let mut own_join_set = self 70 | .join_set 71 | .take() 72 | .expect("Service has already been started."); 73 | for svc in self.services.iter_mut() { 74 | svc.start(&mut own_join_set) 75 | .await 76 | .context("Starting service")?; 77 | } 78 | 79 | join_set.spawn(drive_joinset(own_join_set)); 80 | Ok(()) 81 | } 82 | } 83 | 84 | async fn drive_joinset(mut join_set: JoinSet>) -> anyhow::Result<()> { 85 | while let Some(result) = join_set.join_next().await { 86 | match result { 87 | Ok(result) => result?, 88 | Err(panic_error) if 
panic_error.is_panic() => { 89 | // bubble up panics too 90 | panic::resume_unwind(panic_error.into_panic()); 91 | } 92 | Err(_task_cancelled_error) => {} 93 | } 94 | } 95 | 96 | Ok(()) 97 | } 98 | -------------------------------------------------------------------------------- /src/cli/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod account; 2 | 3 | use std::{env, path::PathBuf}; 4 | use url::Url; 5 | 6 | use anyhow::{Result, anyhow}; 7 | use strum::Display; 8 | 9 | use account::AccountParams; 10 | 11 | use crate::config::LiquidationMode; 12 | 13 | fn parse_url(s: &str) -> Result { 14 | s.parse() 15 | .map_err(|_| anyhow!("Could not convert {s} to Url")) 16 | } 17 | 18 | #[derive(Clone, Debug, clap::Parser)] 19 | pub struct RunCmd { 20 | #[allow(missing_docs)] 21 | #[clap(flatten)] 22 | pub account_params: AccountParams, 23 | 24 | /// The network chain configuration. 25 | #[clap(long, short, value_name = "NETWORK NAME")] 26 | pub network: NetworkName, 27 | 28 | /// The rpc endpoint url. 29 | #[clap(long, value_parser = parse_url, value_name = "RPC URL")] 30 | pub rpc_url: Url, 31 | 32 | /// Configuration file path. 33 | #[clap(long, default_value = "config.yaml", value_name = "VESU CONFIG PATH")] 34 | pub config_path: Option, 35 | 36 | /// Configuration file path. 37 | #[clap(long, default_value = "data.json", value_name = "STORAGE PATH")] 38 | pub storage_path: Option, 39 | 40 | /// The block you want to start syncing from. 41 | #[clap(long, short, value_name = "BLOCK NUMBER")] 42 | pub starting_block: u64, 43 | 44 | /// Apibara API Key for indexing. 45 | #[clap(long, value_name = "APIBARA API KEY")] 46 | pub apibara_api_key: Option, 47 | 48 | /// Configuration file path. 49 | #[clap(long, value_enum, default_value_t = LiquidationMode::Full, value_name = "LIQUIDATION MODE")] 50 | pub liquidation_mode: LiquidationMode, 51 | } 52 | 53 | /// First blocks with Vesu activity. Not necessary to index before. 
54 | const FIRST_MAINNET_BLOCK: u64 = 1439949; 55 | const FIRST_SEPOLIA_BLOCK: u64 = 77860; 56 | 57 | impl RunCmd { 58 | pub fn validate(&mut self) -> Result<()> { 59 | self.account_params.validate()?; 60 | if self.apibara_api_key.is_none() { 61 | self.apibara_api_key = env::var("APIBARA_API_KEY").ok(); 62 | } 63 | if self.apibara_api_key.is_none() { 64 | return Err(anyhow!( 65 | "Apibara API Key is missing. Please provide at least one via command line arguments or environment variable." 66 | )); 67 | } 68 | 69 | match self.network { 70 | NetworkName::Mainnet => { 71 | if self.starting_block <= FIRST_MAINNET_BLOCK { 72 | self.starting_block = FIRST_MAINNET_BLOCK; 73 | } 74 | } 75 | NetworkName::Sepolia => { 76 | if self.starting_block <= FIRST_SEPOLIA_BLOCK { 77 | self.starting_block = FIRST_SEPOLIA_BLOCK; 78 | } 79 | } 80 | } 81 | Ok(()) 82 | } 83 | } 84 | 85 | /// Starknet network name. 86 | #[derive(Debug, Clone, Copy, clap::ValueEnum, PartialEq, Display)] 87 | pub enum NetworkName { 88 | #[strum(serialize = "Mainnet")] 89 | #[value(alias("mainnet"))] 90 | Mainnet, 91 | #[strum(serialize = "Sepolia")] 92 | #[value(alias("sepolia"))] 93 | Sepolia, 94 | } 95 | -------------------------------------------------------------------------------- /src/storages/json.rs: -------------------------------------------------------------------------------- 1 | use std::{fs::File, io::Write, path::PathBuf}; 2 | 3 | use anyhow::Result; 4 | use dashmap::DashMap; 5 | use serde_json::Value; 6 | use std::collections::HashMap; 7 | 8 | use crate::types::position::{self, Position}; 9 | 10 | use super::{Storage, StoredData}; 11 | 12 | pub struct JsonStorage { 13 | file_path: PathBuf, 14 | data: StoredData, 15 | } 16 | 17 | impl JsonStorage { 18 | pub fn new(path: &str) -> Self { 19 | JsonStorage { 20 | file_path: PathBuf::from(path), 21 | data: StoredData::default(), 22 | } 23 | } 24 | } 25 | 26 | #[async_trait::async_trait] 27 | impl Storage for JsonStorage { 28 | async fn load(&mut 
self) -> Result<(u64, HashMap)> { 29 | if !self.file_path.exists() { 30 | self.data = StoredData::new(0, HashMap::new()); 31 | return Ok(self.data.as_tuple()); 32 | } 33 | let json_value: Value = serde_json::from_reader(File::open(self.file_path.clone())?)?; 34 | let last_block_indexed: u64 = match json_value.get("last_block_indexed") { 35 | Some(Value::Number(lbi)) => { 36 | if lbi.is_u64() { 37 | lbi.as_u64().unwrap() 38 | } else { 39 | 0_u64 40 | } 41 | } 42 | _ => 0_u64, 43 | }; 44 | // no need to go further if last block indexed is genesis 45 | if last_block_indexed == 0 { 46 | self.data = StoredData::new(0, HashMap::new()); 47 | return Ok(self.data.as_tuple()); 48 | } 49 | let positions: HashMap = match json_value.get("positions") { 50 | Some(Value::Object(map)) => map 51 | .iter() 52 | .filter_map(|(key, value)| { 53 | let key = key.parse::().ok()?; 54 | let position: Position = serde_json::from_value(value.clone()).ok()?; 55 | Some((key, position)) 56 | }) 57 | .collect(), 58 | _ => HashMap::new(), 59 | }; 60 | self.data = StoredData::new(last_block_indexed, positions); 61 | Ok(self.data.as_tuple()) 62 | } 63 | 64 | async fn save( 65 | &mut self, 66 | positions: &DashMap, 67 | last_block_indexed: u64, 68 | ) -> Result<()> { 69 | let file_path = self.file_path.clone(); 70 | // Convert DashMap to HashMap for serialization 71 | let positions_map: HashMap = positions 72 | .iter() 73 | .map(|entry| (*entry.key(), entry.value().clone())) 74 | .collect(); 75 | 76 | let map = StoredData { 77 | last_block_indexed, 78 | positions: positions_map, 79 | }; 80 | 81 | let json = serde_json::to_string_pretty(&map)?; 82 | let mut file = File::create(file_path)?; 83 | file.write_all(json.as_bytes())?; 84 | Ok(()) 85 | } 86 | 87 | fn get_positions(&self) -> HashMap { 88 | self.data.positions.clone() 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /docs/CODE_OF_CONDUCT.md: 
-------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to make participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. 6 | 7 | ## Our Standards 8 | 9 | Examples of behavior that contributes to creating a positive environment include: 10 | 11 | - Using welcoming and inclusive language 12 | - Being respectful of differing viewpoints and experiences 13 | - Gracefully accepting constructive criticism 14 | - Focusing on what is best for the community 15 | - Showing empathy towards other community members 16 | 17 | Examples of unacceptable behavior by participants include: 18 | 19 | - The use of sexualized language or imagery and unwelcome sexual attention or advances 20 | - Trolling, insulting/derogatory comments, and personal or political attacks 21 | - Public or private harassment 22 | - Publishing others' private information, such as a physical or electronic address, without explicit permission 23 | - Other conduct that could reasonably be considered inappropriate in a professional setting 24 | 25 | ## Our Responsibilities 26 | 27 | Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. 
28 | 29 | Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned with this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. 30 | 31 | ## Scope 32 | 33 | This Code of Conduct applies within all project spaces, and it also applies when an individual is representing the project or its community in public spaces. Examples of representing a project or community include using an official project email address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. 34 | 35 | ## Enforcement 36 | 37 | Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project maintainer using any of the [private contact addresses](https://github.com/astraly-labs/Vesu-liquidator#Support). All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. 38 | 39 | Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. 
## Attribution

This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org), version 1.4, available at <https://www.contributor-covenant.org/version/1/4/code-of-conduct.html>.

For answers to common questions about this code of conduct, see <https://www.contributor-covenant.org/faq>.
ticker: "USDT" 35 | decimals: 6 36 | mainnet_address: "0x068f5c6a61780768455de69077e07e89787839bf8166decfbf92b645209c0fb8" 37 | sepolia_address: "0x002cd937c3dccd4a4e125011bbe3189a6db0419bb6dd95c4b5ce5f6d834d8996" 38 | 39 | - name: "wrapped-steth" 40 | ticker: "WSTETH" 41 | decimals: 18 42 | mainnet_address: "0x57912720381af14b0e5c87aa4718ed5e527eab60b3801ebf702ab09139e38b" 43 | sepolia_address: "0x057181b39020af1416747a7d0d2de6ad5a5b721183136585e8774e1425efd5d2" 44 | 45 | - name: "starknet" 46 | ticker: "STRK" 47 | decimals: 18 48 | mainnet_address: "0x4718f5a0fc34cc1af16a1cdee98ffb20c31f5cd61d6ab07201858f4287c938d" 49 | sepolia_address: "0x4718f5a0fc34cc1af16a1cdee98ffb20c31f5cd61d6ab07201858f4287c938d" 50 | 51 | - name: "Endur xSTRK" 52 | ticker: "XSTRK" 53 | decimals: 18 54 | mainnet_address: "0x28d709c875C0CEAc3dCE7065beC5328186Dc89FE254527084D1689910954B0a" 55 | sepolia_address: "0x28d709c875C0CEAc3dCE7065beC5328186Dc89FE254527084D1689910954B0a" 56 | 57 | - name: "Staked Starknet Token" 58 | ticker: "SSTRK" 59 | decimals: 18 60 | mainnet_address: "0x772131070c7d56f78f3e46b27b70271d8ca81c7c52e3f62aa868fab4b679e43" 61 | sepolia_address: "0x772131070c7d56f78f3e46b27b70271d8ca81c7c52e3f62aa868fab4b679e43" 62 | 63 | - name: "kSTRK Token" 64 | ticker: "KSTRK" 65 | decimals: 18 66 | mainnet_address: "0x45cd05ee2caaac3459b87e5e2480099d201be2f62243f839f00e10dde7f500c" 67 | sepolia_address: "0x45cd05ee2caaac3459b87e5e2480099d201be2f62243f839f00e10dde7f500c" 68 | 69 | - name: "Relend Network USDC" 70 | ticker: "rUSDC" 71 | decimals: 6 72 | mainnet_address: "0x2019e47a0bc54ea6b4853c6123ffc8158ea3ae2af4166928b0de6e89f06de6c" 73 | sepolia_address: "0x2019e47a0bc54ea6b4853c6123ffc8158ea3ae2af4166928b0de6e89f06de6c" 74 | 75 | - name: "Ekubo" 76 | ticker: "EKUBO" 77 | decimals: 18 78 | mainnet_address: "0x75afe6402ad5a5c20dd25e10ec3b3986acaa647b77e4ae24b0cbc9a54a27a87" 79 | sepolia_address: "0x75afe6402ad5a5c20dd25e10ec3b3986acaa647b77e4ae24b0cbc9a54a27a87" 80 | 81 | - name: 
"Staked Starknet Token" 82 | ticker: "sSTRK" 83 | decimals: 18 84 | mainnet_address: "0x356f304b154d29d2a8fe22f1cb9107a9b564a733cf6b4cc47fd121ac1af90c9" 85 | sepolia_address: "0x356f304b154d29d2a8fe22f1cb9107a9b564a733cf6b4cc47fd121ac1af90c9" 86 | -------------------------------------------------------------------------------- /.github/workflows/docker-build.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Task - Build and Push Docker Image 3 | 4 | on: 5 | workflow_dispatch: 6 | inputs: 7 | release_tag_name: 8 | description: "Release tag name" 9 | type: string 10 | required: true 11 | docker_context: 12 | description: "Docker context" 13 | type: string 14 | required: true 15 | package_name: 16 | description: "Package name" 17 | type: string 18 | required: true 19 | workflow_call: 20 | inputs: 21 | release_tag_name: 22 | description: "Release tag name" 23 | type: string 24 | required: true 25 | docker_context: 26 | description: "Docker context" 27 | type: string 28 | required: true 29 | package_name: 30 | description: "Package name" 31 | type: string 32 | required: true 33 | 34 | jobs: 35 | build: 36 | runs-on: ubuntu-latest 37 | strategy: 38 | fail-fast: false 39 | matrix: 40 | platform: 41 | - linux/amd64 42 | - linux/arm64 43 | steps: 44 | - name: Checkout 45 | uses: actions/checkout@v3 46 | 47 | - name: Docker meta 48 | id: meta 49 | uses: docker/metadata-action@v4 50 | with: 51 | images: ${{ inputs.package_name }} 52 | tags: | 53 | type=raw,value=${{ inputs.release_tag_name }} 54 | 55 | - name: Set up QEMU 56 | uses: docker/setup-qemu-action@v2 57 | 58 | - name: Set up Docker Buildx 59 | uses: docker/setup-buildx-action@v2 60 | 61 | - name: Login to GitHub Container Registry 62 | uses: docker/login-action@v2 63 | with: 64 | registry: ghcr.io 65 | username: ${{ github.actor }} 66 | password: ${{ secrets.GITHUB_TOKEN }} 67 | 68 | - name: Build and push by digest 69 | id: build 70 | uses: 
docker/build-push-action@v4 71 | with: 72 | context: ${{ inputs.docker_context }} 73 | file: Dockerfile 74 | platforms: ${{ matrix.platform }} 75 | labels: ${{ steps.meta.outputs.labels }} 76 | outputs: type=image,name=${{ inputs.package_name }},push-by-digest=true,name-canonical=true,push=true 77 | 78 | - name: Export digest 79 | run: | 80 | mkdir -p /tmp/digests 81 | digest="${{ steps.build.outputs.digest }}" 82 | touch "/tmp/digests/${digest#sha256:}" 83 | 84 | - name: Upload digest 85 | uses: actions/upload-artifact@v4 86 | with: 87 | name: digests-${{ matrix.platform == 'linux/amd64' && 'linux-amd64' || 'linux-arm64' }} 88 | path: /tmp/digests/* 89 | if-no-files-found: error 90 | retention-days: 1 91 | 92 | merge: 93 | runs-on: ubuntu-latest 94 | needs: 95 | - build 96 | steps: 97 | - name: Download digests 98 | uses: actions/download-artifact@v4 99 | with: 100 | pattern: digests-* 101 | path: /tmp/digests 102 | merge-multiple: true 103 | 104 | - name: Set up Docker Buildx 105 | uses: docker/setup-buildx-action@v2 106 | 107 | - name: Docker meta 108 | id: meta 109 | uses: docker/metadata-action@v4 110 | with: 111 | images: ${{ inputs.package_name }} 112 | tags: | 113 | type=raw,value=${{ inputs.release_tag_name }} 114 | 115 | - name: Login to GitHub Container Registry 116 | uses: docker/login-action@v2 117 | with: 118 | registry: ghcr.io 119 | username: ${{ github.actor }} 120 | password: ${{ secrets.GITHUB_TOKEN }} 121 | 122 | - name: Create manifest list and push 123 | working-directory: /tmp/digests 124 | run: | 125 | tags=$(jq -cr '.tags | map("-t " + .) 
| join(" ")' <<< '${{ steps.meta.outputs.json }}') 126 | if [[ -z "$tags" ]]; then 127 | echo "No tags specified, skipping docker buildx imagetools create command" 128 | else 129 | docker buildx imagetools create $tags \ 130 | $(printf '${{ inputs.package_name }}@sha256:%s ' *) 131 | fi 132 | 133 | - name: Inspect image 134 | run: | 135 | docker buildx imagetools inspect ${{ inputs.package_name }}:${{ steps.meta.outputs.version }} 136 | -------------------------------------------------------------------------------- /src/types/account.rs: -------------------------------------------------------------------------------- 1 | use std::{path::PathBuf, sync::Arc}; 2 | 3 | use anyhow::Result; 4 | use bigdecimal::BigDecimal; 5 | use bigdecimal::num_bigint::ToBigInt; 6 | use starknet::{ 7 | accounts::{Account, ExecutionEncoding, SingleOwnerAccount}, 8 | core::{ 9 | chain_id, 10 | types::{BlockId, BlockTag, Call, Felt}, 11 | }, 12 | providers::{JsonRpcClient, jsonrpc::HttpTransport}, 13 | signers::{LocalWallet, SigningKey}, 14 | }; 15 | 16 | use crate::{ 17 | cli::{NetworkName, RunCmd}, 18 | utils::constants::VESU_RESPONSE_DECIMALS, 19 | }; 20 | 21 | pub struct StarknetAccount( 22 | pub Arc>, LocalWallet>>, 23 | ); 24 | 25 | impl StarknetAccount { 26 | /// Creates a StarknetAccount from the CLI args 27 | pub fn from_cli( 28 | rpc_client: Arc>, 29 | run_cmd: RunCmd, 30 | ) -> Result { 31 | let mut builder = StarknetAccountBuilder::default(); 32 | 33 | builder = match run_cmd.network { 34 | NetworkName::Mainnet => builder.on_mainnet(), 35 | NetworkName::Sepolia => builder.on_sepolia(), 36 | }; 37 | 38 | builder = builder 39 | .as_account(run_cmd.account_params.account_address) 40 | .with_provider(rpc_client); 41 | 42 | if let Some(private_key) = run_cmd.account_params.private_key { 43 | builder.from_secret(private_key) 44 | } else { 45 | builder.from_keystore( 46 | run_cmd.account_params.keystore_path.unwrap(), 47 | &run_cmd.account_params.keystore_password.unwrap(), 48 | ) 49 
| } 50 | } 51 | 52 | /// Returns the account_address of the Account. 53 | pub fn account_address(&self) -> Felt { 54 | self.0.address() 55 | } 56 | 57 | /// Simulate a set of TXs and return the estimation of the fee necessary 58 | /// to execute them. 59 | pub async fn estimate_fees_cost(&self, txs: &[Call]) -> Result { 60 | let estimation = self.0.execute_v3(txs.to_vec()).estimate_fee().await?; 61 | Ok(BigDecimal::new( 62 | estimation.overall_fee.to_bigint().unwrap(), 63 | VESU_RESPONSE_DECIMALS, 64 | )) 65 | } 66 | 67 | /// Executes a set of transactions and returns the transaction hash. 68 | pub async fn execute_txs(&self, txs: &[Call]) -> Result { 69 | let res = self 70 | .0 71 | .execute_v3(txs.to_vec()) 72 | .send() 73 | .await 74 | .map_err(|e| anyhow::anyhow!(format!("{:?}", e)))?; 75 | Ok(res.transaction_hash) 76 | } 77 | } 78 | 79 | #[derive(Debug, Default)] 80 | pub struct StarknetAccountBuilder { 81 | account_address: Option, 82 | chain_id: Option, 83 | rpc_client: Option>>, 84 | } 85 | 86 | impl StarknetAccountBuilder { 87 | pub fn new() -> Self { 88 | StarknetAccountBuilder::default() 89 | } 90 | 91 | pub fn on_mainnet(mut self) -> Self { 92 | self.chain_id = Some(chain_id::MAINNET); 93 | self 94 | } 95 | 96 | pub fn on_sepolia(mut self) -> Self { 97 | self.chain_id = Some(chain_id::SEPOLIA); 98 | self 99 | } 100 | pub fn as_account(mut self, account_address: Felt) -> Self { 101 | self.account_address = Some(account_address); 102 | self 103 | } 104 | 105 | pub fn with_provider(mut self, rpc_client: Arc>) -> Self { 106 | self.rpc_client = Some(rpc_client); 107 | self 108 | } 109 | 110 | pub fn from_secret(self, private_key: Felt) -> Result { 111 | let signing_key = SigningKey::from_secret_scalar(private_key); 112 | let signer = LocalWallet::from(signing_key); 113 | self.build(signer) 114 | } 115 | 116 | pub fn from_keystore( 117 | self, 118 | keystore_path: PathBuf, 119 | keystore_password: &str, 120 | ) -> Result { 121 | let signing_key = 
SigningKey::from_keystore(keystore_path, keystore_password)?; 122 | let signer = LocalWallet::from(signing_key); 123 | self.build(signer) 124 | } 125 | 126 | fn build(self, signer: LocalWallet) -> Result { 127 | let mut account = SingleOwnerAccount::new( 128 | self.rpc_client.unwrap(), 129 | signer, 130 | self.account_address.unwrap(), 131 | self.chain_id.unwrap(), 132 | ExecutionEncoding::New, 133 | ); 134 | 135 | account.set_block_id(BlockId::Tag(BlockTag::PreConfirmed)); 136 | 137 | Ok(StarknetAccount(Arc::new(account))) 138 | } 139 | } 140 | -------------------------------------------------------------------------------- /src/services/oracle.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | use std::time::Duration; 3 | 4 | use anyhow::Result; 5 | use bigdecimal::BigDecimal; 6 | use dashmap::DashMap; 7 | use futures_util::future::join_all; 8 | use starknet::core::types::{BlockId, BlockTag, Felt, FunctionCall}; 9 | use starknet::core::utils::{cairo_short_string_to_felt, get_selector_from_name}; 10 | use starknet::providers::jsonrpc::HttpTransport; 11 | use starknet::providers::{JsonRpcClient, Provider}; 12 | use tokio::task::JoinSet; 13 | 14 | use crate::config::Config; 15 | use crate::utils::conversions::hex_str_to_big_decimal; 16 | use crate::utils::services::Service; 17 | 18 | const LST_ASSETS: [&str; 3] = ["xstrk", "sstrk", "kstrk"]; 19 | 20 | /// Aggregations possible using the Pragma Oracle contract. 21 | #[derive(Debug, Clone, Copy, PartialEq, Eq)] 22 | pub enum AggregationMode { 23 | Median, 24 | Mean, 25 | ConversionRate, 26 | } 27 | 28 | impl AggregationMode { 29 | pub fn to_felt(&self) -> Felt { 30 | match self { 31 | AggregationMode::Median => Felt::ZERO, 32 | AggregationMode::Mean => Felt::ONE, 33 | AggregationMode::ConversionRate => Felt::TWO, 34 | } 35 | } 36 | } 37 | 38 | /// Map contaning the price in dollars for a list of monitored assets. 
#[derive(Default, Clone)]
pub struct LatestOraclePrices(pub Arc<DashMap<String, BigDecimal>>);

impl LatestOraclePrices {
    /// Builds the price map from the configuration, seeding every monitored
    /// ticker (lower-cased) with a default price of zero.
    pub fn from_config(config: &Config) -> Self {
        let seeded: DashMap<String, BigDecimal> = config
            .assets
            .iter()
            .map(|asset| (asset.ticker.to_lowercase(), BigDecimal::default()))
            .collect();
        LatestOraclePrices(Arc::new(seeded))
    }
}

/// Service periodically polling the Pragma oracle for fresh USD prices.
#[derive(Clone)]
pub struct OracleService {
    pragma_address: Felt,
    rpc_client: Arc<JsonRpcClient<HttpTransport>>,
    latest_prices: LatestOraclePrices,
}

#[async_trait::async_trait]
impl Service for OracleService {
    async fn start(&mut self, join_set: &mut JoinSet<anyhow::Result<()>>) -> anyhow::Result<()> {
        let service = self.clone();
        join_set.spawn(async move {
            tracing::info!("🔮 Oracle service started");
            service.run_forever().await?;
            Ok(())
        });
        Ok(())
    }
}

impl OracleService {
    /// Creates a new oracle service polling `pragma_address` through the
    /// given RPC client and writing into `latest_prices`.
    pub fn new(
        pragma_address: Felt,
        rpc_client: Arc<JsonRpcClient<HttpTransport>>,
        latest_prices: LatestOraclePrices,
    ) -> Self {
        Self {
            pragma_address,
            rpc_client,
            latest_prices,
        }
    }

    /// Polling loop: refresh every monitored price, then sleep
    /// `PRICES_UPDATE_INTERVAL` seconds — forever.
    pub async fn run_forever(self) -> Result<()> {
        const PRICES_UPDATE_INTERVAL: u64 = 3;
        let refresh_period = Duration::from_secs(PRICES_UPDATE_INTERVAL);
        loop {
            self.update_prices().await?;
            tokio::time::sleep(refresh_period).await;
        }
    }

    /// Update all the monitored assets with their latest USD price asynchronously.
97 | async fn update_prices(&self) -> Result<()> { 98 | let assets: Vec = self 99 | .latest_prices 100 | .0 101 | .iter() 102 | .map(|entry| entry.key().clone()) 103 | .collect(); 104 | 105 | let fetch_tasks = assets.into_iter().map(|asset| async move { 106 | let price = self.get_price_in_dollars(&asset).await; 107 | (asset, price) 108 | }); 109 | 110 | let results = join_all(fetch_tasks).await; 111 | 112 | for (asset, price_result) in results { 113 | if let Ok(price) = price_result { 114 | self.latest_prices.0.insert(asset, price); 115 | } 116 | } 117 | 118 | Ok(()) 119 | } 120 | 121 | async fn get_price_in_dollars(&self, base_asset: &str) -> Result { 122 | let pair = format!("{}/USD", base_asset.to_ascii_uppercase()); 123 | 124 | let aggregation_mode = if LST_ASSETS.contains(&base_asset) { 125 | AggregationMode::ConversionRate 126 | } else { 127 | AggregationMode::Median 128 | }; 129 | 130 | let price_request = FunctionCall { 131 | contract_address: self.pragma_address, 132 | entry_point_selector: get_selector_from_name("get_data")?, 133 | calldata: vec![ 134 | Felt::ZERO, 135 | cairo_short_string_to_felt(&pair)?, 136 | aggregation_mode.to_felt(), 137 | ], 138 | }; 139 | 140 | let call_result = self 141 | .rpc_client 142 | .call(price_request, BlockId::Tag(BlockTag::PreConfirmed)) 143 | .await?; 144 | 145 | let asset_price = hex_str_to_big_decimal( 146 | &call_result[0].to_hex_string(), 147 | call_result[1].to_bigint().try_into()?, 148 | ); 149 | 150 | Ok(asset_price) 151 | } 152 | } 153 | -------------------------------------------------------------------------------- /src/config.rs: -------------------------------------------------------------------------------- 1 | use std::fs; 2 | use std::{collections::HashMap, path::PathBuf}; 3 | 4 | use anyhow::Result; 5 | use clap::ValueEnum; 6 | use lazy_static::lazy_static; 7 | use serde::{Deserialize, Serialize}; 8 | use starknet::core::types::Felt; 9 | use starknet::core::utils::get_selector_from_name; 10 | 11 | use 
crate::cli::{NetworkName, RunCmd}; 12 | 13 | // Contract selectors 14 | lazy_static! { 15 | pub static ref MODIFY_POSITION_EVENT: Felt = get_selector_from_name("ModifyPosition").unwrap(); 16 | pub static ref MIGRATE_POSITION_EVENT: Felt = 17 | get_selector_from_name("MigratePosition").unwrap(); 18 | pub static ref VESU_POSITION_UNSAFE_SELECTOR: Felt = 19 | get_selector_from_name("position_unsafe").unwrap(); 20 | pub static ref VESU_LTV_CONFIG_SELECTOR: Felt = get_selector_from_name("ltv_config").unwrap(); 21 | pub static ref FLASH_LOAN_SELECTOR: Felt = get_selector_from_name("flash_loan").unwrap(); 22 | pub static ref LIQUIDATE_SELECTOR: Felt = get_selector_from_name("liquidate_position").unwrap(); 23 | pub static ref LIQUIDATION_CONFIG_SELECTOR: Felt = 24 | get_selector_from_name("liquidation_config").unwrap(); 25 | } 26 | 27 | #[derive(Debug, Clone, Copy, PartialEq, Eq, ValueEnum)] 28 | pub enum LiquidationMode { 29 | Full, 30 | Partial, 31 | } 32 | 33 | impl LiquidationMode { 34 | pub fn as_bool(&self) -> bool { 35 | match self { 36 | LiquidationMode::Full => true, 37 | LiquidationMode::Partial => false, 38 | } 39 | } 40 | } 41 | 42 | #[derive(Debug, Clone)] 43 | pub struct Config { 44 | pub network: NetworkName, 45 | pub singleton_address: Felt, 46 | pub extension_address: Felt, 47 | pub liquidate_address: Felt, 48 | pub pragma_oracle_address: Felt, 49 | pub assets: Vec, 50 | pub asset_map: HashMap, 51 | pub liquidation_mode: LiquidationMode, 52 | } 53 | 54 | impl Config { 55 | pub fn from_cli(run_cmd: &RunCmd) -> Result { 56 | let config_path = run_cmd.config_path.clone().unwrap_or_default(); 57 | let network = run_cmd.network; 58 | let liquidation_mode = run_cmd.liquidation_mode; 59 | 60 | Self::new(network, liquidation_mode, &config_path) 61 | } 62 | 63 | pub fn new( 64 | network: NetworkName, 65 | liquidation_mode: LiquidationMode, 66 | config_path: &PathBuf, 67 | ) -> Result { 68 | let raw_config: RawConfig = { 69 | let config_str = 
fs::read_to_string(config_path)?; 70 | serde_yaml::from_str(&config_str)? 71 | }; 72 | 73 | let network_config = match network { 74 | NetworkName::Mainnet => &raw_config.vesu.mainnet, 75 | NetworkName::Sepolia => &raw_config.vesu.sepolia, 76 | }; 77 | 78 | let singleton_address = Felt::from_hex(&network_config.singleton_address)?; 79 | let extension_address = Felt::from_hex(&network_config.extension_address)?; 80 | let liquidate_address = Felt::from_hex(&network_config.liquidate_address)?; 81 | let pragma_oracle_address = Felt::from_hex(&network_config.pragma_oracle_address)?; 82 | 83 | let assets = raw_config.assets; 84 | let asset_map = assets 85 | .iter() 86 | .filter_map(|asset| { 87 | let address = match network { 88 | NetworkName::Mainnet => Felt::from_hex(&asset.mainnet_address), 89 | NetworkName::Sepolia => Felt::from_hex(&asset.sepolia_address), 90 | }; 91 | address.ok().map(|addr| (addr, asset.clone())) 92 | }) 93 | .collect(); 94 | 95 | let config = Config { 96 | network, 97 | singleton_address, 98 | extension_address, 99 | liquidate_address, 100 | pragma_oracle_address, 101 | assets, 102 | asset_map, 103 | liquidation_mode, 104 | }; 105 | 106 | Ok(config) 107 | } 108 | 109 | pub fn get_asset_ticker_for_address(&self, address: &Felt) -> Option { 110 | self.asset_map 111 | .get(address) 112 | .map(|asset| asset.ticker.clone()) 113 | } 114 | 115 | pub fn get_decimal_for_address(&self, address: &Felt) -> Option { 116 | self.asset_map.get(address).map(|asset| asset.decimals) 117 | } 118 | } 119 | 120 | // Below are the structs that represents the raw config extracted from the yaml file. 
121 | 122 | #[derive(Debug, Deserialize, Serialize)] 123 | pub struct RawConfig { 124 | pub vesu: VesuConfig, 125 | pub assets: Vec, 126 | } 127 | 128 | #[derive(Debug, Deserialize, Serialize)] 129 | pub struct VesuConfig { 130 | pub mainnet: NetworkConfig, 131 | pub sepolia: NetworkConfig, 132 | } 133 | 134 | #[derive(Debug, Deserialize, Serialize)] 135 | pub struct NetworkConfig { 136 | pub singleton_address: String, 137 | pub extension_address: String, 138 | pub liquidate_address: String, 139 | pub pragma_oracle_address: String, 140 | } 141 | 142 | #[derive(Debug, Deserialize, Serialize, Clone)] 143 | pub struct Asset { 144 | pub name: String, 145 | pub ticker: String, 146 | pub decimals: i64, 147 | pub mainnet_address: String, 148 | pub sepolia_address: String, 149 | } 150 | -------------------------------------------------------------------------------- /src/utils/ekubo.rs: -------------------------------------------------------------------------------- 1 | use anyhow::{Context, Result}; 2 | use bigdecimal::BigDecimal; 3 | use cainome::cairo_serde::{ContractAddress, U256}; 4 | use serde_json::Value; 5 | use starknet::core::types::Felt; 6 | 7 | use crate::{ 8 | bindings::liquidate::{PoolKey, RouteNode, Swap, TokenAmount}, 9 | utils::constants::I129_ZERO, 10 | }; 11 | 12 | const EKUBO_QUOTE_ENDPOINT: &str = "https://quoter-mainnet-api.ekubo.org"; 13 | const SCALE: u128 = 1_000_000_000_000_000_000; 14 | 15 | pub async fn get_ekubo_route( 16 | http_client: &reqwest::Client, 17 | from_token: Felt, 18 | to_token: Felt, 19 | amount: &BigDecimal, 20 | ) -> Result<(Vec, Vec)> { 21 | let (scaled_amount, _) = amount.as_bigint_and_exponent(); 22 | 23 | let ekubo_api_endpoint = format!( 24 | "{}/-{}/{}/{}", 25 | EKUBO_QUOTE_ENDPOINT, 26 | scaled_amount, 27 | from_token.to_fixed_hex_string(), 28 | to_token.to_fixed_hex_string() 29 | ); 30 | 31 | let response = http_client.get(ekubo_api_endpoint).send().await?; 32 | 33 | if !response.status().is_success() { 34 | 
anyhow::bail!("API request failed with status: {}", response.status()); 35 | } 36 | 37 | let response_text = response.text().await?; 38 | let json_value: Value = serde_json::from_str(&response_text)?; 39 | 40 | let splits = json_value["splits"] 41 | .as_array() 42 | .context("'splits' is not an array")?; 43 | 44 | if splits.is_empty() { 45 | anyhow::bail!("No splits returned from Ekubo API"); 46 | } 47 | 48 | // Handle single split case (100% weight) 49 | if splits.len() == 1 { 50 | let route = parse_route(&splits[0])?; 51 | return Ok(( 52 | vec![Swap { 53 | route, 54 | token_amount: TokenAmount { 55 | token: ContractAddress(from_token), 56 | amount: I129_ZERO, 57 | }, 58 | }], 59 | vec![SCALE], // Single weight of 100% 60 | )); 61 | } 62 | 63 | // Calculate total amount for weight calculation 64 | let total_amount: i128 = splits 65 | .iter() 66 | .map(|split| { 67 | split["amount_specified"] 68 | .as_str() 69 | .unwrap_or("0") 70 | .parse::() 71 | .unwrap_or(0) 72 | }) 73 | .sum(); 74 | 75 | let mut swaps = Vec::with_capacity(splits.len()); 76 | let mut weights = Vec::with_capacity(splits.len()); 77 | let mut running_weight_sum: u128 = 0; 78 | 79 | // Process all splits except the last one 80 | for split in splits.iter().take(splits.len() - 1) { 81 | let split_amount = split["amount_specified"] 82 | .as_str() 83 | .context("amount_specified is not a string")? 
84 | .parse::()?; 85 | 86 | let weight = (split_amount.unsigned_abs() * SCALE) / (total_amount.unsigned_abs()); 87 | running_weight_sum += weight; 88 | weights.push(weight); 89 | 90 | let route = parse_route(split)?; 91 | swaps.push(Swap { 92 | route, 93 | token_amount: TokenAmount { 94 | token: ContractAddress(from_token), 95 | amount: I129_ZERO, 96 | }, 97 | }); 98 | } 99 | 100 | // Handle the last split - ensure exact SCALE total 101 | let last_split = splits.last().unwrap(); 102 | let last_weight = SCALE - running_weight_sum; 103 | weights.push(last_weight); 104 | 105 | let route = parse_route(last_split)?; 106 | swaps.push(Swap { 107 | route, 108 | token_amount: TokenAmount { 109 | token: ContractAddress(from_token), 110 | amount: I129_ZERO, 111 | }, 112 | }); 113 | 114 | // Verify total is exactly SCALE 115 | let total_weight: u128 = weights.iter().sum(); 116 | assert!(total_weight == SCALE, "Weights do not sum to SCALE"); 117 | 118 | Ok((swaps, weights)) 119 | } 120 | 121 | fn parse_route(split: &Value) -> Result> { 122 | split["route"] 123 | .as_array() 124 | .context("'route' is not an array")? 125 | .iter() 126 | .map(|node| { 127 | let pool_key = &node["pool_key"]; 128 | let sqrt_ratio_limit = node["sqrt_ratio_limit"] 129 | .as_str() 130 | .context("sqrt_ratio_limit is not a string")?; 131 | 132 | let sqrt_ratio = U256::from_bytes_be(&Felt::from_hex(sqrt_ratio_limit)?.to_bytes_be()); 133 | 134 | Ok(RouteNode { 135 | pool_key: PoolKey { 136 | token0: ContractAddress(Felt::from_hex( 137 | pool_key["token0"] 138 | .as_str() 139 | .context("token0 is not a string")?, 140 | )?), 141 | token1: ContractAddress(Felt::from_hex( 142 | pool_key["token1"] 143 | .as_str() 144 | .context("token1 is not a string")?, 145 | )?), 146 | fee: u128::from_str_radix( 147 | pool_key["fee"] 148 | .as_str() 149 | .context("fee is not a string")? 
150 | .trim_start_matches("0x"), 151 | 16, 152 | ) 153 | .context("Failed to parse fee as u128")?, 154 | tick_spacing: pool_key["tick_spacing"] 155 | .as_u64() 156 | .context("tick_spacing is not a u64")? 157 | as u128, 158 | extension: ContractAddress(Felt::from_hex( 159 | pool_key["extension"] 160 | .as_str() 161 | .context("extension is not a string")?, 162 | )?), 163 | }, 164 | sqrt_ratio_limit: sqrt_ratio, 165 | skip_ahead: node["skip_ahead"] 166 | .as_u64() 167 | .context("skip_ahead is not a u64")? as u128, 168 | }) 169 | }) 170 | .collect() 171 | } 172 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |
2 |

Vesu Liquidator

3 | 4 |
5 | Report a Bug 6 | - 7 | Request a Feature 8 |
9 | 10 | ## About 11 | 12 | Vesu Liquidator 🤖 is an automated bot that monitors positions on the Vesu Protocol and liquidates them. 13 | 14 | ## Getting Started 15 | 16 | ### Docker through published package 17 | 18 | You can run the Vesu Liquidator using our pre-built Docker image. Here's how to use it: 19 | 20 | 1. Pull the latest image: 21 | 22 | ```sh 23 | docker pull ghcr.io/astraly-labs/vesu-liquidator:latest 24 | ``` 25 | 26 | 1. Run the container: 27 | 28 | ```sh 29 | docker run --rm -it \ 30 | -v /path/to/your/.env:/app/.env \ 31 | ghcr.io/astraly-labs/vesu-liquidator:latest \ 32 | --account-address \ 33 | --network \ 34 | --rpc-url \ 35 | --starting-block \ 36 | --storage-path ./vesu.json 37 | ``` 38 | 39 | For more options, run: 40 | 41 | ```bash 42 | docker run --rm ghcr.io/astraly-labs/vesu-liquidator:latest --help 43 | ``` 44 | 45 | ### Docker locally 46 | 47 | If you want to build the Docker image locally: 48 | 49 | 1. Build the Docker image: 50 | 51 | ```sh 52 | docker build -t vesu-liquidator . 53 | ``` 54 | 55 | 2. Run the locally built image: 56 | 57 | ```sh 58 | docker run --rm vesu-liquidator --help 59 | # OR 60 | docker run --rm -it \ 61 | # Optional .env, can also be provided through CLI 62 | -v /path/to/your/.env:/app/.env \ 63 | vesu-liquidator \ 64 | --account-address \ 65 | --network \ 66 | --rpc-url \ 67 | --starting-block \ 68 | --storage-path ./vesu.json # persistent indexer storage into a file 69 | ``` 70 | 71 | ### Prerequisites 72 | 73 | #### Protobuf 74 | 75 | In order to run the liquidator, you need the protoc Protocol Buffers compiler, along with Protocol Buffers resource files. 76 | 77 | ##### Ubuntu 78 | 79 | ```sh 80 | sudo apt update && sudo apt upgrade -y 81 | sudo apt install -y protobuf-compiler libprotobuf-dev 82 | ``` 83 | 84 | ##### macOS 85 | 86 | Assuming Homebrew is already installed. 
87 | 88 | ```sh 89 | brew install protobuf 90 | ``` 91 | 92 | #### Environment Variables 93 | 94 | Create an `.env` file following the example file and fill the keys. 95 | 96 | ## Usage 97 | 98 | ### Build 99 | 100 | ```sh 101 | cargo build --release 102 | ``` 103 | 104 | The executable can be found at `./target/release/vesu-liquidator`. 105 | 106 | ### Run 107 | 108 | You can run `vesu-liquidator --help` - which will show how to use the bot: 109 | 110 | ```bash 111 | Usage: vesu-liquidator [OPTIONS] --account-address --network --rpc-url --starting-block --pragma-api-base-url 112 | 113 | Options: 114 | --account-address 115 | Account address of the liquidator account 116 | 117 | --private-key 118 | Private key of the liquidator account 119 | 120 | --keystore-path 121 | Keystore path for the liquidator account 122 | 123 | --keystore-password 124 | Keystore password for the liquidator account 125 | 126 | -n, --network 127 | The network chain configuration [possible values: mainnet, sepolia] 128 | 129 | --rpc-url 130 | The rpc endpoint url 131 | 132 | --config-path 133 | Configuration file path [default: config.yaml] 134 | 135 | -s, --starting-block 136 | The block you want to start syncing from 137 | 138 | --apibara-api-key 139 | Apibara API Key for indexing 140 | 141 | -h, --help 142 | Print help 143 | ``` 144 | 145 | #### Example: running the bot on Mainnet 146 | 147 | ```bash 148 | ./target/release/vesu-liquidator --network mainnet --rpc-url https://starknet-mainnet.public.blastapi.io --starting-block 668886 --pragma-api-base-url https://api.dev.pragma.build --account-address --private-key 149 | ``` 150 | 151 | Should run the bot: 152 | 153 | ```bash 154 | 155 | ██╗ ██╗███████╗███████╗██╗ ██╗ ██╗ ██╗ ██████╗ ██╗ ██╗██╗██████╗ █████╗ ████████╗ ██████╗ ██████╗ 156 | ██║ ██║██╔════╝██╔════╝██║ ██║ ██║ ██║██╔═══██╗██║ ██║██║██╔══██╗██╔══██╗╚══██╔══╝██╔═══██╗██╔══██╗ 157 | ██║ ██║█████╗ ███████╗██║ ██║ ██║ ██║██║ ██║██║ ██║██║██║ ██║███████║ ██║ ██║ ██║██████╔╝ 158 | 
╚██╗ ██╔╝██╔══╝ ╚════██║██║ ██║ ██║ ██║██║▄▄ ██║██║ ██║██║██║ ██║██╔══██║ ██║ ██║ ██║██╔══██╗ 159 | ╚████╔╝ ███████╗███████║╚██████╔╝ ███████╗██║╚██████╔╝╚██████╔╝██║██████╔╝██║ ██║ ██║ ╚██████╔╝██║ ██║ 160 | ╚═══╝ ╚══════╝╚══════╝ ╚═════╝ ╚══════╝╚═╝ ╚══▀▀═╝ ╚═════╝ ╚═╝╚═════╝ ╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═╝ 161 | 162 | 🤖 Liquidator 👉 0x42...6c 163 | 🎯 On Mainnet 164 | 🥡 Starting from block 668886 165 | 166 | 167 | 2024-08-23T05:29:06.808482Z INFO 🧩 Starting the indexer service... 168 | 2024-08-23T05:29:06.808583Z INFO ⏳ Waiting a few moment for the indexer to fetch positions... 169 | 170 | 2024-08-23T05:29:07.916084Z INFO [🔍 Indexer] Found new position 0x156fa1e95830c415 171 | 2024-08-23T05:29:16.809509Z INFO 🧩 Starting the oracle service... 172 | 2024-08-23T05:29:16.833518Z INFO 🧩 Starting the monitoring service... 173 | 174 | 2024-08-23T05:29:16.833561Z INFO [🔮 Oracle] Fetching latest prices... 175 | 2024-08-23T05:29:16.833667Z INFO [🔭 Monitoring] Checking if any position is liquidable... 176 | 2024-08-23T05:29:20.176390Z INFO [🔮 Oracle] ✅ Fetched all new prices 177 | 2024-08-23T05:29:20.177651Z INFO [🔭 Monitoring] 🤨 They're good.. for now... 178 | 179 | #... 180 | ``` 181 | 182 | ## Project assistance 183 | 184 | If you want to say **thank you** or/and support: 185 | 186 | - Add a [GitHub Star](https://github.com/astraly-labs/Vesu-liquidator) to the project. 187 | - Tweet about it. 188 | - Write interesting articles about the project on [Dev.to](https://dev.to/), [Medium](https://medium.com/) or your personal blog. 189 | 190 | ## Contributing 191 | 192 | First off, thanks for taking the time to contribute! Contributions are what make the open-source community such an amazing place to learn, inspire, and create. Any contributions you make will benefit everybody else and are **greatly appreciated**. 193 | 194 | Please read [our contribution guidelines](docs/CONTRIBUTING.md), and thank you for being involved! 
195 | 196 | ## Security 197 | 198 | We follows good practices of security, but 100% security cannot be assured. 199 | The bot is provided **"as is"** without any **warranty**. Use at your own risk. 200 | 201 | _For more information and to report security issues, please refer to our [security documentation](docs/SECURITY.md)._ 202 | 203 | ## License 204 | 205 | This project is licensed under the **MIT license**. 206 | 207 | See [LICENSE](LICENSE) for more information. 208 | -------------------------------------------------------------------------------- /src/services/monitoring.rs: -------------------------------------------------------------------------------- 1 | use std::{sync::Arc, time::Duration}; 2 | 3 | use anyhow::{Result, anyhow}; 4 | use futures_util::lock::Mutex; 5 | use starknet::providers::{JsonRpcClient, jsonrpc::HttpTransport}; 6 | use tokio::task::JoinSet; 7 | use tokio::{ 8 | sync::mpsc::UnboundedReceiver, 9 | time::{interval, sleep}, 10 | }; 11 | 12 | use crate::bindings::liquidate::Liquidate; 13 | use crate::types::StarknetSingleOwnerAccount; 14 | use crate::{ 15 | config::Config, 16 | services::oracle::LatestOraclePrices, 17 | storages::Storage, 18 | types::{ 19 | account::StarknetAccount, 20 | position::{Position, PositionsMap}, 21 | }, 22 | utils::{services::Service, wait_for_tx}, 23 | }; 24 | 25 | #[derive(Clone)] 26 | pub struct MonitoringService { 27 | liquidate_contract: Arc>, 28 | config: Config, 29 | rpc_client: Arc>, 30 | account: Arc, 31 | positions_receiver: Arc>>, 32 | positions: PositionsMap, 33 | latest_oracle_prices: LatestOraclePrices, 34 | storage: Arc>>, 35 | http_client: reqwest::Client, 36 | } 37 | 38 | #[async_trait::async_trait] 39 | impl Service for MonitoringService { 40 | async fn start(&mut self, join_set: &mut JoinSet>) -> anyhow::Result<()> { 41 | let service = self.clone(); 42 | // We wait a few seconds before starting the monitoring service to be sure that we have prices 43 | // + indexed a few positions. 
        // Give the indexer/oracle a small head start so the first scan has
        // prices and positions to work with.
        sleep(Duration::from_secs(4)).await;
        join_set.spawn(async move {
            tracing::info!("🔭 Monitoring service started");
            service.run_forever().await?;
            Ok(())
        });
        Ok(())
    }
}

impl MonitoringService {
    /// Creates the monitoring service.
    ///
    /// Previously indexed positions are restored from `storage`; new and
    /// updated positions arrive from the indexer through `positions_receiver`.
    pub fn new(
        config: Config,
        rpc_client: Arc<JsonRpcClient<HttpTransport>>,
        account: StarknetAccount,
        positions_receiver: UnboundedReceiver<(u64, Position)>,
        latest_oracle_prices: LatestOraclePrices,
        storage: Box<dyn Storage>,
    ) -> MonitoringService {
        MonitoringService {
            // Contract wrapper bound to the liquidator account so its calls
            // are signed by that account.
            liquidate_contract: Arc::new(Liquidate::new(
                config.liquidate_address,
                account.0.clone(),
            )),
            config,
            rpc_client,
            account: Arc::new(account),
            positions_receiver: Arc::new(Mutex::new(positions_receiver)),
            positions: PositionsMap::from_storage(storage.as_ref()),
            latest_oracle_prices,
            storage: Arc::new(Mutex::new(storage)),
            http_client: reqwest::Client::new(),
        }
    }

    /// Starts the monitoring service.
    pub async fn run_forever(&self) -> Result<()> {
        const CHECK_POSITIONS_INTERVAL: u64 = 3500;
        let mut update_interval = interval(Duration::from_millis(CHECK_POSITIONS_INTERVAL));

        loop {
            let mut receiver = self.positions_receiver.lock().await;

            tokio::select! {
                // Periodic scan of all known positions.
                _ = update_interval.tick() => {
                    // Release the receiver lock before the (potentially long)
                    // scan so the indexer channel is not starved meanwhile.
                    drop(receiver);
                    self.monitor_positions_liquidability().await?;
                }

                // A new/updated position arrived from the indexer.
                maybe_position = receiver.recv() => {
                    drop(receiver);
                    match maybe_position {
                        Some((block_number, mut new_position)) => {
                            // Refresh the on-chain state before storing it.
                            new_position
                                .update(&self.rpc_client, &self.config.singleton_address)
                                .await?;
                            if new_position.is_closed() {
                                continue;
                            }
                            self.positions.0.insert(new_position.key(), new_position);
                            // Persist so a restart resumes from this block.
                            self.storage.lock().await.save(&self.positions.0, block_number).await?;
                        }
                        None => {
                            // Sender dropped: the indexer died, stop monitoring.
                            return Err(anyhow!("Monitoring stopped unexpectedly"));
                        }
                    }
                }
            }
        }
    }

    /// Update all monitored positions and check if it's worth to liquidate any.
    async fn monitor_positions_liquidability(&self) -> Result<()> {
        if self.positions.0.is_empty() {
            return Ok(());
        }

        // Snapshot the keys so the map is not iterated while being mutated.
        let position_keys: Vec<_> = self.positions.0.iter().map(|entry| *entry.key()).collect();
        let mut positions_to_delete = vec![];

        for key in position_keys {
            if let Some(mut entry) = self.positions.0.get_mut(&key) {
                let position = entry.value_mut();

                if !position.is_liquidable(&self.latest_oracle_prices).await? {
                    continue;
                }
                tracing::info!(
                    "[🔭 Monitoring] Liquidatable position found #{}!",
                    position.key()
                );

                tracing::info!("[🔭 Monitoring] 🔫 Liquidating position...");
                if let Err(e) = self.liquidate_position(position).await {
                    // The contract refused because the position is actually
                    // healthy: stop tracking it.
                    if e.to_string().contains("not-undercollateralized") {
                        tracing::warn!("[🔭 Monitoring] Position was not under collateralized!");
                        positions_to_delete.push(key);
                        continue;
                    } else {
                        tracing::error!(
                            error = %e,
                            "[🔭 Monitoring] 😨 Could not liquidate position #{:x}",
                            position.key(),
                        );
                    }
                }

                // Re-sync the position state after the liquidation attempt.
                position
                    .update(&self.rpc_client, &self.config.singleton_address)
                    .await?;
            }
        }

        // Deletions are applied after the scan, outside the `get_mut` guards.
        for to_delete in positions_to_delete {
            self.positions.0.remove(&to_delete);
        }

        Ok(())
    }

    /// Builds, sends and awaits the liquidation transaction for `position`.
    async fn liquidate_position(&self, position: &Position) -> Result<()> {
        let started_at = std::time::Instant::now();
        let liquidation_tx = position
            .get_vesu_liquidate_tx(
                &self.liquidate_contract,
                &self.http_client,
                &self.account.account_address(),
            )
            .await?;
        let tx_hash = self.account.execute_txs(&[liquidation_tx]).await?;
        // Block until the transaction lands so the follow-up state refresh
        // observes the liquidation.
        wait_for_tx(&self.rpc_client, tx_hash).await?;
        tracing::info!(
            "[🔭 Monitoring] ✅ Liquidated position #{}! (tx {tx_hash:#064x}) - ⌛ {:?}",
            position.key(),
            started_at.elapsed()
        );
        Ok(())
    }
}
--------------------------------------------------------------------------------
/src/services/indexer.rs:
--------------------------------------------------------------------------------
use anyhow::Result;
use apibara_core::starknet::v1alpha2::Event;
use apibara_core::{
    node::v1alpha2::DataFinality,
    starknet::v1alpha2::{Block, Filter, HeaderFilter},
};
use apibara_sdk::{ClientBuilder, Configuration, Uri, configuration};
use dashmap::DashSet;
use futures_util::TryStreamExt;
use starknet::core::types::Felt;
use tokio::sync::mpsc::UnboundedSender;
use tokio::task::JoinSet;

use crate::cli::NetworkName;
use crate::config::{Config, MIGRATE_POSITION_EVENT, MODIFY_POSITION_EVENT};
use crate::utils::services::Service;
use crate::{
    types::position::Position,
    utils::conversions::{apibara_field_as_felt, felt_as_apibara_field},
};

const INDEXING_STREAM_CHUNK_SIZE: usize = 1;

/// Indexes Vesu position events from an Apibara DNA stream and forwards
/// them to the monitoring service.
#[derive(Clone)]
pub struct IndexerService {
    config: Config,
    uri: Uri,
    apibara_api_key: String,
    stream_config: Configuration<Filter>,
    positions_sender: UnboundedSender<(u64, Position)>,
    // Keys of positions already reported, to only log each one once.
    // NOTE(review): key type assumed to be the u64 returned by
    // `Position::key()` — confirm against src/types/position.rs.
    seen_positions: DashSet<u64>,
}

#[async_trait::async_trait]
impl Service for IndexerService {
    async fn start(&mut self, join_set: &mut JoinSet<anyhow::Result<()>>) -> anyhow::Result<()> {
        let service = self.clone();
        join_set.spawn(async move {
            tracing::info!("🔍 Indexer service started");
            service.run_forever().await?;
            Ok(())
        });
        Ok(())
    }
}

impl IndexerService {
    /// Creates the indexer, selecting the DNA endpoint from the configured
    /// network and filtering for the Vesu singleton's position events.
    pub fn new(
        config: Config,
        apibara_api_key: String,
        positions_sender: UnboundedSender<(u64, Position)>,
        from_block: u64,
    ) -> IndexerService {
        let uri = match config.network {
            NetworkName::Mainnet =>
Uri::from_static("https://mainnet.starknet.a5a.ch"),
            NetworkName::Sepolia => Uri::from_static("https://sepolia.starknet.a5a.ch"),
        };

        // Stream ModifyPosition & MigratePosition events emitted by the Vesu
        // singleton, starting at `from_block`, including pending data.
        let stream_config = Configuration::<Filter>::default()
            .with_starting_block(from_block)
            .with_finality(DataFinality::DataStatusPending)
            .with_filter(|mut filter| {
                filter
                    // weak(): headers only for blocks with matching events
                    // (per Apibara filter semantics).
                    .with_header(HeaderFilter::weak())
                    .add_event(|event| {
                        event
                            .with_from_address(felt_as_apibara_field(&config.singleton_address))
                            .with_keys(vec![felt_as_apibara_field(&MODIFY_POSITION_EVENT)])
                    })
                    .add_event(|event| {
                        event
                            .with_from_address(felt_as_apibara_field(&config.singleton_address))
                            .with_keys(vec![felt_as_apibara_field(&MIGRATE_POSITION_EVENT)])
                    })
                    .build()
            });

        IndexerService {
            config,
            uri,
            apibara_api_key,
            stream_config,
            positions_sender,
            seen_positions: DashSet::default(),
        }
    }

    /// Retrieve all the ModifyPosition events emitted from the Vesu Singleton Contract.
89 | pub async fn run_forever(mut self) -> Result<()> { 90 | let (config_client, config_stream) = configuration::channel(INDEXING_STREAM_CHUNK_SIZE); 91 | 92 | let mut reached_pending_block: bool = false; 93 | 94 | config_client.send(self.stream_config.clone()).await?; 95 | 96 | let mut stream = ClientBuilder::default() 97 | .with_bearer_token(Some(self.apibara_api_key.clone())) 98 | .connect(self.uri.clone()) 99 | .await 100 | .unwrap() 101 | .start_stream::(config_stream) 102 | .await 103 | .unwrap(); 104 | 105 | loop { 106 | match stream.try_next().await { 107 | Ok(Some(response)) => match response { 108 | apibara_sdk::DataMessage::Data { 109 | cursor: _, 110 | end_cursor: _, 111 | finality, 112 | batch, 113 | } => { 114 | if finality == DataFinality::DataStatusPending && !reached_pending_block { 115 | tracing::info!("[🔍 Indexer] 🥳🎉 Reached pending block!"); 116 | reached_pending_block = true; 117 | } 118 | for block in batch { 119 | for event in block.events { 120 | if let Some(event) = event.event { 121 | let block_number = match block.header.clone() { 122 | Some(hdr) => hdr.block_number, 123 | None => 0, 124 | }; 125 | self.create_position_from_event(block_number, event).await?; 126 | } 127 | } 128 | } 129 | } 130 | apibara_sdk::DataMessage::Invalidate { cursor } => match cursor { 131 | Some(c) => { 132 | return Err(anyhow::anyhow!( 133 | "Received an invalidate request data at {}", 134 | &c.order_key 135 | )); 136 | } 137 | None => { 138 | return Err(anyhow::anyhow!( 139 | "Invalidate request without cursor provided" 140 | )); 141 | } 142 | }, 143 | apibara_sdk::DataMessage::Heartbeat => {} 144 | }, 145 | Ok(None) => continue, 146 | Err(e) => { 147 | tracing::error!("[🔍 Indexer] Error while streaming, {}", e); 148 | } 149 | } 150 | } 151 | } 152 | 153 | /// Index the provided event & creates a new position. 
154 | async fn create_position_from_event(&mut self, block_number: u64, event: Event) -> Result<()> { 155 | if event.from_address.is_none() { 156 | return Ok(()); 157 | } 158 | 159 | let debt_address = apibara_field_as_felt(&event.keys[3]); 160 | // Corresponds to event associated with the extension contract - we ignore them. 161 | if debt_address == Felt::ZERO { 162 | return Ok(()); 163 | } 164 | 165 | // Create the new position & sends it to the monitoring service. 166 | if let Some(new_position) = Position::from_event(&self.config, &event.keys) { 167 | let position_key = new_position.key(); 168 | if self.seen_positions.insert(position_key) { 169 | tracing::info!( 170 | "[🔍 Indexer] Found new/updated position at block {}", 171 | block_number 172 | ); 173 | } 174 | match self.positions_sender.send((block_number, new_position)) { 175 | Ok(_) => {} 176 | Err(e) => panic!("[🔍 Indexer] 😱 Could not send position: {}", e), 177 | } 178 | } else { 179 | tracing::error!("Could not create position from event :/"); 180 | } 181 | Ok(()) 182 | } 183 | } 184 | -------------------------------------------------------------------------------- /src/types/position.rs: -------------------------------------------------------------------------------- 1 | use anyhow::{Result, anyhow}; 2 | use apibara_core::starknet::v1alpha2::FieldElement; 3 | use bigdecimal::{BigDecimal, FromPrimitive}; 4 | use colored::Colorize; 5 | use dashmap::DashMap; 6 | use serde::{Deserialize, Serialize}; 7 | use starknet::core::types::{BlockId, BlockTag, FunctionCall}; 8 | use starknet::core::types::{Call, Felt}; 9 | use starknet::providers::jsonrpc::HttpTransport; 10 | use starknet::providers::{JsonRpcClient, Provider}; 11 | use std::fmt; 12 | use std::hash::{Hash, Hasher}; 13 | use std::sync::Arc; 14 | use std::time::Duration; 15 | 16 | use crate::bindings::liquidate::{Liquidate, LiquidateParams}; 17 | 18 | use crate::config::{ 19 | Config, LIQUIDATION_CONFIG_SELECTOR, VESU_LTV_CONFIG_SELECTOR, 
VESU_POSITION_UNSAFE_SELECTOR, 20 | }; 21 | use crate::services::oracle::LatestOraclePrices; 22 | use crate::storages::Storage; 23 | use crate::utils::constants::{U256_ZERO, VESU_RESPONSE_DECIMALS}; 24 | use crate::utils::ekubo::get_ekubo_route; 25 | use crate::{types::asset::Asset, utils::conversions::apibara_field_as_felt}; 26 | 27 | use super::StarknetSingleOwnerAccount; 28 | 29 | /// Threshold for which we consider a position almost liquidable. 30 | const ALMOST_LIQUIDABLE_THRESHOLD: f64 = 0.01; 31 | 32 | /// Thread-safe wrapper around the positions. 33 | /// PositionsMap is a map between position position_key <=> position. 34 | #[derive(Clone)] 35 | pub struct PositionsMap(pub Arc>); 36 | 37 | impl PositionsMap { 38 | pub fn new() -> Self { 39 | Self(Arc::new(DashMap::new())) 40 | } 41 | 42 | pub fn from_storage(storage: &dyn Storage) -> Self { 43 | let positions = storage.get_positions(); 44 | let dash_map = DashMap::new(); 45 | for (key, value) in positions { 46 | dash_map.insert(key, value); 47 | } 48 | Self(Arc::new(dash_map)) 49 | } 50 | 51 | pub fn insert(&self, position: Position) -> Option { 52 | self.0.insert(position.key(), position) 53 | } 54 | 55 | pub fn len(&self) -> usize { 56 | self.0.len() 57 | } 58 | 59 | pub fn is_empty(&self) -> bool { 60 | self.0.is_empty() 61 | } 62 | } 63 | 64 | impl Default for PositionsMap { 65 | fn default() -> Self { 66 | Self::new() 67 | } 68 | } 69 | 70 | #[derive(Default, Clone, Hash, Eq, PartialEq, Debug, Serialize, Deserialize)] 71 | pub struct Position { 72 | pub user_address: Felt, 73 | pub pool_id: Felt, 74 | pub collateral: Asset, 75 | pub debt: Asset, 76 | pub lltv: BigDecimal, 77 | } 78 | 79 | impl Position { 80 | /// Create a new position from the event_keys of a ModifyPosition event. 
81 | pub fn from_event(config: &Config, event_keys: &[FieldElement]) -> Option { 82 | let event_keys: Vec = event_keys.iter().map(apibara_field_as_felt).collect(); 83 | 84 | let collateral = Asset::from_address(config, event_keys[2]); 85 | let debt = Asset::from_address(config, event_keys[3]); 86 | if collateral.is_none() || debt.is_none() { 87 | tracing::info!("{event_keys:?}"); 88 | tracing::warn!("collat & debt is none :/"); 89 | return None; 90 | } 91 | 92 | let position = Position { 93 | pool_id: event_keys[1], 94 | collateral: collateral.unwrap(), 95 | debt: debt.unwrap(), 96 | user_address: event_keys[4], 97 | lltv: BigDecimal::default(), 98 | }; 99 | Some(position) 100 | } 101 | 102 | /// Computes & returns the LTV Ratio for a position. 103 | pub async fn ltv(&self, oracle_prices: &LatestOraclePrices) -> Result { 104 | let collateral_name = self.collateral.name.to_lowercase(); 105 | let debt_name = self.debt.name.to_lowercase(); 106 | 107 | let collateral_price = oracle_prices 108 | .0 109 | .get(&collateral_name) 110 | .ok_or_else(|| anyhow!("Price not found for collateral: {}", collateral_name))? 111 | .clone(); 112 | 113 | let debt_price = oracle_prices 114 | .0 115 | .get(&debt_name) 116 | .ok_or_else(|| anyhow!("Price not found for debt: {}", debt_name))? 117 | .clone(); 118 | 119 | anyhow::ensure!( 120 | (collateral_price > BigDecimal::from(0)) && (debt_price > BigDecimal::from(0)), 121 | "Oracle prices are zero. Can't compute LTV." 122 | ); 123 | anyhow::ensure!( 124 | (self.collateral.amount > BigDecimal::from(0)), 125 | "Colateral amount is zero. Can't compute LTV." 126 | ); 127 | 128 | let ltv = (&self.debt.amount * debt_price) / (&self.collateral.amount * collateral_price); 129 | Ok(ltv) 130 | } 131 | 132 | /// Check if a position is closed. 133 | pub fn is_closed(&self) -> bool { 134 | (self.collateral.amount == 0.into()) && (self.debt.amount == 0.into()) 135 | } 136 | 137 | /// Returns if the position is liquidable or not. 
138 | pub async fn is_liquidable(&self, oracle_prices: &LatestOraclePrices) -> anyhow::Result { 139 | if self.lltv == BigDecimal::default() { 140 | return Ok(false); 141 | } 142 | 143 | let ltv_ratio = match self.ltv(oracle_prices).await { 144 | Result::Ok(ltv) => ltv, 145 | Result::Err(_) => return Ok(false), 146 | }; 147 | 148 | let is_liquidable = ltv_ratio >= self.lltv.clone(); 149 | let almost_liquidable_threshold = 150 | self.lltv.clone() - BigDecimal::from_f64(ALMOST_LIQUIDABLE_THRESHOLD).unwrap(); 151 | let is_almost_liquidable = ltv_ratio > almost_liquidable_threshold; 152 | 153 | if is_liquidable || is_almost_liquidable { 154 | self.logs_liquidation_state(is_liquidable, ltv_ratio); 155 | } 156 | 157 | Ok(is_liquidable) 158 | } 159 | 160 | fn logs_liquidation_state(&self, is_liquidable: bool, ltv_ratio: BigDecimal) { 161 | tracing::info!( 162 | "{} is at ratio {:.2}%/{:.2}% => {}", 163 | self, 164 | ltv_ratio * BigDecimal::from(100), 165 | self.lltv.clone() * BigDecimal::from(100), 166 | if is_liquidable { 167 | "liquidable!".green() 168 | } else { 169 | "almost liquidable 🔫".yellow() 170 | } 171 | ); 172 | } 173 | 174 | // TODO : put that in cache in a map with poolid/collateral/debt as key 175 | /// Fetches the liquidation factor from the extension contract 176 | pub async fn fetch_liquidation_factors( 177 | &self, 178 | config: &Config, 179 | rpc_client: Arc>, 180 | ) -> BigDecimal { 181 | let calldata = vec![self.pool_id, self.collateral.address, self.debt.address]; 182 | 183 | let liquidation_config_request = &FunctionCall { 184 | contract_address: config.extension_address, 185 | entry_point_selector: *LIQUIDATION_CONFIG_SELECTOR, 186 | calldata, 187 | }; 188 | 189 | let ltv_config = rpc_client 190 | .call( 191 | liquidation_config_request, 192 | BlockId::Tag(BlockTag::PreConfirmed), 193 | ) 194 | .await 195 | .expect("failed to retrieve"); 196 | BigDecimal::new(ltv_config[0].to_bigint(), VESU_RESPONSE_DECIMALS) 197 | } 198 | 199 | pub async fn 
update( 200 | &mut self, 201 | rpc_client: &Arc>, 202 | singleton_address: &Felt, 203 | ) -> anyhow::Result<()> { 204 | const RETRY_DELAY: Duration = Duration::from_secs(2); 205 | let mut attempt = 1; 206 | 207 | loop { 208 | match self.try_update(rpc_client, singleton_address).await { 209 | Ok(_) => return Ok(()), 210 | Err(e) => { 211 | tracing::error!( 212 | "[🔭 Monitoring] Position 0x#{:x} update failed (attempt {}), likely due to RPC error: {}", 213 | self.key(), 214 | attempt, 215 | e 216 | ); 217 | tokio::time::sleep(RETRY_DELAY).await; 218 | attempt += 1; 219 | } 220 | } 221 | } 222 | } 223 | 224 | async fn try_update( 225 | &mut self, 226 | rpc_client: &Arc>, 227 | singleton_address: &Felt, 228 | ) -> anyhow::Result<()> { 229 | self.update_amounts(rpc_client, singleton_address).await?; 230 | self.update_lltv(rpc_client, singleton_address).await?; 231 | Ok(()) 232 | } 233 | 234 | async fn update_amounts( 235 | &mut self, 236 | rpc_client: &Arc>, 237 | singleton_address: &Felt, 238 | ) -> anyhow::Result<()> { 239 | let get_position_request = &FunctionCall { 240 | contract_address: *singleton_address, 241 | entry_point_selector: *VESU_POSITION_UNSAFE_SELECTOR, 242 | calldata: self.as_update_calldata(), 243 | }; 244 | let result = rpc_client 245 | .call(get_position_request, BlockId::Tag(BlockTag::PreConfirmed)) 246 | .await?; 247 | 248 | let new_collateral = BigDecimal::new(result[4].to_bigint(), self.collateral.decimals); 249 | let new_debt = BigDecimal::new(result[6].to_bigint(), self.debt.decimals); 250 | self.collateral.amount = new_collateral; 251 | self.debt.amount = new_debt; 252 | Ok(()) 253 | } 254 | 255 | async fn update_lltv( 256 | &mut self, 257 | rpc_client: &Arc>, 258 | singleton_address: &Felt, 259 | ) -> anyhow::Result<()> { 260 | let ltv_config_request = &FunctionCall { 261 | contract_address: *singleton_address, 262 | entry_point_selector: *VESU_LTV_CONFIG_SELECTOR, 263 | calldata: self.as_ltv_calldata(), 264 | }; 265 | 266 | let ltv_config 
= rpc_client 267 | .call(ltv_config_request, BlockId::Tag(BlockTag::PreConfirmed)) 268 | .await?; 269 | 270 | self.lltv = BigDecimal::new(ltv_config[0].to_bigint(), VESU_RESPONSE_DECIMALS); 271 | Ok(()) 272 | } 273 | 274 | /// Returns a unique identifier for the position by hashing the update calldata. 275 | pub fn key(&self) -> u64 { 276 | let mut hasher = std::hash::DefaultHasher::new(); 277 | self.as_update_calldata().hash(&mut hasher); 278 | hasher.finish() 279 | } 280 | 281 | /// Returns the TX necessary to liquidate this position using the Vesu Liquidate 282 | /// contract. 283 | pub async fn get_vesu_liquidate_tx( 284 | &self, 285 | liquidate_contract: &Arc>, 286 | http_client: &reqwest::Client, 287 | liquidator_address: &Felt, 288 | ) -> Result { 289 | let (liquidate_swap, liquidate_swap_weights) = get_ekubo_route( 290 | http_client, 291 | self.debt.address, 292 | self.collateral.address, 293 | &self.debt.amount, 294 | ) 295 | .await?; 296 | 297 | let liquidate_params = LiquidateParams { 298 | pool_id: self.pool_id, 299 | collateral_asset: cainome::cairo_serde::ContractAddress(self.collateral.address), 300 | debt_asset: cainome::cairo_serde::ContractAddress(self.debt.address), 301 | user: cainome::cairo_serde::ContractAddress(self.user_address), 302 | recipient: cainome::cairo_serde::ContractAddress(*liquidator_address), 303 | min_collateral_to_receive: U256_ZERO, 304 | debt_to_repay: U256_ZERO, 305 | liquidate_swap, 306 | liquidate_swap_weights, 307 | liquidate_swap_limit_amount: u128::MAX, 308 | withdraw_swap: vec![], 309 | withdraw_swap_limit_amount: 0, 310 | withdraw_swap_weights: vec![], 311 | }; 312 | Ok(liquidate_contract.liquidate_getcall(&liquidate_params)) 313 | } 314 | 315 | /// Returns the position as a calldata for the LTV config RPC call. 316 | fn as_ltv_calldata(&self) -> Vec { 317 | vec![self.pool_id, self.collateral.address, self.debt.address] 318 | } 319 | 320 | /// Returns the position as a calldata for the Update Position RPC call. 
321 | fn as_update_calldata(&self) -> Vec { 322 | vec![ 323 | self.pool_id, 324 | self.collateral.address, 325 | self.debt.address, 326 | self.user_address, 327 | ] 328 | } 329 | } 330 | 331 | impl fmt::Display for Position { 332 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 333 | write!( 334 | f, 335 | "Position {} with {} {} of collateral and {} {} of debt", 336 | self.key(), 337 | self.collateral.amount.round(2), 338 | self.collateral.name, 339 | self.debt.amount.round(2), 340 | self.debt.name, 341 | ) 342 | } 343 | } 344 | --------------------------------------------------------------------------------