├── .github └── workflows │ ├── audit-on-push.yml │ ├── general.yml │ └── scheduled-audit.yml ├── .gitignore ├── Cargo.lock ├── Cargo.toml ├── LICENSE ├── Procfile ├── README.md ├── api.dockerfile ├── build.rs ├── configuration ├── base.yaml ├── local.yaml └── production.yaml ├── docs ├── README.md ├── SequenceDiagrams.md └── diagrams │ ├── mm_no_offer.png │ ├── mm_no_offer.pu │ ├── mm_offer_trader_doesnt_execute.png │ ├── mm_offer_trader_doesnt_execute.pu │ ├── mm_offer_trader_execute.png │ └── mm_offer_trader_execute.pu ├── examples └── client │ ├── abi │ └── OptionSettlementEngine.json │ └── market_maker.rs ├── indexer.dockerfile ├── migrations └── 20220609205943_create_initial_tables.sql ├── proto └── quay │ ├── rfq.proto │ ├── seaport.proto │ ├── session.proto │ └── types.proto ├── scripts ├── init_db.sh └── init_redis.sh ├── sqlx-data.json ├── src ├── auth.rs ├── bin │ ├── gossip.rs │ ├── indexer.rs │ └── server.rs ├── bindings │ ├── conduit_controller.rs │ ├── mod.rs │ ├── seaport.rs │ └── seaport_domain_registry.rs ├── configuration.rs ├── database │ ├── address.rs │ ├── db.rs │ ├── mod.rs │ └── order.rs ├── indexer.rs ├── lib.rs ├── middleware │ ├── metrics.rs │ ├── mod.rs │ └── request_id.rs ├── redis_pool.rs ├── routes │ ├── health_check.rs │ ├── metrics │ │ ├── mod.rs │ │ └── prometheus.rs │ ├── mod.rs │ ├── nft_market │ │ ├── create_listing.rs │ │ ├── create_offer.rs │ │ ├── mod.rs │ │ ├── retrieve_listings.rs │ │ └── retrieve_offers.rs │ └── sessions.rs ├── services │ ├── mod.rs │ ├── rfq.rs │ └── session.rs ├── startup.rs ├── state.rs ├── structs │ ├── mod.rs │ ├── network.rs │ └── seaport.rs ├── telemetry │ ├── metrics │ │ ├── api.rs │ │ ├── database.rs │ │ ├── mod.rs │ │ └── registry.rs │ ├── mod.rs │ └── tracing.rs ├── types │ └── mod.rs └── utils │ ├── mod.rs │ ├── seaport.rs │ └── session_interceptor.rs └── tests ├── api ├── health_check.rs ├── helpers.rs ├── listings.rs ├── main.rs ├── offers.rs ├── rpc_sessions.rs └── sessions.rs ├── 
test_offer.json └── test_session.json /.github/workflows/audit-on-push.yml: -------------------------------------------------------------------------------- 1 | name: Security audit 2 | on: 3 | push: 4 | paths: 5 | - '**/Cargo.toml' 6 | - '**/Cargo.lock' 7 | jobs: 8 | security_audit: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - uses: actions/checkout@v1 12 | - uses: moliva/audit-check@v1.3.1 13 | with: 14 | token: ${{ secrets.GITHUB_TOKEN }} 15 | ignore: "RUSTSEC-2020-0071" -------------------------------------------------------------------------------- /.github/workflows/general.yml: -------------------------------------------------------------------------------- 1 | name: Rust 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | pull_request: 8 | types: [ opened, synchronize, reopened ] 9 | branches: 10 | - master 11 | 12 | env: 13 | CARGO_TERM_COLOR: always 14 | 15 | jobs: 16 | test: 17 | name: Format 18 | runs-on: ubuntu-latest 19 | steps: 20 | - name: Checkout repository 21 | uses: actions/checkout@v3 22 | 23 | - name: Install Rust 24 | uses: actions-rs/toolchain@v1 25 | with: 26 | profile: minimal 27 | toolchain: stable 28 | override: true 29 | components: rustfmt, clippy 30 | - name: Formatting 31 | uses: actions-rs/cargo@v1 32 | with: 33 | command: fmt 34 | args: --all -- --check 35 | 36 | clippy: 37 | name: Clippy 38 | runs-on: ubuntu-latest 39 | services: 40 | postgres: 41 | image: postgres:13 42 | env: 43 | POSTGRES_USER: postgres 44 | POSTGRES_PASSWORD: password 45 | POSTGRES_DB: quay 46 | ports: 47 | - 5432:5432 48 | redis: 49 | image: redis:7 50 | ports: 51 | - 6379:6379 52 | env: 53 | SQLX_VERSION: 0.5.7 54 | SQLX_FEATURES: postgres 55 | steps: 56 | - name: Checkout repository 57 | uses: actions/checkout@v3 58 | 59 | - name: Install Protoc 60 | uses: arduino/setup-protoc@v1 61 | with: 62 | repo-token: ${{ secrets.GITHUB_TOKEN }} 63 | 64 | - name: Install Rust 65 | uses: actions-rs/toolchain@v1 66 | with: 67 | profile: minimal 68 | toolchain: stable 
69 | override: true 70 | components: rustfmt, clippy 71 | 72 | - name: Cache cargo dependencies 73 | uses: Swatinem/rust-cache@v2 74 | id: rust-cache 75 | 76 | - name: Install sqlx-cli 77 | uses: actions-rs/cargo@v1 78 | if: steps.rust-cache.outputs.cache-hit != true 79 | with: 80 | command: install 81 | args: > 82 | sqlx-cli 83 | --force 84 | --version=${{ env.SQLX_VERSION }} 85 | --features=${{ env.SQLX_FEATURES }} 86 | --no-default-features 87 | --locked 88 | 89 | - name: Migrate database 90 | run: | 91 | SKIP_DOCKER=true ./scripts/init_db.sh 92 | 93 | - name: Check sqlx-data.json is up-to-date 94 | run: | 95 | cargo sqlx prepare --merged -- --lib --tests 96 | env: 97 | DATABASE_URL: postgres://postgres:password@localhost:5432/quay 98 | 99 | - name: Clippy 100 | uses: actions-rs/clippy-check@v1 101 | with: 102 | token: ${{ secrets.GITHUB_TOKEN }} 103 | args: -- -D warnings 104 | 105 | tests: 106 | name: Test 107 | runs-on: ubuntu-latest 108 | services: 109 | postgres: 110 | image: postgres:13 111 | env: 112 | POSTGRES_USER: postgres 113 | POSTGRES_PASSWORD: password 114 | POSTGRES_DB: quay 115 | ports: 116 | - 5432:5432 117 | redis: 118 | image: redis:7 119 | ports: 120 | - 6379:6379 121 | env: 122 | SQLX_VERSION: 0.5.7 123 | SQLX_FEATURES: postgres 124 | steps: 125 | - name: Checkout repository 126 | uses: actions/checkout@v3 127 | 128 | - name: Install Protoc 129 | uses: arduino/setup-protoc@v1 130 | with: 131 | repo-token: ${{ secrets.GITHUB_TOKEN }} 132 | 133 | - name: Install Rust 134 | uses: actions-rs/toolchain@v1 135 | with: 136 | profile: minimal 137 | toolchain: stable 138 | override: true 139 | components: rustfmt, clippy 140 | 141 | - name: Cache cargo dependencies 142 | uses: Swatinem/rust-cache@v2 143 | id: rust-cache 144 | 145 | - name: Install sqlx-cli 146 | uses: actions-rs/cargo@v1 147 | if: steps.rust-cache.outputs.cache-hit != true 148 | with: 149 | command: install 150 | args: > 151 | sqlx-cli 152 | --force 153 | --version=${{ 
env.SQLX_VERSION }} 154 | --features=${{ env.SQLX_FEATURES }} 155 | --no-default-features 156 | --locked 157 | 158 | - name: Migrate database 159 | run: | 160 | SKIP_DOCKER=true ./scripts/init_db.sh 161 | 162 | - name: Check sqlx-data.json is up-to-date 163 | run: | 164 | cargo sqlx prepare --merged -- --lib --tests 165 | env: 166 | DATABASE_URL: postgres://postgres:password@localhost:5432/quay 167 | 168 | - name: Test 169 | uses: actions-rs/cargo@v1 170 | env: 171 | APP_RPC__URI: ${{ secrets.APP_RPC__URI }} 172 | with: 173 | command: test 174 | 175 | coverage: 176 | name: Coverage 177 | runs-on: ubuntu-latest 178 | services: 179 | postgres: 180 | image: postgres:13 181 | env: 182 | POSTGRES_USER: postgres 183 | POSTGRES_PASSWORD: password 184 | POSTGRES_DB: quay 185 | ports: 186 | - 5432:5432 187 | redis: 188 | image: redis:7 189 | ports: 190 | - 6379:6379 191 | env: 192 | SQLX_VERSION: 0.5.7 193 | SQLX_FEATURES: postgres 194 | steps: 195 | - name: Checkout repository 196 | uses: actions/checkout@v3 197 | 198 | - name: Install Protoc 199 | uses: arduino/setup-protoc@v1 200 | with: 201 | repo-token: ${{ secrets.GITHUB_TOKEN }} 202 | 203 | - name: Install Rust 204 | uses: actions-rs/toolchain@v1 205 | with: 206 | profile: minimal 207 | toolchain: stable 208 | override: true 209 | components: rustfmt, clippy 210 | 211 | - name: Cache cargo dependencies 212 | uses: Swatinem/rust-cache@v2 213 | id: cache-rust 214 | 215 | - name: Install sqlx-cli 216 | uses: actions-rs/cargo@v1 217 | if: steps.cache-rust.outputs.cache-hit != true 218 | with: 219 | command: install 220 | args: > 221 | sqlx-cli 222 | --force 223 | --version=${{ env.SQLX_VERSION }} 224 | --features=${{ env.SQLX_FEATURES }} 225 | --no-default-features 226 | --locked 227 | 228 | - name: Migrate database 229 | run: | 230 | SKIP_DOCKER=true ./scripts/init_db.sh 231 | 232 | - name: Check sqlx-data.json is up-to-date 233 | run: | 234 | cargo sqlx prepare --merged -- --lib --tests 235 | env: 236 | DATABASE_URL: 
postgres://postgres:password@localhost:5432/quay 237 | 238 | - name: Coverage 239 | uses: actions-rs/tarpaulin@v0.1 240 | env: 241 | APP_RPC__URI: ${{ secrets.APP_RPC__URI }} 242 | with: 243 | args: '--ignore-tests --avoid-cfg-tarpaulin' 244 | -------------------------------------------------------------------------------- /.github/workflows/scheduled-audit.yml: -------------------------------------------------------------------------------- 1 | name: Security audit 2 | on: 3 | schedule: 4 | - cron: '0 0 * * *' 5 | jobs: 6 | audit: 7 | runs-on: ubuntu-latest 8 | steps: 9 | - uses: actions/checkout@v1 10 | - uses: moliva/audit-check@v1.3.1 11 | with: 12 | token: ${{ secrets.GITHUB_TOKEN }} 13 | ignore: "RUSTSEC-2020-0071" -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | .idea 3 | 4 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "quay" 3 | version = "0.1.0" 4 | authors = ["Alcibiades ", "Perpetuum Seven "] 5 | edition = "2021" 6 | 7 | [lib] 8 | path = "src/lib.rs" 9 | 10 | [dependencies] 11 | anyhow = "1.0.66" 12 | arrayref = "0.3.6" 13 | async-redis-session = { git = "https://github.com/0xAlcibiades/async-redis-session.git", branch = "tokio" } 14 | async-trait = "0.1.59" 15 | axum = { version = "0.6.1", features = ["http2", "query"] } 16 | axum-macros = "0.3.0" 17 | axum-server = "0.4.4" 18 | axum-sessions = "0.4.1" 19 | bb8 = "0.8.0" 20 | chrono = "0.4" 21 | # TODO(Config breaks TryFrom between 0.11 -> 0.13, violating semantic versioning) 22 | config = { version = "0.11", default-features = false, features = ["yaml"] } 23 | ethers = { version = "1.0.2", features = ["abigen", "ws", "rustls", "ipc"] } 24 | futures = "0.3.25" 25 | http = "0.2.8" 26 | hyper = "0.14.23" 27 | log 
= "0.4.17" 28 | once_cell = "1.16.0" 29 | prometheus = "0.13.3" 30 | prometheus-metric-storage = "0.5.0" 31 | prost = "0.11.3" 32 | # TODO(https://github.com/redis-rs/redis-rs/pull/725) 33 | # We should switch to rustls for a performance benefit when it's ready. 34 | redis = { version = "0.22.1", features = ["tokio-comp", "connection-manager"] } 35 | secrecy = { version = "0.8.0", features = ["serde"] } 36 | serde = { version = "1.0.148", features = ["derive"] } 37 | serde-aux = "4.1.2" 38 | serde_json = "1.0.89" 39 | siwe = { version = "0.5.0", features = ["serde"] } 40 | time = "0.3.17" 41 | tokio = { version = "1.22", features = ["macros", "rt-multi-thread"] } 42 | tokio-stream = "0.1.11" 43 | tonic = "0.8.3" 44 | tower = { version = "0.4.13", features = ["steer"] } 45 | tower-http = { version = "0.3.5", features = ["trace", "cors"] } 46 | tower-layer = "0.3.2" 47 | tower-service = "0.3.2" 48 | tracing = "0.1.37" 49 | tracing-bunyan-formatter = "0.3.4" 50 | tracing-futures = "0.2.5" 51 | tracing-log = "0.1.3" 52 | tracing-subscriber = { version = "0.3.16", features = ["registry", "env-filter"] } 53 | ulid = "1.0.0" 54 | uuid = { version = "1.2.2", features = ["v4", "serde"] } 55 | 56 | # TODO(sqlx breaks connect_timeout on minor version upgrade, violating semantic versioning) 57 | [dependencies.sqlx] 58 | version = "0.5.7" 59 | default-features = false 60 | features = [ 61 | "runtime-tokio-rustls", 62 | "macros", 63 | "postgres", 64 | "uuid", 65 | "chrono", 66 | "migrate", 67 | "offline" 68 | ] 69 | 70 | [dev-dependencies] 71 | once_cell = "1.16.0" 72 | reqwest = { version = "0.11.13", default-features = false, features = ["json", "rustls-tls", "cookies", "blocking"] } 73 | 74 | [build-dependencies] 75 | tonic-build = "0.8.4" 76 | 77 | [[example]] 78 | name = "client" 79 | path = "examples/client/market_maker.rs" 80 | -------------------------------------------------------------------------------- /LICENSE: 
-------------------------------------------------------------------------------- 1 | Copyright 2022 Alcibiades Capital LLC 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 4 | 5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 6 | 7 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 8 | -------------------------------------------------------------------------------- /Procfile: -------------------------------------------------------------------------------- 1 | web: APP_APPLICATION__PORT=$PORT ./target/release/api 2 | worker: APP_APPLICATION__PORT=$PORT ./target/release/indexer -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Quay 2 | 3 | Quay is an open source, high performance backend for the Seaport smart 4 | contracts. The project is implemented in Rust, using Postgres as a storage 5 | backend, with the aim of allowing a proliferation of signature based 6 | trading platforms for ERC-721s and ERC-1155s based on the Seaport smart 7 | contract interface. 
8 | 9 | ## Project structure 10 | 11 | ### Web application 12 | 13 | `main.rs` contains the `main` function, which is called when the crate's 14 | binary is executed. That in turn calls `startup.rs`. That creates the 15 | application, attaches the services, routes, database connection pool, and 16 | rpc connection context used by the endpoints. 17 | 18 | ### Routes/Services 19 | 20 | Routes and services are defined in `/routes` using `actix-web` macros. 21 | 22 | ### Telemetry 23 | 24 | `telemetry.rs` and `startup.rs` set up a tracing framework that provides rich 25 | logs for runtime telemetry and development debugging. These tracing logs are 26 | then extended via macros on routes. 27 | 28 | Example: 29 | 30 | ```rust 31 | #[post("/offers")] 32 | #[tracing::instrument( 33 | name = "Adding a new offer", 34 | skip(offer, pool, seaport), 35 | fields( 36 | offerer = %offer.parameters.offerer, 37 | ) 38 | )] 39 | async fn create_offer( 40 | offer: web::Json, 41 | pool: web::Data, 42 | seaport: web::Data>>, 43 | ) -> HttpResponse { 44 | if insert_offer(&pool, &offer, &seaport).await.is_err() { 45 | return HttpResponse::InternalServerError().finish(); 46 | } 47 | HttpResponse::Ok().finish() 48 | } 49 | ``` 50 | 51 | ### SIWE Authentication 52 | 53 | This project supports EIP-4361 (Sign in with Ethereum) authentication. See 54 | `src/routes/sessions.rs` for implementation details. 55 | 56 | ### Redis 57 | 58 | Redis is used for SIWE session storage. 59 | 60 | ### Database 61 | 62 | The database schema and queries are written in SQL and managed with `sqlx`. The 63 | schemas live in `migrations/`, and the database queries are inline where used 64 | on the objects (SQL speaking objects). The database connection is handled in a 65 | connection pool at application startup. 
66 | 67 | #### Generating queries for macros 68 | 69 | When new queries are added, run `DATABASE_URL=postgres://postgres:password@localhost:5432/quay cargo sqlx prepare -- --lib` 70 | to generate new query definitions for the macro expansion used by sqlx. 71 | 72 | #### Migrations 73 | 74 | Database schema migrations are performed out of band with 75 | [sqlx-cli](https://crates.io/crates/sqlx-cli). All database migrations should be 76 | tested locally, and applied in a non-breaking fashion. What this means, ideally, 77 | is that there is a migration to add new functionality, then a rollout of the new 78 | backend version, then another migration to delete any legacy schema after roll 79 | out. 80 | 81 | ##### How to run migration using `sqlx-cli` 82 | 1. `sqlx migrate add ''`; 83 | 2. Write all necessary queries in `migrations/`; 84 | 3. Apply for local db (if it's required for testing): `sqlx migrate run`; 85 | 86 | ### Smart contract interactions and data structures 87 | 88 | Blockchain interactions happen via RPC using the `ethers-rs` library. This project 89 | contains type safe bindings for the seaport (`seaport.rs`) and conduit controller 90 | (`conduit_controller.rs`) contracts, which have been extended with other 91 | functionality, and will eventually include functions for serializing to and 92 | from postgres. The rpc connection is handled by a wrapped reference counter 93 | at application startup. 94 | 95 | ### Local development environment 96 | 97 | Developers will need rust and docker to work on this project. Docker is used 98 | to host a local instance of the postgres database to execute tests against. 99 | To set up a local database, run: `./scripts/init_db.sh` and `./scripts/init_redis.sh` 100 | 101 | #### Building the project 102 | 103 | Run `cargo build` to build the project. `indexer.dockerfile` and `api.dockerfile` 104 | are useful for generating docker images for use in production or testing. 
105 | 106 | ### Tests 107 | 108 | Tests live in the `/tests` folder. 109 | 110 | #### Running the tests 111 | 112 | Run `cargo test` any time after setting up your development environment. 113 | 114 | ## CI/CD 115 | 116 | CI/CD is implemented using GitHub actions, the scripting is in 117 | `.github/workflows`. There are certain actions which run on open 118 | pull requests, and deploy actions, which run only after passing 119 | tests on `master`. 120 | 121 | ### Continuous Integration 122 | 123 | This project uses `clippy`, `tarpaulin`, and a number of other crates 124 | to provide end-to-end tests, unit tests, formatting, linting, and static 125 | analysis to enforce code quality. These tests are then run in an automated 126 | fashion using GitHub actions. 127 | 128 | ### Continuous Deployment 129 | 130 | This project supports deployment to Digital Ocean via `spec.yaml` -------------------------------------------------------------------------------- /api.dockerfile: -------------------------------------------------------------------------------- 1 | FROM lukemathwalker/cargo-chef:latest-rust-1.61.0 as chef 2 | WORKDIR /app 3 | 4 | FROM chef as planner 5 | COPY . . 6 | # Compute a lock-like file for our project 7 | RUN cargo chef prepare --recipe-path recipe.json 8 | 9 | FROM chef as builder 10 | COPY --from=planner /app/recipe.json recipe.json 11 | # Build our project dependencies, not our application! 12 | RUN cargo chef cook --release --recipe-path recipe.json 13 | # Up to this point, if our dependency tree stays the same, 14 | # all layers should be cached. 15 | COPY . . 
16 | ENV SQLX_OFFLINE true 17 | # Build our project 18 | RUN cargo build --release --bin api 19 | 20 | FROM debian:bullseye-slim AS runtime 21 | WORKDIR /app 22 | RUN apt-get update -y \ 23 | && apt-get install -y --no-install-recommends openssl \ 24 | # Clean up 25 | && apt-get autoremove -y \ 26 | && apt-get clean -y \ 27 | && rm -rf /var/lib/apt/lists/* 28 | COPY --from=builder /app/target/release/api api 29 | COPY configuration configuration 30 | ENV APP_ENVIRONMENT production 31 | ENTRYPOINT ["./api"] -------------------------------------------------------------------------------- /build.rs: -------------------------------------------------------------------------------- 1 | // generated by `sqlx migrate build-script` 2 | fn main() -> Result<(), Box> { 3 | tonic_build::configure() 4 | .build_server(true) 5 | //.out_dir("src/google") // you can change the generated code's location 6 | .compile( 7 | &["proto/quay/rfq.proto"], 8 | &["proto/quay"], // specify the root location to search proto dependencies 9 | ) 10 | .unwrap(); 11 | 12 | tonic_build::configure() 13 | .build_server(true) 14 | .compile( 15 | &["proto/quay/session.proto"], 16 | &["proto/quay"], // specify the root location to search proto dependencies 17 | ) 18 | .unwrap(); 19 | // trigger recompilation when a new migration is added 20 | println!("cargo:rerun-if-changed=migrations"); 21 | Ok(()) 22 | } 23 | -------------------------------------------------------------------------------- /configuration/base.yaml: -------------------------------------------------------------------------------- 1 | application: 2 | port: 8000 3 | hmac_secret: "super-long-and-secret-random-key-needed-to-verify-message-integrity" 4 | database: 5 | host: "localhost" 6 | port: 5432 7 | username: "postgres" 8 | password: "password" 9 | database_name: "quay" 10 | redis_url: "redis://127.0.0.1:6379" 11 | rpc: 12 | uri: "http://127.0.0.1:8545" 13 | chain_id: 1 14 | indexer: 15 | seaport_deploy_block: 14946473 
-------------------------------------------------------------------------------- /configuration/local.yaml: -------------------------------------------------------------------------------- 1 | application: 2 | host: 127.0.0.1 3 | base_url: "http://127.0.0.1" 4 | database: 5 | require_ssl: false -------------------------------------------------------------------------------- /configuration/production.yaml: -------------------------------------------------------------------------------- 1 | application: 2 | host: 0.0.0.0 3 | 4 | database: 5 | require_ssl: true -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | # Documentation Module 2 | 3 | ## Sequence diagrams 4 | For sequence diagrams see [Sequence Diagrams](./SequenceDiagrams.md) 5 | -------------------------------------------------------------------------------- /docs/SequenceDiagrams.md: -------------------------------------------------------------------------------- 1 | # Sequence Diagrams 2 | ### The market maker does not give an offer: 3 | The following diagram assumes that the Options token does not currently exist. However, if the Option does exist, 4 | then the Trader can forgo the request to `newOptionType` and instead simply just use the Option ID. 5 | 6 | The diagram highlights important fields within the messages being passed. The message structure with theses fields 7 | may not contain _all_ required fields 8 | 9 | ![mm_no_offer](./diagrams/mm_no_offer.png) 10 | 11 | ### The market maker gives an offer, trader does not execute: 12 | The following diagram assumes that the Options token does not currently exist. However, if the Option does exist, 13 | then the Trader can forgo the request to `newOptionType` and instead simply just use the Option ID. 14 | 15 | The diagram highlights important fields within the messages being passed. 
The message structure with these fields 16 | may not contain _all_ required fields 17 | 18 | ![mm_offer_trader_doesnt_execute](./diagrams/mm_offer_trader_doesnt_execute.png) 19 | 20 | ### The market maker gives an offer, trader executes offer: 21 | The following diagram assumes that the Options token does not currently exist. However, if the Option does exist, 22 | then the Trader can forgo the request to `newOptionType` and instead simply just use the Option ID. 23 | 24 | The diagram highlights important fields within the messages being passed. The message structure with these fields 25 | may not contain _all_ required fields 26 | 27 | ![mm_offer_trader_execute](./diagrams/mm_offer_trader_execute.png) 28 | 29 | ### General 30 | #### Request for Quote (RFQ) data structure 31 | 32 | If the Trader doesn't fill the `exerciseTimestamp` or `expiryTimestamp` 33 | then the Market Maker is free to set those values to whatever it chooses 34 | on the Option, otherwise the Market Maker must have the values set to 35 | what the Trader wishes if it makes an offer. 36 | 37 | If the `listingId` is `Some` then all the information is taken from 38 | the listing instead. 39 | 40 | The Request for Quote request structure: 41 | 42 | ```protobuf 43 | message QuoteRequest { 44 | optional H128 ulid = 1; 45 | optional H160 takerAddress = 2; 46 | ItemType itemType = 3; 47 | optional H160 tokenAddress = 4; 48 | optional H256 identifierOrCriteria = 5; 49 | H256 startAmount = 6; 50 | H256 endAmount = 7; 51 | Action action = 8; 52 | } 53 | ``` 54 | 55 | #### Quote Response data structure 56 | 57 | If the Market Maker doesn't create an order, then the `order` field in the 58 | response will be `None`. If the Market Maker has an order then the response 59 | will contain `Some(order)`. 
60 | 61 | Quote response 62 | structure: 63 | 64 | ```protobuf 65 | message QuoteResponse { 66 | optional H128 ulid = 1; 67 | optional H160 makerAddress = 2; 68 | Order order = 3; 69 | } 70 | ``` 71 | -------------------------------------------------------------------------------- /docs/diagrams/mm_no_offer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/valorem-labs-inc/quay/441cdc21984939c492aa02d34863e6677e2f744f/docs/diagrams/mm_no_offer.png -------------------------------------------------------------------------------- /docs/diagrams/mm_no_offer.pu: -------------------------------------------------------------------------------- 1 | @startuml 2 | 3 | actor Taker 4 | box Valorem #LightGreen 5 | participant Quay 6 | end box 7 | actor "Market Maker" as MM 8 | box Valorem #LightBlue 9 | participant SettlementEngine as SE 10 | end box 11 | 12 | == Initialisation == 13 | MM -> Quay: Authenticate 14 | MM -> Quay: gRPC: MM RFQ Stream 15 | Quay -> MM: gRPC: MM RFQ Stream 16 | 17 | == Case: Market Maker has no offer for Taker == 18 | Taker --> SE: newOptionType(...) 19 | SE --> Taker: optionId 20 | Taker -> Quay: gRPC: RFQ\nQuoteRequest{ulid=None} 21 | Quay -> Taker: gRPC: Taker RFQ Stream 22 | Quay -> MM: gRPC: MM RFQ Stream\nQuoteRequest{ulid=Some} 23 | MM -> SE: option(...) 
24 | SE -> MM: optionInfo 25 | MM -[#red]x MM: No offer 26 | MM -> Quay: gRPC: MM RFQ Stream\nQuoteResponse{\n\tulid=Some,\n\tOffer=None\n} 27 | Quay -> Taker: gRPC: Taker RFQ Stream\nQuoteResponse{Offer=None} 28 | Quay -[#red]X Taker: gRPC: Taker RFQ Stream\nClose 29 | @enduml -------------------------------------------------------------------------------- /docs/diagrams/mm_offer_trader_doesnt_execute.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/valorem-labs-inc/quay/441cdc21984939c492aa02d34863e6677e2f744f/docs/diagrams/mm_offer_trader_doesnt_execute.png -------------------------------------------------------------------------------- /docs/diagrams/mm_offer_trader_doesnt_execute.pu: -------------------------------------------------------------------------------- 1 | @startuml 2 | 3 | actor Taker 4 | box Valorem #LightGreen 5 | participant Quay 6 | end box 7 | actor "Market Maker" as MM 8 | box Valorem #LightBlue 9 | participant SettlementEngine as SE 10 | end box 11 | 12 | == Initialisation == 13 | MM -> Quay: Authenticate 14 | MM -> Quay: gRPC: MM RFQ Stream 15 | Quay -> MM: gRPC: MM RFQ Stream 16 | 17 | == Case: Market Maker Offer, Taker doesn't execute == 18 | Taker --> SE: newOptionType(...) 19 | SE --> Taker: optionId 20 | Taker -> Quay: gRPC: RFQ\nQuoteRequest{ulid=None} 21 | Quay -> Taker: gRPC: Taker RFQ Stream 22 | Quay -> MM: gRPC: MM RFQ Stream\nQuoteRequest{ulid=Some} 23 | MM -> SE: option(...) 24 | SE -> MM: optionInfo 25 | MM -> SE: write(...) 26 | SE -> MM: claimId 27 | MM -> Quay: gRPC: MM RFQ Stream\nQuoteResponse{\n\tulid=Some,\n\tOffer=Some\n} 28 | Quay -> Taker: gRPC: Taker RFQ Stream\nQuoteResponse{Offer=Some} 29 | Quay -[#red]X Taker: gRPC: Taker RFQ Stream\nClose 30 | hnote over Taker #lightBlue 31 | [Start of exercise window] 32 | end hnote 33 | hnote over Taker #lightBlue 34 | [ End of exercise window ] 35 | end hnote 36 | MM -> SE: redeem(...) 
37 | @enduml -------------------------------------------------------------------------------- /docs/diagrams/mm_offer_trader_execute.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/valorem-labs-inc/quay/441cdc21984939c492aa02d34863e6677e2f744f/docs/diagrams/mm_offer_trader_execute.png -------------------------------------------------------------------------------- /docs/diagrams/mm_offer_trader_execute.pu: -------------------------------------------------------------------------------- 1 | @startuml 2 | 3 | actor Trader 4 | box Valorem #LightGreen 5 | participant Quay 6 | end box 7 | actor "Market Maker" as MM 8 | box Valorem #LightBlue 9 | participant SettlementEngine as SE 10 | end box 11 | 12 | == Initialisation == 13 | MM -> Quay: Authenticate 14 | MM -> Quay: gRPC: MM RFQ Stream 15 | Quay -> MM: gRPC: MM RFQ Stream 16 | 17 | == Case: Market Maker Offer, Trader executes == 18 | Trader --> SE: newOptionType(...) 19 | SE --> Trader: optionId 20 | Trader -> Quay: gRPC: RFQ\nQuoteRequest{ulid=None} 21 | Quay -> Trader: gRPC: Trader RFQ Stream 22 | Quay -> MM: gRPC: MM RFQ Stream\nQuoteRequest{ulid=Some} 23 | MM -> SE: option(...) 24 | SE -> MM: optionInfo 25 | MM -> SE: write(...) 26 | SE -> MM: claimId 27 | MM -> Quay: gRPC: MM RFQ Stream\nQuoteResponse{\n\tulid=Some,\n\tOffer=Some\n} 28 | Quay -> Trader: gRPC: Trader RFQ Stream\nQuoteResponse{Offer=Some} 29 | Quay -[#red]X Trader: gRPC: Trader RFQ Stream\nClose 30 | hnote over Trader #lightBlue 31 | [Start of exercise window] 32 | end hnote 33 | Trader -> SE: exercise(...) 34 | hnote over Trader #lightBlue 35 | [ End of exercise window ] 36 | end hnote 37 | MM -> SE: redeem(...) 
38 | @enduml -------------------------------------------------------------------------------- /examples/client/market_maker.rs: -------------------------------------------------------------------------------- 1 | use ethers::prelude::{ 2 | abigen, Address, Http, JsonRpcClient, LocalWallet, Provider, Signer, Ws, H160, 3 | }; 4 | use http::Uri; 5 | use quay::{ 6 | rfq, 7 | rfq::rfq_client::RfqClient, 8 | rfq::{QuoteRequest, QuoteResponse}, 9 | utils::session_interceptor::SessionInterceptor, 10 | }; 11 | use quay::{ 12 | session::session_client::SessionClient, 13 | session::{Empty, VerifyText}, 14 | }; 15 | use siwe::{TimeStamp, Version}; 16 | use std::{env, process::exit, str::FromStr, sync::Arc}; 17 | use time::OffsetDateTime; 18 | use tokio::sync::mpsc; 19 | use tonic::transport::Channel; 20 | 21 | abigen!( 22 | SettlementEngine, 23 | "$CARGO_MANIFEST_DIR/examples/client/abi/OptionSettlementEngine.json", 24 | event_derives(serde::Deserialize, serde::Serialize) 25 | ); 26 | 27 | const SESSION_COOKIE_KEY: &str = "set-cookie"; 28 | 29 | /// An example Market Maker (MM) client interface to Quay. 30 | /// 31 | /// The Market Maker will receive Request For Quote (RFQ) from the Quay server formatted as 32 | /// `QuoteRequest` and the MM needs to respond with `QuoteResponse`. 33 | /// 34 | /// # Usage 35 | /// `client `
36 | ///
where:
37 | /// `` : The location of the Quay server, for example `http://localhost:8000`.
38 | /// `` : The location of the node RPC endpoint, for example `http://localhost:8545`.
39 | /// `` : The address of the wallet for signing messages.
40 | /// `` : The address of the Option Settlement contract (optional). If not provided, Address::default() is used.
41 | #[tokio::main] 42 | async fn main() -> Result<(), Box> { 43 | let args: Vec = env::args().skip(1).collect(); 44 | 45 | if args.len() != 3 && args.len() != 4 { 46 | eprintln!("Unexpected command line arguments. Received {:?}", args); 47 | eprintln!("Usage: client []"); 48 | exit(1); 49 | } 50 | 51 | let quay_uri = args[0].parse::().unwrap(); 52 | let (session_cookie, maker_address) = setup(quay_uri.clone(), args[2].to_string()).await; 53 | 54 | let settlement_address = if args.len() == 4 { 55 | args[3].parse::
()? 56 | } else { 57 | Address::default() 58 | }; 59 | 60 | if args[1].starts_with("http") { 61 | let provider = Provider::::try_from(args[1].clone())?; 62 | run( 63 | Arc::new(provider), 64 | quay_uri, 65 | session_cookie, 66 | maker_address, 67 | settlement_address, 68 | ) 69 | .await; 70 | } else if args[1].starts_with("ws") { 71 | // Websockets (ws & wss) 72 | let provider = Provider::::new(Ws::connect(args[1].clone()).await?); 73 | run( 74 | Arc::new(provider), 75 | quay_uri, 76 | session_cookie, 77 | maker_address, 78 | settlement_address, 79 | ) 80 | .await; 81 | } else { 82 | // IPC 83 | let provider = Provider::connect_ipc(args[1].clone()).await?; 84 | run( 85 | Arc::new(provider), 86 | quay_uri, 87 | session_cookie, 88 | maker_address, 89 | settlement_address, 90 | ) 91 | .await; 92 | } 93 | 94 | Ok(()) 95 | } 96 | 97 | // Main execution function 98 | async fn run( 99 | _provider: Arc>, 100 | quay_uri: Uri, 101 | session_cookie: String, 102 | maker_address: Address, 103 | _settlement_address: Address, 104 | ) { 105 | // Now there is a valid authenticated session, connect to the RFQ stream 106 | let mut client = RfqClient::with_interceptor( 107 | Channel::builder(quay_uri).connect().await.unwrap(), 108 | SessionInterceptor { session_cookie }, 109 | ); 110 | 111 | // Setup the comms channels. Server responses are requests to the client. 112 | // Client requests are responses to the server. 
113 | let (tx_quote_response, rx_quote_response) = mpsc::channel::(64); 114 | let (tx_quote_request, mut rx_quote_request) = mpsc::channel::(64); 115 | 116 | // Create the settlement engine contract, you can call `exercise` and other functions on the contract with: 117 | // let settlement_contract = SettlementEngine(settlement_address, provider); 118 | // settlement_contract.exercise(...).call().await; 119 | 120 | // The main task that handles incoming server requests 121 | let task = tokio::spawn(async move { 122 | while let Some(request_for_quote) = rx_quote_request.recv().await { 123 | let quote_offer = handle_server_request(request_for_quote, maker_address); 124 | 125 | // Send the response to the server 126 | tx_quote_response.send(quote_offer).await.unwrap(); 127 | } 128 | 129 | eprintln!("Client connection to the server has been closed"); 130 | }); 131 | 132 | // Call the required function which will return the servers response stream (which is really 133 | // requests to the client). 134 | let mut quote_stream = client 135 | .maker(tokio_stream::wrappers::ReceiverStream::new( 136 | rx_quote_response, 137 | )) 138 | .await 139 | .unwrap() 140 | .into_inner(); 141 | 142 | // Now we have received the servers request stream - loop until it ends (its not expected to). 143 | while let Ok(Some(quote)) = quote_stream.message().await { 144 | tx_quote_request.send(quote).await.unwrap(); 145 | } 146 | 147 | // Explicitly drop the tx side of the channel allowing the rx side to get notified we are 148 | // about to close. 149 | drop(tx_quote_request); 150 | 151 | // We never expect to get here or the task to end unless the server has disconnected. 152 | task.await.unwrap(); 153 | } 154 | 155 | // Handle the quote. 156 | // The current example simply sends back an empty order (indicating no offer). 
157 | fn handle_server_request(request_for_quote: QuoteRequest, maker_address: H160) -> QuoteResponse { 158 | println!("Request received, returning no offer"); 159 | QuoteResponse { 160 | ulid: request_for_quote.ulid, 161 | maker_address: Some(rfq::H160::from(maker_address)), 162 | order: None, 163 | } 164 | } 165 | 166 | // Helper function used to setup a valid session with Quay 167 | async fn setup(quay_uri: Uri, private_key: String) -> (String, Address) { 168 | // Connect and authenticate with Quay 169 | let mut client: SessionClient = 170 | SessionClient::new(Channel::builder(quay_uri.clone()).connect().await.unwrap()); 171 | let response = client 172 | .nonce(Empty::default()) 173 | .await 174 | .expect("Unable to fetch Nonce from Quay"); 175 | 176 | // Fetch the session cookie for all future requests 177 | let session_cookie = response 178 | .metadata() 179 | .get(SESSION_COOKIE_KEY) 180 | .expect("Session cookie was not returned in Nonce response") 181 | .to_str() 182 | .expect("Unable to fetch session cookie from Nonce response") 183 | .to_string(); 184 | 185 | let nonce = response.into_inner().nonce; 186 | 187 | // Verify & authenticate with Quay before connecting to RFQ endpoint. 
188 | let mut client = SessionClient::with_interceptor( 189 | Channel::builder(quay_uri).connect().await.unwrap(), 190 | SessionInterceptor { 191 | session_cookie: session_cookie.clone(), 192 | }, 193 | ); 194 | 195 | // Setup a local wallet 196 | let wallet = LocalWallet::from_str(private_key.as_str()).unwrap(); 197 | 198 | // Create a sign in with ethereum message 199 | let message = siwe::Message { 200 | domain: "localhost.com".parse().unwrap(), 201 | address: wallet.address().0, 202 | statement: None, 203 | uri: "http://localhost/".parse().unwrap(), 204 | version: Version::V1, 205 | chain_id: 1, 206 | nonce, 207 | issued_at: TimeStamp::from(OffsetDateTime::now_utc()), 208 | expiration_time: None, 209 | not_before: None, 210 | request_id: None, 211 | resources: vec![], 212 | }; 213 | 214 | // Generate a signature 215 | let message_string = message.to_string(); 216 | let signature = wallet 217 | .sign_message(message_string.as_bytes()) 218 | .await 219 | .unwrap(); 220 | 221 | // Create the SignedMessage 222 | let signature_string = signature.to_string(); 223 | let mut signed_message = serde_json::Map::new(); 224 | signed_message.insert( 225 | "signature".to_string(), 226 | serde_json::Value::from(signature_string), 227 | ); 228 | signed_message.insert( 229 | "message".to_string(), 230 | serde_json::Value::from(message_string), 231 | ); 232 | let body = serde_json::Value::from(signed_message).to_string(); 233 | 234 | // Verify the session with Quay 235 | let response = client.verify(VerifyText { body }).await; 236 | match response { 237 | Ok(_) => (), 238 | Err(error) => { 239 | eprintln!("Unable to verify client. Reported error:\n{:?}", error); 240 | exit(2); 241 | } 242 | } 243 | 244 | // Check that we have an authenticated session 245 | let response = client.authenticate(Empty::default()).await; 246 | match response { 247 | Ok(_) => (), 248 | Err(error) => { 249 | eprintln!( 250 | "Unable to check authentication with Quay. 
Reported error:\n{:?}", 251 | error 252 | ); 253 | exit(3); 254 | } 255 | } 256 | 257 | (session_cookie, wallet.address()) 258 | } 259 | -------------------------------------------------------------------------------- /indexer.dockerfile: -------------------------------------------------------------------------------- 1 | FROM lukemathwalker/cargo-chef:latest-rust-1.61.0 as chef 2 | WORKDIR /app 3 | 4 | FROM chef as planner 5 | COPY . . 6 | # Compute a lock-like file for our project 7 | RUN cargo chef prepare --recipe-path recipe.json 8 | 9 | FROM chef as builder 10 | COPY --from=planner /app/recipe.json recipe.json 11 | # Build our project dependencies, not our application! 12 | RUN cargo chef cook --release --recipe-path recipe.json 13 | # Up to this point, if our dependency tree stays the same, 14 | # all layers should be cached. 15 | COPY . . 16 | ENV SQLX_OFFLINE true 17 | # Build our project 18 | RUN cargo build --release --bin indexer 19 | 20 | FROM debian:bullseye-slim AS runtime 21 | WORKDIR /app 22 | RUN apt-get update -y \ 23 | && apt-get install -y --no-install-recommends openssl \ 24 | # Clean up 25 | && apt-get autoremove -y \ 26 | && apt-get clean -y \ 27 | && rm -rf /var/lib/apt/lists/* 28 | COPY --from=builder /app/target/release/indexer indexer 29 | COPY configuration configuration 30 | ENV APP_ENVIRONMENT production 31 | ENTRYPOINT ["./indexer"] -------------------------------------------------------------------------------- /migrations/20220609205943_create_initial_tables.sql: -------------------------------------------------------------------------------- 1 | CREATE EXTENSION IF NOT EXISTS citext; 2 | 3 | -- Create addresses table 4 | CREATE TABLE addresses 5 | ( 6 | address citext PRIMARY KEY 7 | ); 8 | 9 | CREATE TABLE networks 10 | ( 11 | network INTEGER PRIMARY KEY, 12 | indexed_block BIGINT NOT NULL 13 | ); 14 | 15 | CREATE TABLE orders 16 | ( 17 | hash TEXT PRIMARY KEY, 18 | 19 | offerer citext REFERENCES addresses(address) NOT NULL, 20 | 
21 | zone citext REFERENCES addresses(address) NOT NULL, 22 | zone_hash TEXT NOT NULL, 23 | 24 | start_time BIGINT NOT NULL, 25 | end_time BIGINT NOT NULL, 26 | 27 | order_type INT NOT NULL, 28 | total_original_consideration_items INT NOT NULL, 29 | salt TEXT NOT NULL, 30 | 31 | counter BIGINT NOT NULL, 32 | conduit_key TEXT NOT NULL, 33 | 34 | signature TEXT NOT NULL, 35 | 36 | cancelled BOOLEAN NOT NULL DEFAULT FALSE, 37 | finalized BOOLEAN NOT NULL DEFAULT FALSE, 38 | marked_invalid BOOLEAN NOT NULL DEFAULT FALSE 39 | ); 40 | 41 | CREATE TABLE offers 42 | ( 43 | "order" TEXT REFERENCES orders(hash) NOT NULL, 44 | position INT NOT NULL, 45 | item_type INT NOT NULL, 46 | 47 | token citext REFERENCES addresses(address) NOT NULL, 48 | identifier_or_criteria TEXT NOT NULL, 49 | 50 | start_amount TEXT NOT NULL, 51 | end_amount TEXT NOT NULL, 52 | 53 | PRIMARY KEY("order", position) 54 | ); 55 | 56 | CREATE TABLE considerations 57 | ( 58 | "order" TEXT REFERENCES orders(hash) NOT NULL, 59 | position INT NOT NULL, 60 | item_type INT NOT NULL, 61 | 62 | token citext REFERENCES addresses(address) NOT NULL, 63 | identifier_or_criteria TEXT NOT NULL, 64 | 65 | start_amount TEXT NOT NULL, 66 | end_amount TEXT NOT NULL, 67 | 68 | recipient citext REFERENCES addresses(address) NOT NULL, 69 | 70 | PRIMARY KEY("order", position) 71 | ); 72 | 73 | CREATE INDEX IF NOT EXISTS orders_offerer_idx on orders(offerer); 74 | CREATE INDEX IF NOT EXISTS orders_counter_idx on orders(counter); 75 | -------------------------------------------------------------------------------- /proto/quay/rfq.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | import "types.proto"; 4 | import "seaport.proto"; 5 | 6 | package quay; 7 | 8 | // Reference: https://github.com/valorem-labs-inc/valorem-core/blob/master/src/interfaces/IOptionSettlementEngine.sol#L223 9 | // Reference: 
https://github.com/Alcibiades-Capital/quay/blob/v0.2.0/docs/SequenceDiagrams.md#request-for-quote-rfq-json-data-structure 10 | 11 | service RFQ { 12 | rpc WebTaker (QuoteRequest) returns (stream QuoteResponse); 13 | rpc Taker (stream QuoteRequest) returns (stream QuoteResponse); 14 | rpc Maker (stream QuoteResponse) returns (stream QuoteRequest); 15 | } 16 | 17 | // The taker requesting a quote is requesting a quote to buy or sell an asset 18 | enum Action { 19 | BUY = 0; 20 | SELL = 1; 21 | } 22 | 23 | message QuoteRequest { 24 | // Ideally the maker would never know who the taker is, and vice-versa. 25 | // However, seaport reveals the makers' address to the taker. traderAddress 26 | // Ensures there is no information asymmetry between the maker and taker. 27 | // Thought the trader may not always end up being the taker 28 | H128 ulid = 1; // Optional 29 | H160 takerAddress = 2; // Optional 30 | ItemType itemType = 3; 31 | H160 tokenAddress = 4; // Optional 32 | H256 identifierOrCriteria = 5; // Optional 33 | H256 startAmount = 6; 34 | H256 endAmount = 7; 35 | Action action = 8; 36 | } 37 | 38 | message QuoteResponse { 39 | H128 ulid = 1; // Optional 40 | H160 makerAddress = 2; // Optional 41 | Order order = 3; 42 | } -------------------------------------------------------------------------------- /proto/quay/seaport.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | import "types.proto"; 4 | 5 | package quay; 6 | 7 | // Reference: https://docs.opensea.io/v2.0/reference/seaport-structs 8 | // Reference: https://docs.opensea.io/v2.0/reference/seaport-enums 9 | 10 | enum ItemType { 11 | NATIVE = 0; 12 | ERC20 = 1; 13 | ERC721 = 2; 14 | ERC1155 = 3; 15 | ERC721_WITH_CRITERIA = 4; 16 | ERC1155_WITH_CRITERIA = 5; 17 | } 18 | 19 | message ConsiderationItem { 20 | ItemType item_type = 1; 21 | H160 token = 2; 22 | H256 identifier_or_criteria = 3; // uint256 23 | H256 start_amount = 4; // uint256 24 | 
H256 end_amount = 5; // uint256 25 | H160 recipient = 6; 26 | } 27 | 28 | message OfferItem { 29 | ItemType item_type = 1; 30 | H160 token = 2; 31 | H256 identifier_or_criteria = 3; // uint256 32 | H256 start_amount = 4; // uint256 33 | H256 end_amount = 5; // uint256 34 | } 35 | 36 | enum OrderType { 37 | FULL_OPEN = 0; 38 | PARTIAL_OPEN = 1; 39 | FULL_RESTRICTED = 2; 40 | PARTIAL_RESTRICTED = 3; 41 | } 42 | 43 | message OrderParameters { 44 | H160 offerer = 1; 45 | H160 zone = 2; 46 | repeated OfferItem offers = 3; 47 | repeated ConsiderationItem considerations = 4; 48 | OrderType order_type = 5; 49 | H256 start_time = 6; // uint256 50 | H256 end_time = 7; // uint256 51 | H256 zone_hash = 8; // bytes32 52 | H256 total_original_consideration_items = 9; 53 | H256 salt = 10; // bytes32 54 | H256 conduit_key = 11; // bytes32 55 | H256 nonce = 12; 56 | } 57 | 58 | message Order { 59 | OrderParameters parameters = 1; 60 | H256 signature = 2; // bytes32 61 | } -------------------------------------------------------------------------------- /proto/quay/session.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package session; 4 | 5 | service Session { 6 | // Getting an EIP-4361 nonce for session 7 | rpc Nonce (Empty) returns (NonceText); 8 | 9 | // Verifying user EIP-4361 session 10 | rpc Verify (VerifyText) returns (Empty); 11 | 12 | // Checking user EIP-4361 authentication 13 | rpc Authenticate (Empty) returns (Empty); 14 | } 15 | 16 | // Nonce response message containing the generated `nonce` string. 17 | message NonceText { 18 | string nonce = 1; 19 | } 20 | 21 | // Verify request message containing an JSON encoded string of the `SignedMessage` structure (src/auth). 
22 | message VerifyText { 23 | string body = 1; 24 | } 25 | 26 | message Empty {} 27 | -------------------------------------------------------------------------------- /proto/quay/types.proto: -------------------------------------------------------------------------------- 1 | // Partially from: https://github.com/ledgerwatch/interfaces/blob/master/types/types.proto 2 | syntax = "proto3"; 3 | 4 | package quay; 5 | 6 | message H40 { 7 | uint32 hi = 1; 8 | // Note: lo is really a uint8, however the closest type in Protocol Buffers is uint32. Parsing needs 9 | // to take this into consideration. 10 | uint32 lo = 2; 11 | } 12 | 13 | message H96 { 14 | uint64 hi = 1; 15 | uint32 lo = 2; 16 | } 17 | 18 | message H128 { 19 | uint64 hi = 1; 20 | uint64 lo = 2; 21 | } 22 | 23 | message H160 { 24 | H128 hi = 1; 25 | uint32 lo = 2; 26 | } 27 | 28 | message H256 { 29 | H128 hi = 1; 30 | H128 lo = 2; 31 | } -------------------------------------------------------------------------------- /scripts/init_db.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -x 3 | set -eo pipefail 4 | 5 | if ! [ -x "$(command -v psql)" ]; then 6 | echo >&2 "Error: psql is not installed." 7 | exit 1 8 | fi 9 | 10 | if ! [ -x "$(command -v sqlx)" ]; then 11 | echo >&2 "Error: sqlx is not installed." 12 | echo >&2 "Use:" 13 | echo >&2 " cargo install --version=0.5.7 sqlx-cli --no-default-features --features postgres" 14 | echo >&2 "to install it." 
15 | exit 1 16 | fi 17 | 18 | # Check if a custom user has been set, otherwise default to 'postgres' 19 | DB_USER=${POSTGRES_USER:=postgres} 20 | # Check if a custom password has been set, otherwise default to 'password' 21 | DB_PASSWORD="${POSTGRES_PASSWORD:=password}" 22 | # Check if a custom database name has been set, otherwise default to 'quay' 23 | DB_NAME="${POSTGRES_DB:=quay}" 24 | # Check if a custom port has been set, otherwise default to '5432' 25 | DB_PORT="${POSTGRES_PORT:=5432}" 26 | 27 | 28 | # Allow to skip Docker if a dockerized Postgres database is already running 29 | if [[ -z "${SKIP_DOCKER}" ]] 30 | then 31 | docker run \ 32 | -e POSTGRES_USER=${DB_USER} \ 33 | -e POSTGRES_PASSWORD=${DB_PASSWORD} \ 34 | -e POSTGRES_DB=${DB_NAME} \ 35 | -p "${DB_PORT}":5432 \ 36 | -d postgres \ 37 | postgres -N 1000 38 | fi 39 | 40 | export PGPASSWORD="${DB_PASSWORD}" 41 | until psql -h "localhost" -U "${DB_USER}" -p "${DB_PORT}" -d "postgres" -c '\q'; do 42 | >&2 echo "Postgres is still unavailable - sleeping" 43 | sleep 1 44 | done 45 | 46 | >&2 echo "Postgres is up and running on port ${DB_PORT} - running migrations now!" 47 | 48 | export DATABASE_URL=postgres://${DB_USER}:${DB_PASSWORD}@localhost:${DB_PORT}/${DB_NAME} 49 | sqlx database create 50 | sqlx migrate run 51 | 52 | >&2 echo "Postgres has been migrated, ready to go!" 
-------------------------------------------------------------------------------- /scripts/init_redis.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -x 3 | set -eo pipefail 4 | 5 | # if a redis container is running, print instructions to kill it and exit 6 | RUNNING_CONTAINER=$(docker ps --filter 'name=redis' --format '{{.ID}}') 7 | if [[ -n $RUNNING_CONTAINER ]]; then 8 | echo >&2 "there is a redis container already running, kill it with" 9 | echo >&2 " docker kill ${RUNNING_CONTAINER}" 10 | exit 1 11 | fi 12 | 13 | # Launch Redis using Docker 14 | docker run \ 15 | -p "6379:6379" \ 16 | -d \ 17 | --name "redis_$(date '+%s')" \ 18 | redis:6 19 | 20 | >&2 echo "Redis is ready to go!" 21 | -------------------------------------------------------------------------------- /sqlx-data.json: -------------------------------------------------------------------------------- 1 | { 2 | "db": "PostgreSQL", 3 | "180037a9710afd6b9d325c5ffadccb76a077a1187cc8b325da6557b3a7375d7e": { 4 | "describe": { 5 | "columns": [], 6 | "nullable": [], 7 | "parameters": { 8 | "Left": [ 9 | "Text", 10 | "Int8" 11 | ] 12 | } 13 | }, 14 | "query": "UPDATE orders SET cancelled = true WHERE offerer = $1::TEXT::citext AND counter < $2" 15 | }, 16 | "248bbdcbea951c2ef2d2b297e0fb8a5f8dd33a0ac91c4a94dfed7d004872fb23": { 17 | "describe": { 18 | "columns": [ 19 | { 20 | "name": "hash!", 21 | "ordinal": 0, 22 | "type_info": "Text" 23 | }, 24 | { 25 | "name": "offerer!", 26 | "ordinal": 1, 27 | "type_info": "Text" 28 | }, 29 | { 30 | "name": "zone!", 31 | "ordinal": 2, 32 | "type_info": "Text" 33 | }, 34 | { 35 | "name": "zone_hash!", 36 | "ordinal": 3, 37 | "type_info": "Text" 38 | }, 39 | { 40 | "name": "start_time!", 41 | "ordinal": 4, 42 | "type_info": "Int8" 43 | }, 44 | { 45 | "name": "end_time!", 46 | "ordinal": 5, 47 | "type_info": "Int8" 48 | }, 49 | { 50 | "name": "order_type!", 51 | "ordinal": 6, 52 | "type_info": "Int4" 53 | }, 
54 | { 55 | "name": "total_original_consideration_items!", 56 | "ordinal": 7, 57 | "type_info": "Int4" 58 | }, 59 | { 60 | "name": "counter!", 61 | "ordinal": 8, 62 | "type_info": "Int8" 63 | }, 64 | { 65 | "name": "salt!", 66 | "ordinal": 9, 67 | "type_info": "Text" 68 | }, 69 | { 70 | "name": "conduit_key!", 71 | "ordinal": 10, 72 | "type_info": "Text" 73 | }, 74 | { 75 | "name": "signature!", 76 | "ordinal": 11, 77 | "type_info": "Text" 78 | }, 79 | { 80 | "name": "considerations!: Vec", 81 | "ordinal": 12, 82 | "type_info": "RecordArray" 83 | }, 84 | { 85 | "name": "offers!: Vec", 86 | "ordinal": 13, 87 | "type_info": "RecordArray" 88 | } 89 | ], 90 | "nullable": [ 91 | false, 92 | null, 93 | null, 94 | false, 95 | false, 96 | false, 97 | false, 98 | false, 99 | false, 100 | false, 101 | false, 102 | false, 103 | null, 104 | null 105 | ], 106 | "parameters": { 107 | "Left": [ 108 | "Text", 109 | "TextArray", 110 | "Text", 111 | "Int8" 112 | ] 113 | } 114 | }, 115 | "query": "\n SELECT\n O.hash as \"hash!\",\n O.offerer::TEXT as \"offerer!\",\n O.zone::TEXT as \"zone!\",\n O.zone_hash as \"zone_hash!\",\n O.start_time as \"start_time!\",\n O.end_time as \"end_time!\",\n O.order_type as \"order_type!\",\n O.total_original_consideration_items as \"total_original_consideration_items!\",\n O.counter as \"counter!\",\n O.salt as \"salt!\",\n O.conduit_key as \"conduit_key!\",\n O.signature as \"signature!\",\n array_agg(DISTINCT (\n OC.position,\n OC.item_type,\n OC.token::TEXT,\n OC.identifier_or_criteria,\n OC.start_amount,\n OC.end_amount,\n OC.recipient::TEXT\n )) AS \"considerations!: Vec\",\n array_agg(DISTINCT (\n OOF.position,\n OOF.item_type,\n OOF.token::TEXT,\n OOF.identifier_or_criteria,\n OOF.start_amount,\n OOF.end_amount\n )) AS \"offers!: Vec\"\n FROM orders O\n INNER JOIN considerations OC ON O.hash = OC.order\n INNER JOIN offers OOF ON O.hash = OOF.order\n WHERE O.hash IN (\n SELECT C.order FROM considerations C \n WHERE (C.token = $1::TEXT::citext 
OR $1::TEXT::citext = '0x0000000000000000000000000000000000000000000000000000000000000000')\n AND (C.identifier_or_criteria = ANY($2::TEXT[]) OR cardinality($2::TEXT[]) = 0)\n )\n AND (O.offerer = $3::TEXT::citext OR $3::TEXT::citext = '0x0000000000000000000000000000000000000000000000000000000000000000')\n GROUP BY O.hash\n LIMIT $4;\n " 116 | }, 117 | "323aec6855a9c9052445832e43963355a60420dfd1f7cac72284fcbd860bac8a": { 118 | "describe": { 119 | "columns": [], 120 | "nullable": [], 121 | "parameters": { 122 | "Left": [ 123 | "Text" 124 | ] 125 | } 126 | }, 127 | "query": "\n INSERT INTO addresses (address)\n VALUES ($1::TEXT::citext)\n ON CONFLICT (address) DO NOTHING;\n " 128 | }, 129 | "3f1bbb4850f2794d879bdec046c3ebf9b8bb338e53c3d7c64309a674eee80114": { 130 | "describe": { 131 | "columns": [], 132 | "nullable": [], 133 | "parameters": { 134 | "Left": [ 135 | "Int4", 136 | "Text", 137 | "Int4", 138 | "Text", 139 | "Text", 140 | "Text", 141 | "Text", 142 | "Text" 143 | ] 144 | } 145 | }, 146 | "query": "\n INSERT INTO considerations (\n position,\n \"order\",\n item_type,\n token,\n identifier_or_criteria,\n start_amount,\n end_amount,\n recipient\n )\n VALUES ($1, $2, $3, $4::TEXT::citext, $5, $6, $7, $8::TEXT::citext)\n ON CONFLICT (\"order\", position) DO NOTHING;\n " 147 | }, 148 | "4a5443e9879815966d92e5571fda4ee937b33c81f4b79b18f9e255438e8c7e81": { 149 | "describe": { 150 | "columns": [], 151 | "nullable": [], 152 | "parameters": { 153 | "Left": [ 154 | "Text" 155 | ] 156 | } 157 | }, 158 | "query": "\n INSERT INTO addresses (address)\n VALUES ($1::TEXT::citext)\n ON CONFLICT (address) DO NOTHING;\n " 159 | }, 160 | "64d88d952a6ed486a07ca5d33cf9613e780ec3671cf9f0bf4a536ba03db23e96": { 161 | "describe": { 162 | "columns": [], 163 | "nullable": [], 164 | "parameters": { 165 | "Left": [ 166 | "Text", 167 | "Text", 168 | "Text", 169 | "Text", 170 | "Int8", 171 | "Int8", 172 | "Int4", 173 | "Int4", 174 | "Int8", 175 | "Text", 176 | "Text", 177 | "Text" 178 | ] 
179 | } 180 | }, 181 | "query": "\n INSERT INTO orders (\n hash,\n offerer,\n zone,\n zone_hash,\n start_time,\n end_time,\n order_type,\n total_original_consideration_items,\n counter,\n salt,\n conduit_key,\n signature\n )\n VALUES ($1, $2::TEXT::citext, $3::TEXT::citext, $4, $5, $6, $7, $8, $9, $10, $11, $12)\n ON CONFLICT (hash) DO NOTHING;\n " 182 | }, 183 | "86e3e8111cb02ec2bc2ba1d0a1816ccf67802ef29e51be550b971d05440d30d3": { 184 | "describe": { 185 | "columns": [], 186 | "nullable": [], 187 | "parameters": { 188 | "Left": [ 189 | "Int4", 190 | "Text", 191 | "Int4", 192 | "Text", 193 | "Text", 194 | "Text", 195 | "Text", 196 | "Text" 197 | ] 198 | } 199 | }, 200 | "query": "\n INSERT INTO considerations (\n position,\n \"order\",\n item_type,\n token,\n identifier_or_criteria,\n start_amount,\n end_amount,\n recipient\n )\n VALUES ($1, $2, $3, $4::TEXT::citext, $5, $6, $7, $8::TEXT::citext)\n ON CONFLICT (\"order\", position) DO NOTHING;\n " 201 | }, 202 | "890116bfb77b847443def9a05697828f7f417074e7ffdeb07ca0762c3f53d01e": { 203 | "describe": { 204 | "columns": [], 205 | "nullable": [], 206 | "parameters": { 207 | "Left": [ 208 | "Int4", 209 | "Int8" 210 | ] 211 | } 212 | }, 213 | "query": "INSERT INTO networks (network, indexed_block) VALUES ($1, $2) ON CONFLICT DO NOTHING" 214 | }, 215 | "9e4ebef35d3b34dae77c5dcf2cbea0636cd68bbdaadaaa12f7ba76d184d23f19": { 216 | "describe": { 217 | "columns": [], 218 | "nullable": [], 219 | "parameters": { 220 | "Left": [ 221 | "Int4", 222 | "Text", 223 | "Int4", 224 | "Text", 225 | "Text", 226 | "Text", 227 | "Text" 228 | ] 229 | } 230 | }, 231 | "query": "\n INSERT INTO offers (\n position,\n \"order\",\n item_type,\n token,\n identifier_or_criteria,\n start_amount,\n end_amount\n )\n VALUES ($1, $2, $3, $4::TEXT::citext, $5, $6, $7)\n ON CONFLICT (\"order\", position) DO NOTHING;\n " 232 | }, 233 | "a0d912fb0b10dc88734914b3c16a3213febee0c92279d517ee342414e579354f": { 234 | "describe": { 235 | "columns": [], 236 | 
"nullable": [], 237 | "parameters": { 238 | "Left": [ 239 | "Int4", 240 | "Int8" 241 | ] 242 | } 243 | }, 244 | "query": "UPDATE networks SET indexed_block = $2 WHERE network = $1" 245 | }, 246 | "a2e5e9a0ad75fe61e25cd248de891a61949bdf0b04d6c6198a3dc7d9e61b0433": { 247 | "describe": { 248 | "columns": [ 249 | { 250 | "name": "hash!", 251 | "ordinal": 0, 252 | "type_info": "Text" 253 | }, 254 | { 255 | "name": "offerer!", 256 | "ordinal": 1, 257 | "type_info": "Text" 258 | }, 259 | { 260 | "name": "zone!", 261 | "ordinal": 2, 262 | "type_info": "Text" 263 | }, 264 | { 265 | "name": "zone_hash!", 266 | "ordinal": 3, 267 | "type_info": "Text" 268 | }, 269 | { 270 | "name": "start_time!", 271 | "ordinal": 4, 272 | "type_info": "Int8" 273 | }, 274 | { 275 | "name": "end_time!", 276 | "ordinal": 5, 277 | "type_info": "Int8" 278 | }, 279 | { 280 | "name": "order_type!", 281 | "ordinal": 6, 282 | "type_info": "Int4" 283 | }, 284 | { 285 | "name": "total_original_consideration_items!", 286 | "ordinal": 7, 287 | "type_info": "Int4" 288 | }, 289 | { 290 | "name": "counter!", 291 | "ordinal": 8, 292 | "type_info": "Int8" 293 | }, 294 | { 295 | "name": "salt!", 296 | "ordinal": 9, 297 | "type_info": "Text" 298 | }, 299 | { 300 | "name": "conduit_key!", 301 | "ordinal": 10, 302 | "type_info": "Text" 303 | }, 304 | { 305 | "name": "signature!", 306 | "ordinal": 11, 307 | "type_info": "Text" 308 | }, 309 | { 310 | "name": "considerations!: Vec", 311 | "ordinal": 12, 312 | "type_info": "RecordArray" 313 | }, 314 | { 315 | "name": "offers!: Vec", 316 | "ordinal": 13, 317 | "type_info": "RecordArray" 318 | } 319 | ], 320 | "nullable": [ 321 | false, 322 | null, 323 | null, 324 | false, 325 | false, 326 | false, 327 | false, 328 | false, 329 | false, 330 | false, 331 | false, 332 | false, 333 | null, 334 | null 335 | ], 336 | "parameters": { 337 | "Left": [ 338 | "Text", 339 | "TextArray", 340 | "Text", 341 | "Int8" 342 | ] 343 | } 344 | }, 345 | "query": "\n SELECT\n O.hash as 
\"hash!\",\n O.offerer::TEXT as \"offerer!\",\n O.zone::TEXT as \"zone!\",\n O.zone_hash as \"zone_hash!\",\n O.start_time as \"start_time!\",\n O.end_time as \"end_time!\",\n O.order_type as \"order_type!\",\n O.total_original_consideration_items as \"total_original_consideration_items!\",\n O.counter as \"counter!\",\n O.salt as \"salt!\",\n O.conduit_key as \"conduit_key!\",\n O.signature as \"signature!\",\n array_agg(DISTINCT (\n OC.position,\n OC.item_type,\n OC.token::TEXT,\n OC.identifier_or_criteria,\n OC.start_amount,\n OC.end_amount,\n OC.recipient::TEXT\n )) AS \"considerations!: Vec\",\n array_agg(DISTINCT (\n OOF.position,\n OOF.item_type,\n OOF.token::TEXT,\n OOF.identifier_or_criteria,\n OOF.start_amount,\n OOF.end_amount\n )) AS \"offers!: Vec\"\n FROM orders O\n INNER JOIN considerations OC ON O.hash = OC.order\n INNER JOIN offers OOF ON O.hash = OOF.order\n WHERE O.hash IN (\n SELECT OF.order FROM offers OF\n WHERE (OF.token = $1::TEXT::citext OR $1::TEXT::citext = '0x0000000000000000000000000000000000000000000000000000000000000000')\n AND (OF.identifier_or_criteria = ANY($2::TEXT[]) OR cardinality($2::TEXT[]) = 0)\n )\n AND (O.offerer = $3::TEXT::citext OR $3::TEXT::citext = '0x0000000000000000000000000000000000000000000000000000000000000000')\n GROUP BY O.hash\n LIMIT $4;\n " 346 | }, 347 | "b4b0f2f367afd2f81a8c130f30519e10232bfce97ca55ad84224fe420fd33dcc": { 348 | "describe": { 349 | "columns": [], 350 | "nullable": [], 351 | "parameters": { 352 | "Left": [ 353 | "Text", 354 | "Bool" 355 | ] 356 | } 357 | }, 358 | "query": "UPDATE orders SET cancelled = $2 WHERE hash = $1" 359 | }, 360 | "b8606b7c3f5b586901eb7ab719585e8b996d9a81ebb3c6a3e453882225c29076": { 361 | "describe": { 362 | "columns": [], 363 | "nullable": [], 364 | "parameters": { 365 | "Left": [ 366 | "Int4", 367 | "Text", 368 | "Int4", 369 | "Text", 370 | "Text", 371 | "Text", 372 | "Text" 373 | ] 374 | } 375 | }, 376 | "query": "\n INSERT INTO offers (\n position,\n \"order\",\n 
item_type,\n token,\n identifier_or_criteria,\n start_amount,\n end_amount\n )\n VALUES ($1, $2, $3, $4::TEXT::citext, $5, $6, $7)\n ON CONFLICT (\"order\", position) DO NOTHING;\n " 377 | }, 378 | "df998a15682d6e183e2cdd9b21413b81f8f4c3247536f174440a2ea34adf12ed": { 379 | "describe": { 380 | "columns": [], 381 | "nullable": [], 382 | "parameters": { 383 | "Left": [ 384 | "Text", 385 | "Bool" 386 | ] 387 | } 388 | }, 389 | "query": "UPDATE orders SET finalized = $2 WHERE hash = $1" 390 | }, 391 | "eb8ce485fbb3e88ecc676e8eed36873ec611eea11c581efb36efaa9d9ffd8ffe": { 392 | "describe": { 393 | "columns": [ 394 | { 395 | "name": "network", 396 | "ordinal": 0, 397 | "type_info": "Int4" 398 | }, 399 | { 400 | "name": "indexed_block", 401 | "ordinal": 1, 402 | "type_info": "Int8" 403 | } 404 | ], 405 | "nullable": [ 406 | false, 407 | false 408 | ], 409 | "parameters": { 410 | "Left": [ 411 | "Int4" 412 | ] 413 | } 414 | }, 415 | "query": "SELECT network, indexed_block FROM networks WHERE network = $1" 416 | } 417 | } -------------------------------------------------------------------------------- /src/auth.rs: -------------------------------------------------------------------------------- 1 | use std::time::{SystemTime, UNIX_EPOCH}; 2 | 3 | use axum::response::IntoResponse; 4 | use axum_sessions::extractors::ReadableSession; 5 | use ethers::abi::ethereum_types::Signature; 6 | 7 | use http::StatusCode; 8 | use serde::{Deserialize, Serialize}; 9 | use siwe::Message; 10 | 11 | pub const NONCE_KEY: &str = "nonce"; 12 | pub const EXPIRATION_TIME_KEY: &str = "expirationTime"; 13 | pub const USER_ADDRESS_KEY: &str = "userAddress"; 14 | 15 | pub fn unix_timestamp() -> Result { 16 | Ok(SystemTime::now().duration_since(UNIX_EPOCH)?.as_secs()) 17 | } 18 | 19 | // EIP-4361 based session 20 | 21 | #[derive(Clone, Debug, Serialize, Deserialize)] 22 | pub struct SignedMessage { 23 | pub signature: Signature, 24 | pub message: Message, 25 | } 26 | 27 | pub async fn 
verify_session(session: &ReadableSession) -> impl IntoResponse { 28 | match session.get::(NONCE_KEY) { 29 | Some(_) => {} 30 | // Invalid nonce 31 | None => return (StatusCode::UNAUTHORIZED, "Failed to get nonce").into_response(), 32 | } 33 | let now = match unix_timestamp() { 34 | Ok(now) => now, 35 | Err(_) => { 36 | return ( 37 | StatusCode::INTERNAL_SERVER_ERROR, 38 | "Failed to get unix timestamp.", 39 | ) 40 | .into_response() 41 | } 42 | }; 43 | match session.get::(EXPIRATION_TIME_KEY) { 44 | None => { 45 | return (StatusCode::UNAUTHORIZED, "Failed to get session expiration").into_response() 46 | } 47 | Some(ts) => { 48 | if now > ts { 49 | return (StatusCode::UNAUTHORIZED, "Session expired").into_response(); 50 | } 51 | } 52 | } 53 | 54 | StatusCode::OK.into_response() 55 | } 56 | -------------------------------------------------------------------------------- /src/bin/gossip.rs: -------------------------------------------------------------------------------- 1 | // TODO(Implement Seaport Gossip Network Client) 2 | 3 | fn main() {} 4 | -------------------------------------------------------------------------------- /src/bin/indexer.rs: -------------------------------------------------------------------------------- 1 | use quay::configuration::get_configuration; 2 | use quay::indexer; 3 | use quay::telemetry::{get_subscriber, init_subscriber}; 4 | use tracing::error; 5 | 6 | fn main() { 7 | let subscriber = get_subscriber("quay".into(), "info".into(), std::io::stdout); 8 | init_subscriber(subscriber); 9 | 10 | let configuration = get_configuration().expect("Failed to read configuration."); 11 | if let Err(e) = indexer::run(configuration) { 12 | error!("Unhandled application error, panicking."); 13 | panic!("{}", e); 14 | 15 | // Later, when there are handled cases: process::exit(2..n); 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /src/bin/server.rs: 
-------------------------------------------------------------------------------- 1 | //! src/bin/api 2 | use quay::configuration::get_configuration; 3 | use quay::startup::Application; 4 | use quay::telemetry::{get_subscriber, init_subscriber}; 5 | 6 | #[tokio::main] 7 | async fn main() -> anyhow::Result<()> { 8 | let subscriber = get_subscriber( 9 | "quay".into(), 10 | "info,tower_http=trace".into(), 11 | std::io::stdout, 12 | ); 13 | init_subscriber(subscriber); 14 | 15 | let configuration = get_configuration().expect("Failed to read configuration."); 16 | let application = Application::build(configuration).await?; 17 | application.run_until_stopped().await?; 18 | Ok(()) 19 | } 20 | -------------------------------------------------------------------------------- /src/bindings/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod conduit_controller; 2 | pub mod seaport; 3 | pub mod seaport_domain_registry; 4 | 5 | pub use conduit_controller::*; 6 | pub use seaport::*; 7 | pub use seaport_domain_registry::*; 8 | -------------------------------------------------------------------------------- /src/bindings/seaport_domain_registry.rs: -------------------------------------------------------------------------------- 1 | pub use seaportdomainregistry_mod::*; 2 | #[allow(clippy::too_many_arguments, non_camel_case_types)] 3 | pub mod seaportdomainregistry_mod { 4 | #![allow(clippy::enum_variant_names)] 5 | #![allow(dead_code)] 6 | #![allow(clippy::type_complexity)] 7 | #![allow(unused_imports)] 8 | use ethers::contract::{ 9 | builders::{ContractCall, Event}, 10 | Contract, Lazy, 11 | }; 12 | use ethers::core::{ 13 | abi::{Abi, Detokenize, InvalidOutputType, Token, Tokenizable}, 14 | types::*, 15 | }; 16 | use ethers::providers::Middleware; 17 | #[doc = "SeaportDomainRegistry was auto-generated with ethers-rs Abigen. 
More information at: https://github.com/gakonst/ethers-rs"] 18 | use std::sync::Arc; 19 | pub static SEAPORTDOMAINREGISTRY_ABI: ethers::contract::Lazy = 20 | ethers::contract::Lazy::new(|| { 21 | serde_json :: from_str ("[\n {\n \"inputs\": [\n { \"internalType\": \"string\", \"name\": \"domain\", \"type\": \"string\" }\n ],\n \"name\": \"DomainAlreadyRegistered\",\n \"type\": \"error\"\n },\n {\n \"inputs\": [\n { \"internalType\": \"bytes4\", \"name\": \"tag\", \"type\": \"bytes4\" },\n {\n \"internalType\": \"uint256\",\n \"name\": \"maxIndex\",\n \"type\": \"uint256\"\n },\n {\n \"internalType\": \"uint256\",\n \"name\": \"suppliedIndex\",\n \"type\": \"uint256\"\n }\n ],\n \"name\": \"DomainIndexOutOfRange\",\n \"type\": \"error\"\n },\n {\n \"anonymous\": false,\n \"inputs\": [\n {\n \"indexed\": false,\n \"internalType\": \"string\",\n \"name\": \"domain\",\n \"type\": \"string\"\n },\n {\n \"indexed\": false,\n \"internalType\": \"bytes4\",\n \"name\": \"tag\",\n \"type\": \"bytes4\"\n },\n {\n \"indexed\": false,\n \"internalType\": \"uint256\",\n \"name\": \"index\",\n \"type\": \"uint256\"\n }\n ],\n \"name\": \"DomainRegistered\",\n \"type\": \"event\"\n },\n {\n \"inputs\": [\n { \"internalType\": \"bytes4\", \"name\": \"tag\", \"type\": \"bytes4\" },\n { \"internalType\": \"uint256\", \"name\": \"index\", \"type\": \"uint256\" }\n ],\n \"name\": \"getDomain\",\n \"outputs\": [\n { \"internalType\": \"string\", \"name\": \"domain\", \"type\": \"string\" }\n ],\n \"stateMutability\": \"view\",\n \"type\": \"function\"\n },\n {\n \"inputs\": [\n { \"internalType\": \"bytes4\", \"name\": \"tag\", \"type\": \"bytes4\" }\n ],\n \"name\": \"getDomains\",\n \"outputs\": [\n {\n \"internalType\": \"string[]\",\n \"name\": \"domains\",\n \"type\": \"string[]\"\n }\n ],\n \"stateMutability\": \"view\",\n \"type\": \"function\"\n },\n {\n \"inputs\": [\n { \"internalType\": \"bytes4\", \"name\": \"tag\", \"type\": \"bytes4\" }\n ],\n \"name\": 
\"getNumberOfDomains\",\n \"outputs\": [\n {\n \"internalType\": \"uint256\",\n \"name\": \"totalDomains\",\n \"type\": \"uint256\"\n }\n ],\n \"stateMutability\": \"view\",\n \"type\": \"function\"\n },\n {\n \"inputs\": [\n { \"internalType\": \"string\", \"name\": \"domain\", \"type\": \"string\" }\n ],\n \"name\": \"setDomain\",\n \"outputs\": [\n { \"internalType\": \"bytes4\", \"name\": \"tag\", \"type\": \"bytes4\" }\n ],\n \"stateMutability\": \"nonpayable\",\n \"type\": \"function\"\n }\n]\n") . expect ("invalid abi") 22 | }); 23 | pub struct SeaportDomainRegistry(ethers::contract::Contract); 24 | impl Clone for SeaportDomainRegistry { 25 | fn clone(&self) -> Self { 26 | SeaportDomainRegistry(self.0.clone()) 27 | } 28 | } 29 | impl std::ops::Deref for SeaportDomainRegistry { 30 | type Target = ethers::contract::Contract; 31 | fn deref(&self) -> &Self::Target { 32 | &self.0 33 | } 34 | } 35 | impl std::fmt::Debug for SeaportDomainRegistry { 36 | fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { 37 | f.debug_tuple(stringify!(SeaportDomainRegistry)) 38 | .field(&self.address()) 39 | .finish() 40 | } 41 | } 42 | impl SeaportDomainRegistry { 43 | #[doc = r" Creates a new contract instance with the specified `ethers`"] 44 | #[doc = r" client at the given `Address`. 
The contract derefs to a `ethers::Contract`"] 45 | #[doc = r" object"] 46 | pub fn new>( 47 | address: T, 48 | client: ::std::sync::Arc, 49 | ) -> Self { 50 | ethers::contract::Contract::new( 51 | address.into(), 52 | SEAPORTDOMAINREGISTRY_ABI.clone(), 53 | client, 54 | ) 55 | .into() 56 | } 57 | #[doc = "Calls the contract's `getDomain` (0xeab5fc24) function"] 58 | pub fn get_domain( 59 | &self, 60 | tag: [u8; 4], 61 | index: ethers::core::types::U256, 62 | ) -> ethers::contract::builders::ContractCall { 63 | self.0 64 | .method_hash([234, 181, 252, 36], (tag, index)) 65 | .expect("method not found (this should never happen)") 66 | } 67 | #[doc = "Calls the contract's `getDomains` (0xd45619b6) function"] 68 | pub fn get_domains( 69 | &self, 70 | tag: [u8; 4], 71 | ) -> ethers::contract::builders::ContractCall> { 72 | self.0 73 | .method_hash([212, 86, 25, 182], tag) 74 | .expect("method not found (this should never happen)") 75 | } 76 | #[doc = "Calls the contract's `getNumberOfDomains` (0x432ba75c) function"] 77 | pub fn get_number_of_domains( 78 | &self, 79 | tag: [u8; 4], 80 | ) -> ethers::contract::builders::ContractCall { 81 | self.0 82 | .method_hash([67, 43, 167, 92], tag) 83 | .expect("method not found (this should never happen)") 84 | } 85 | #[doc = "Calls the contract's `setDomain` (0xe5eab096) function"] 86 | pub fn set_domain( 87 | &self, 88 | domain: String, 89 | ) -> ethers::contract::builders::ContractCall { 90 | self.0 91 | .method_hash([229, 234, 176, 150], domain) 92 | .expect("method not found (this should never happen)") 93 | } 94 | #[doc = "Gets the contract's `DomainRegistered` event"] 95 | pub fn domain_registered_filter( 96 | &self, 97 | ) -> ethers::contract::builders::Event { 98 | self.0.event() 99 | } 100 | #[doc = r" Returns an [`Event`](#ethers_contract::builders::Event) builder for all events of this contract"] 101 | pub fn events(&self) -> ethers::contract::builders::Event { 102 | self.0.event_with_filter(Default::default()) 103 | } 
104 | } 105 | impl From> 106 | for SeaportDomainRegistry 107 | { 108 | fn from(contract: ethers::contract::Contract) -> Self { 109 | Self(contract) 110 | } 111 | } 112 | #[derive( 113 | Clone, 114 | Debug, 115 | Default, 116 | Eq, 117 | PartialEq, 118 | ethers :: contract :: EthEvent, 119 | ethers :: contract :: EthDisplay, 120 | )] 121 | #[ethevent( 122 | name = "DomainRegistered", 123 | abi = "DomainRegistered(string,bytes4,uint256)" 124 | )] 125 | pub struct DomainRegisteredFilter { 126 | pub domain: String, 127 | pub tag: [u8; 4], 128 | pub index: ethers::core::types::U256, 129 | } 130 | #[doc = "Container type for all input parameters for the `getDomain` function with signature `getDomain(bytes4,uint256)` and selector `[234, 181, 252, 36]`"] 131 | #[derive( 132 | Clone, 133 | Debug, 134 | Default, 135 | Eq, 136 | PartialEq, 137 | ethers :: contract :: EthCall, 138 | ethers :: contract :: EthDisplay, 139 | )] 140 | #[ethcall(name = "getDomain", abi = "getDomain(bytes4,uint256)")] 141 | pub struct GetDomainCall { 142 | pub tag: [u8; 4], 143 | pub index: ethers::core::types::U256, 144 | } 145 | #[doc = "Container type for all input parameters for the `getDomains` function with signature `getDomains(bytes4)` and selector `[212, 86, 25, 182]`"] 146 | #[derive( 147 | Clone, 148 | Debug, 149 | Default, 150 | Eq, 151 | PartialEq, 152 | ethers :: contract :: EthCall, 153 | ethers :: contract :: EthDisplay, 154 | )] 155 | #[ethcall(name = "getDomains", abi = "getDomains(bytes4)")] 156 | pub struct GetDomainsCall { 157 | pub tag: [u8; 4], 158 | } 159 | #[doc = "Container type for all input parameters for the `getNumberOfDomains` function with signature `getNumberOfDomains(bytes4)` and selector `[67, 43, 167, 92]`"] 160 | #[derive( 161 | Clone, 162 | Debug, 163 | Default, 164 | Eq, 165 | PartialEq, 166 | ethers :: contract :: EthCall, 167 | ethers :: contract :: EthDisplay, 168 | )] 169 | #[ethcall(name = "getNumberOfDomains", abi = "getNumberOfDomains(bytes4)")] 170 | 
pub struct GetNumberOfDomainsCall { 171 | pub tag: [u8; 4], 172 | } 173 | #[doc = "Container type for all input parameters for the `setDomain` function with signature `setDomain(string)` and selector `[229, 234, 176, 150]`"] 174 | #[derive( 175 | Clone, 176 | Debug, 177 | Default, 178 | Eq, 179 | PartialEq, 180 | ethers :: contract :: EthCall, 181 | ethers :: contract :: EthDisplay, 182 | )] 183 | #[ethcall(name = "setDomain", abi = "setDomain(string)")] 184 | pub struct SetDomainCall { 185 | pub domain: String, 186 | } 187 | #[derive(Debug, Clone, PartialEq, Eq, ethers :: contract :: EthAbiType)] 188 | pub enum SeaportDomainRegistryCalls { 189 | GetDomain(GetDomainCall), 190 | GetDomains(GetDomainsCall), 191 | GetNumberOfDomains(GetNumberOfDomainsCall), 192 | SetDomain(SetDomainCall), 193 | } 194 | impl ethers::core::abi::AbiDecode for SeaportDomainRegistryCalls { 195 | fn decode(data: impl AsRef<[u8]>) -> Result { 196 | if let Ok(decoded) = 197 | ::decode(data.as_ref()) 198 | { 199 | return Ok(SeaportDomainRegistryCalls::GetDomain(decoded)); 200 | } 201 | if let Ok(decoded) = 202 | ::decode(data.as_ref()) 203 | { 204 | return Ok(SeaportDomainRegistryCalls::GetDomains(decoded)); 205 | } 206 | if let Ok(decoded) = 207 | ::decode(data.as_ref()) 208 | { 209 | return Ok(SeaportDomainRegistryCalls::GetNumberOfDomains(decoded)); 210 | } 211 | if let Ok(decoded) = 212 | ::decode(data.as_ref()) 213 | { 214 | return Ok(SeaportDomainRegistryCalls::SetDomain(decoded)); 215 | } 216 | Err(ethers::core::abi::Error::InvalidData.into()) 217 | } 218 | } 219 | impl ethers::core::abi::AbiEncode for SeaportDomainRegistryCalls { 220 | fn encode(self) -> Vec { 221 | match self { 222 | SeaportDomainRegistryCalls::GetDomain(element) => element.encode(), 223 | SeaportDomainRegistryCalls::GetDomains(element) => element.encode(), 224 | SeaportDomainRegistryCalls::GetNumberOfDomains(element) => element.encode(), 225 | SeaportDomainRegistryCalls::SetDomain(element) => element.encode(), 226 | 
} 227 | } 228 | } 229 | impl ::std::fmt::Display for SeaportDomainRegistryCalls { 230 | fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { 231 | match self { 232 | SeaportDomainRegistryCalls::GetDomain(element) => element.fmt(f), 233 | SeaportDomainRegistryCalls::GetDomains(element) => element.fmt(f), 234 | SeaportDomainRegistryCalls::GetNumberOfDomains(element) => element.fmt(f), 235 | SeaportDomainRegistryCalls::SetDomain(element) => element.fmt(f), 236 | } 237 | } 238 | } 239 | impl ::std::convert::From for SeaportDomainRegistryCalls { 240 | fn from(var: GetDomainCall) -> Self { 241 | SeaportDomainRegistryCalls::GetDomain(var) 242 | } 243 | } 244 | impl ::std::convert::From for SeaportDomainRegistryCalls { 245 | fn from(var: GetDomainsCall) -> Self { 246 | SeaportDomainRegistryCalls::GetDomains(var) 247 | } 248 | } 249 | impl ::std::convert::From for SeaportDomainRegistryCalls { 250 | fn from(var: GetNumberOfDomainsCall) -> Self { 251 | SeaportDomainRegistryCalls::GetNumberOfDomains(var) 252 | } 253 | } 254 | impl ::std::convert::From for SeaportDomainRegistryCalls { 255 | fn from(var: SetDomainCall) -> Self { 256 | SeaportDomainRegistryCalls::SetDomain(var) 257 | } 258 | } 259 | #[doc = "Container type for all return fields from the `getDomain` function with signature `getDomain(bytes4,uint256)` and selector `[234, 181, 252, 36]`"] 260 | #[derive( 261 | Clone, 262 | Debug, 263 | Default, 264 | Eq, 265 | PartialEq, 266 | ethers :: contract :: EthAbiType, 267 | ethers :: contract :: EthAbiCodec, 268 | )] 269 | pub struct GetDomainReturn { 270 | pub domain: String, 271 | } 272 | #[doc = "Container type for all return fields from the `getDomains` function with signature `getDomains(bytes4)` and selector `[212, 86, 25, 182]`"] 273 | #[derive( 274 | Clone, 275 | Debug, 276 | Default, 277 | Eq, 278 | PartialEq, 279 | ethers :: contract :: EthAbiType, 280 | ethers :: contract :: EthAbiCodec, 281 | )] 282 | pub struct GetDomainsReturn { 283 | pub 
domains: ::std::vec::Vec, 284 | } 285 | #[doc = "Container type for all return fields from the `getNumberOfDomains` function with signature `getNumberOfDomains(bytes4)` and selector `[67, 43, 167, 92]`"] 286 | #[derive( 287 | Clone, 288 | Debug, 289 | Default, 290 | Eq, 291 | PartialEq, 292 | ethers :: contract :: EthAbiType, 293 | ethers :: contract :: EthAbiCodec, 294 | )] 295 | pub struct GetNumberOfDomainsReturn { 296 | pub total_domains: ethers::core::types::U256, 297 | } 298 | #[doc = "Container type for all return fields from the `setDomain` function with signature `setDomain(string)` and selector `[229, 234, 176, 150]`"] 299 | #[derive( 300 | Clone, 301 | Debug, 302 | Default, 303 | Eq, 304 | PartialEq, 305 | ethers :: contract :: EthAbiType, 306 | ethers :: contract :: EthAbiCodec, 307 | )] 308 | pub struct SetDomainReturn { 309 | pub tag: [u8; 4], 310 | } 311 | } 312 | -------------------------------------------------------------------------------- /src/configuration.rs: -------------------------------------------------------------------------------- 1 | use secrecy::Secret; 2 | use serde_aux::field_attributes::deserialize_number_from_string; 3 | use sqlx::postgres::{PgConnectOptions, PgSslMode}; 4 | use sqlx::ConnectOptions; 5 | use std::convert::{TryFrom, TryInto}; 6 | 7 | // All this seems a bit much rather than just using a few environment variables. 
8 | 9 | #[derive(serde::Deserialize, Clone)] 10 | pub struct DatabaseSettings { 11 | pub username: String, 12 | pub password: String, 13 | #[serde(deserialize_with = "deserialize_number_from_string")] 14 | pub port: u16, 15 | pub host: String, 16 | pub database_name: String, 17 | // To encrypt, or not to encrypt 18 | pub require_ssl: bool, 19 | } 20 | 21 | impl DatabaseSettings { 22 | pub fn without_db(&self) -> PgConnectOptions { 23 | let ssl_mode = if self.require_ssl { 24 | PgSslMode::Require 25 | } else { 26 | // Try an encrypted connection, fallback to unencrypted if it fails 27 | PgSslMode::Prefer 28 | }; 29 | PgConnectOptions::new() 30 | .host(&self.host) 31 | .username(&self.username) 32 | .password(&self.password) 33 | .port(self.port) 34 | .ssl_mode(ssl_mode) 35 | } 36 | 37 | pub fn with_db(&self) -> PgConnectOptions { 38 | let mut options = self.without_db().database(&self.database_name); 39 | options.log_statements(tracing::log::LevelFilter::Trace); 40 | options 41 | } 42 | } 43 | 44 | // TODO(Alternative structure to make this weildy for heroku?) 
/// HTTP server settings for the API binary.
#[derive(serde::Deserialize, Clone)]
pub struct ApplicationSettings {
    // Some platforms (e.g. Heroku's $PORT) deliver the port as a string.
    #[serde(deserialize_with = "deserialize_number_from_string")]
    pub port: u16,
    pub host: String,
    // Externally visible base URL, used when constructing absolute links.
    pub base_url: String,
    // Secret used for session/HMAC signing; wrapped so it never appears in logs.
    pub hmac_secret: Secret<String>,
}

/// Ethereum JSON-RPC endpoint configuration.
#[derive(serde::Deserialize, Clone)]
pub struct RPCSettings {
    pub uri: String,
    pub chain_id: i32,
}

/// Indexer start point configuration.
#[derive(serde::Deserialize, Clone)]
pub struct IndexerSettings {
    // First block worth scanning; nothing before the Seaport deployment can
    // contain Seaport events.
    pub seaport_deploy_block: i64,
}

/// Optional OpenAPI (paperclip) metadata.
#[derive(serde::Deserialize, Clone)]
pub struct PaperclipSettings {
    // NOTE(review): inner types reconstructed as Option<String> — confirm.
    pub version: Option<String>,
    pub title: Option<String>,
}

/// Root configuration object assembled by `get_configuration`.
#[derive(serde::Deserialize, Clone)]
pub struct Settings {
    pub database: DatabaseSettings,
    pub application: ApplicationSettings,
    pub rpc: RPCSettings,
    pub redis_url: Secret<String>,
    pub indexer: IndexerSettings,
}
/// Runtime environment the application can be configured for.
pub enum Environment {
    Local,
    Production,
}

impl Environment {
    /// Lowercase name used to locate this environment's configuration file.
    pub fn as_str(&self) -> &'static str {
        if let Environment::Production = self {
            "production"
        } else {
            "local"
        }
    }
}

impl TryFrom<String> for Environment {
    type Error = String;

    /// Case-insensitive parse; anything other than `local`/`production`
    /// is rejected with a descriptive message.
    fn try_from(s: String) -> Result<Self, Self::Error> {
        let normalized = s.to_lowercase();
        match normalized.as_str() {
            "local" => Ok(Environment::Local),
            "production" => Ok(Environment::Production),
            other => Err(format!(
                "{} is not a supported environment. Use either `local` or `production`.",
                other
            )),
        }
    }
}
/// Builds the INSERT for the top-level order row, keyed by Seaport order
/// hash; re-inserting an existing hash is a no-op.
///
/// NOTE(review): the numeric narrowing below (`as_u64() as i64`, `as i32`)
/// assumes timestamps/counters fit the target width — confirm for expected
/// order data.
pub fn save_order(hash: [u8; 32], order: &OrderInput) -> InsertOnlyQuery {
    sqlx::query!(
        r#"
        INSERT INTO orders (
            hash,
            offerer,
            zone,
            zone_hash,
            start_time,
            end_time,
            order_type,
            total_original_consideration_items,
            counter,
            salt,
            conduit_key,
            signature
        )
        VALUES ($1, $2::TEXT::citext, $3::TEXT::citext, $4, $5, $6, $7, $8, $9, $10, $11, $12)
        ON CONFLICT (hash) DO NOTHING;
        "#,
        hash.encode_hex(),
        order.parameters.offerer.encode_hex(),
        order.parameters.zone.encode_hex(),
        order.parameters.zone_hash.encode_hex(),
        order.parameters.start_time.as_u64() as i64,
        order.parameters.end_time.as_u64() as i64,
        order.parameters.order_type as i32,
        order.parameters.total_original_consideration_items as i32,
        // The input's `nonce` field maps to the `counter` column.
        order.parameters.nonce as i64,
        order.parameters.salt.to_string(),
        order.parameters.conduit_key.encode_hex(),
        order.signature.to_string(),
    )
}

/// Builds the INSERT for one offer item at `position` within its parent
/// order; duplicate ("order", position) pairs are ignored.
pub fn save_offer(hash: [u8; 32], position: i32, offer: &OfferItem) -> InsertOnlyQuery {
    sqlx::query!(
        r#"
        INSERT INTO offers (
            position,
            "order",
            item_type,
            token,
            identifier_or_criteria,
            start_amount,
            end_amount
        )
        VALUES ($1, $2, $3, $4::TEXT::citext, $5, $6, $7)
        ON CONFLICT ("order", position) DO NOTHING;
        "#,
        position,
        hash.encode_hex(),
        offer.item_type as i32,
        offer.token.encode_hex(),
        offer.identifier_or_criteria.encode_hex(),
        offer.start_amount.encode_hex(),
        offer.end_amount.encode_hex()
    )
}
/// Builds the INSERT for one consideration item at `position` within its
/// parent order; duplicate ("order", position) pairs are ignored.
///
/// Mirrors `save_offer` but also persists the payment `recipient`.
pub fn save_consideration(
    hash: [u8; 32],
    position: i32,
    consideration: &ConsiderationItem,
) -> InsertOnlyQuery {
    sqlx::query!(
        r#"
        INSERT INTO considerations (
            position,
            "order",
            item_type,
            token,
            identifier_or_criteria,
            start_amount,
            end_amount,
            recipient
        )
        VALUES ($1, $2, $3, $4::TEXT::citext, $5, $6, $7, $8::TEXT::citext)
        ON CONFLICT ("order", position) DO NOTHING;
        "#,
        position,
        hash.encode_hex(),
        consideration.item_type as i32,
        consideration.token.encode_hex(),
        consideration.identifier_or_criteria.encode_hex(),
        consideration.start_amount.encode_hex(),
        consideration.end_amount.encode_hex(),
        consideration.recipient.encode_hex()
    )
}
indexed_block 34 | ) 35 | .execute(pool) 36 | .await 37 | .map_err(|e| { 38 | tracing::error!("Failed to execute query: {:?}", e); 39 | e 40 | // Using the `?` operator to return early 41 | // if the function failed, returning a sqlx::Error 42 | })?; 43 | Ok(()) 44 | } 45 | 46 | pub async fn get_network(pool: &PgPool, network_id: &i32) -> Result { 47 | let network: Network = sqlx::query_as!( 48 | Network, 49 | r#"SELECT network, indexed_block FROM networks WHERE network = $1"#, 50 | network_id 51 | ) 52 | .fetch_one(pool) 53 | .await 54 | .map_err(|e| { 55 | tracing::error!("Failed to execute query: {:?}", e); 56 | e 57 | // Using the `?` operator to return early 58 | // if the function failed, returning a sqlx::Error 59 | })?; 60 | Ok(network) 61 | } 62 | 63 | pub async fn update_network( 64 | pool: &PgPool, 65 | network_id: &i32, 66 | indexed_block: &i64, 67 | ) -> Result<(), sqlx::Error> { 68 | sqlx::query!( 69 | r#"UPDATE networks SET indexed_block = $2 WHERE network = $1"#, 70 | network_id, 71 | indexed_block 72 | ) 73 | .execute(pool) 74 | .await 75 | .map_err(|e| { 76 | tracing::error!("Failed to execute query: {:?}", e); 77 | e 78 | // Using the `?` operator to return early 79 | // if the function failed, returning a sqlx::Error 80 | })?; 81 | Ok(()) 82 | } 83 | 84 | pub async fn update_order_fulfillment( 85 | pool: &PgPool, 86 | order_hash: String, 87 | fulfilled: bool, 88 | ) -> Result<(), sqlx::Error> { 89 | sqlx::query!( 90 | r#"UPDATE orders SET finalized = $2 WHERE hash = $1"#, 91 | order_hash, 92 | fulfilled 93 | ) 94 | .execute(pool) 95 | .await 96 | .map_err(|e| { 97 | tracing::error!("Failed to execute query: {:?}", e); 98 | e 99 | // Using the `?` operator to return early 100 | // if the function failed, returning a sqlx::Error 101 | })?; 102 | Ok(()) 103 | } 104 | 105 | pub async fn increment_offerer_counter( 106 | pool: &PgPool, 107 | offerer: Address, 108 | counter: U256, 109 | ) -> Result<(), sqlx::Error> { 110 | sqlx::query!( 111 | r#"UPDATE 
orders SET cancelled = true WHERE offerer = $1::TEXT::citext AND counter < $2"#, 112 | offerer.encode_hex(), 113 | counter.as_u64() as i64 114 | ) 115 | .execute(pool) 116 | .await 117 | .map_err(|e| { 118 | tracing::error!("Failed to execute query: {:?}", e); 119 | e 120 | // Using the `?` operator to return early 121 | // if the function failed, returning a sqlx::Error 122 | })?; 123 | Ok(()) 124 | } 125 | 126 | pub async fn update_order_cancellation( 127 | pool: &PgPool, 128 | order_hash: String, 129 | cancelled: bool, 130 | ) -> Result<(), sqlx::Error> { 131 | sqlx::query!( 132 | r#"UPDATE orders SET cancelled = $2 WHERE hash = $1"#, 133 | order_hash, 134 | cancelled 135 | ) 136 | .execute(pool) 137 | .await 138 | .map_err(|e| { 139 | tracing::error!("Failed to execute query: {:?}", e); 140 | e 141 | // Using the `?` operator to return early 142 | // if the function failed, returning a sqlx::Error 143 | })?; 144 | Ok(()) 145 | } 146 | 147 | struct Indexer { 148 | provider: Arc>>, 149 | seaport: Seaport>>, 150 | seaport_deploy_block: i64, 151 | chain_id: i32, 152 | pool: Arc, 153 | } 154 | 155 | impl Indexer { 156 | pub async fn new(configuration: Settings) -> Result { 157 | let pool = Arc::new(get_connection_pool(&configuration.database)); 158 | 159 | let provider: Arc>> = 160 | Arc::new(Provider::new_client(configuration.rpc.uri.as_str(), 3, 10).unwrap()); 161 | 162 | let seaport = Seaport::new( 163 | H160::from_str("0x00000000006c3852cbEf3e08E8dF289169EdE581").unwrap(), 164 | provider.clone(), 165 | ); 166 | 167 | let seaport_deploy_block = configuration.indexer.seaport_deploy_block; 168 | 169 | let chain_id = configuration.rpc.chain_id; 170 | 171 | Ok(Self { 172 | provider, 173 | seaport, 174 | seaport_deploy_block, 175 | chain_id, 176 | pool, 177 | }) 178 | } 179 | 180 | async fn get_block_events( 181 | &self, 182 | block_number: U64, 183 | ) -> Result< 184 | ( 185 | Vec<(OrderFulfilledFilter, LogMeta)>, 186 | Vec<(OrderCancelledFilter, LogMeta)>, 187 | 
Vec<(CounterIncrementedFilter, LogMeta)>, 188 | ), 189 | ContractError>>, 190 | > { 191 | let block_number = block_number; 192 | let fulfilled = self 193 | .seaport 194 | .order_fulfilled_filter() 195 | .from_block(block_number) 196 | .to_block(block_number); 197 | let cancelled = self 198 | .seaport 199 | .order_cancelled_filter() 200 | .from_block(block_number) 201 | .to_block(block_number); 202 | let counter_updated = self 203 | .seaport 204 | .counter_incremented_filter() 205 | .from_block(block_number) 206 | .to_block(block_number); 207 | let results = try_join!( 208 | fulfilled.query_with_meta(), 209 | cancelled.query_with_meta(), 210 | counter_updated.query_with_meta() 211 | ) 212 | .map_err(|e| { 213 | tracing::error!("Failed to get events: {:?}", e); 214 | e 215 | })?; 216 | 217 | Ok(results) 218 | } 219 | 220 | #[allow(clippy::type_complexity)] 221 | async fn process_block(&self, block_number: U64) -> Result<(), anyhow::Error> { 222 | // TODO(Build one sql transaction per block?) 
223 | let (fulfilled, cancelled, counter_updated) = self.get_block_events(block_number).await?; 224 | let mut cancellations = vec![]; 225 | for cancellation in cancelled { 226 | let cancellation_event = cancellation.0; 227 | 228 | let order_hash = cancellation_event.order_hash.encode_hex(); 229 | debug!("Cancellation for {}", &order_hash); 230 | cancellations.push(update_order_cancellation(&self.pool, order_hash, true)); 231 | } 232 | let mut fulfillments = vec![]; 233 | for fulfillment in fulfilled { 234 | let fulfillment_event = fulfillment.0; 235 | 236 | let order_hash = fulfillment_event.order_hash.encode_hex(); 237 | debug!("Fulfillment for {}", &order_hash); 238 | fulfillments.push(update_order_fulfillment( 239 | &self.pool, 240 | fulfillment_event.order_hash.encode_hex(), 241 | true, 242 | )); 243 | } 244 | let mut counter_updates = vec![]; 245 | for counter_update in counter_updated { 246 | let counter_update_event = counter_update.0; 247 | 248 | counter_updates.push(increment_offerer_counter( 249 | &self.pool, 250 | counter_update_event.offerer, 251 | counter_update_event.new_counter, 252 | )); 253 | } 254 | let result = try_join!( 255 | try_join_all(cancellations), 256 | try_join_all(fulfillments), 257 | try_join_all(counter_updates) 258 | ); 259 | 260 | result.unwrap(); 261 | Ok(()) 262 | } 263 | 264 | async fn run(&mut self) -> Result<(), anyhow::Error> { 265 | // TODO(Convert from batch to ordered queue with concurrency) 266 | let batch = 16; 267 | let watcher = self.provider.clone(); 268 | let mut block_stream = watcher.watch_blocks().await?; 269 | // One block before the eth registrar controller was deployed 270 | // was block # 9380470 271 | let deploy_block = self.seaport_deploy_block; 272 | init_network(&self.pool, &self.chain_id, &deploy_block).await?; 273 | let mut next_block_to_process = 274 | U64::from(get_network(&self.pool, &self.chain_id).await?.indexed_block); 275 | let mut block_number: U64; 276 | info!("Waiting for next block from eth 
    /// Main indexing loop: seeds the checkpoint row, then on every new block
    /// notification catches the database up to the chain head in batches of
    /// `batch` blocks, persisting the checkpoint after each batch.
    async fn run(&mut self) -> Result<(), anyhow::Error> {
        // TODO(Convert from batch to ordered queue with concurrency)
        let batch = 16;
        let watcher = self.provider.clone();
        let mut block_stream = watcher.watch_blocks().await?;
        // One block before the eth registrar controller was deployed
        // was block # 9380470
        let deploy_block = self.seaport_deploy_block;
        // No-op if the checkpoint row already exists (ON CONFLICT DO NOTHING).
        init_network(&self.pool, &self.chain_id, &deploy_block).await?;
        // Resume from the last persisted checkpoint rather than the deploy block.
        let mut next_block_to_process =
            U64::from(get_network(&self.pool, &self.chain_id).await?.indexed_block);
        let mut block_number: U64;
        info!("Waiting for next block from eth node");
        while block_stream.next().await.is_some() {
            // NOTE(review): the stream item (a block hash) is discarded and the
            // latest block is re-queried; unwraps here panic if the RPC node
            // returns no block — the outer supervisor loop absorbs that.
            block_number = self
                .provider
                .get_block(BlockNumber::Latest)
                .await
                .unwrap()
                .unwrap()
                .number
                .unwrap();
            info!("Got block {}", block_number);
            // Catch up from the checkpoint to the current head, batch by batch.
            while next_block_to_process <= block_number {
                let blocks_remaining = block_number - next_block_to_process;
                // Clamp the batch so we never process past the head.
                let end_batch = if blocks_remaining < U64::from(batch) {
                    next_block_to_process + blocks_remaining
                } else {
                    next_block_to_process + U64::from(batch)
                };
                info!(
                    "Processing block: {} to {} of {}",
                    next_block_to_process, end_batch, block_number
                );
                // Process the batch's blocks concurrently; any failure aborts
                // before the checkpoint below is advanced.
                let mut tasks = vec![];
                while next_block_to_process <= end_batch {
                    tasks.push(self.process_block(next_block_to_process));
                    next_block_to_process += U64::from(1);
                }
                try_join_all(tasks).await?;
                // Persist the checkpoint as the first *unprocessed* block.
                update_network(&self.pool, &self.chain_id, &(end_batch.as_u64() as i64 + 1))
                    .await?;
            }
        }
        Ok(())
    }
313 | #[tokio::main] 314 | pub async fn run(configuration: Settings) -> Result<(), anyhow::Error> { 315 | // TODO(Handle SIGINT, SIGKILL gracefully) 316 | // We want to keep the indexer running if DB or RPC times out 317 | loop { 318 | let mut indexer = Indexer::new(configuration.clone()).await?; 319 | // Let's index and throw away errors in case of a db timeout or whatever 320 | info!("Running indexer"); 321 | let _result = indexer.run().await; 322 | warn!("Indexer stopped/timed out, restarting!"); 323 | // Sleep 1 second in case of a crash 324 | sleep(Duration::from_secs(1)).await; 325 | } 326 | } 327 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod auth; 2 | pub mod bindings; 3 | pub mod configuration; 4 | pub mod database; 5 | pub mod indexer; 6 | pub mod middleware; 7 | pub mod redis_pool; 8 | pub mod routes; 9 | pub mod services; 10 | pub mod startup; 11 | pub mod state; 12 | pub mod structs; 13 | pub mod telemetry; 14 | pub mod types; 15 | pub mod utils; 16 | 17 | pub mod rfq { 18 | #![allow(clippy::derive_partial_eq_without_eq)] 19 | tonic::include_proto!("quay"); 20 | } 21 | 22 | pub mod session { 23 | #![allow(clippy::derive_partial_eq_without_eq)] 24 | tonic::include_proto!("session"); 25 | } 26 | -------------------------------------------------------------------------------- /src/middleware/metrics.rs: -------------------------------------------------------------------------------- 1 | use axum::{extract::MatchedPath, http::Request, middleware::Next, response::IntoResponse}; 2 | use std::time::Instant; 3 | 4 | use crate::telemetry::{get_metric_storage_registry, ApiMetrics}; 5 | 6 | // TODO(This middleware should probably just live in the `telemetry` module) 7 | pub async fn track_prometheus_metrics(req: Request, next: Next) -> impl IntoResponse { 8 | let metrics = 
ApiMetrics::inst(get_metric_storage_registry()).unwrap(); 9 | 10 | let path = if let Some(matched_path) = req.extensions().get::() { 11 | matched_path.as_str().to_owned() 12 | } else { 13 | req.uri().path().to_owned() 14 | }; 15 | let method = req.method().clone(); 16 | let start = Instant::now(); 17 | 18 | let response = next.run(req).await; 19 | 20 | let latency = start.elapsed(); 21 | let status = response.status().as_u16(); 22 | 23 | metrics.on_request_completed(&path, method.as_str(), status, latency); 24 | 25 | response 26 | } 27 | -------------------------------------------------------------------------------- /src/middleware/mod.rs: -------------------------------------------------------------------------------- 1 | mod metrics; 2 | mod request_id; 3 | 4 | pub use metrics::*; 5 | pub use request_id::*; 6 | -------------------------------------------------------------------------------- /src/middleware/request_id.rs: -------------------------------------------------------------------------------- 1 | use std::fmt; 2 | use std::task::{Context, Poll}; 3 | 4 | use http::Request; 5 | use tower_layer::Layer; 6 | use tower_service::Service; 7 | use ulid::Ulid; 8 | 9 | #[derive(Debug)] 10 | pub struct RequestId(pub Ulid); 11 | 12 | impl RequestId { 13 | fn new() -> Self { 14 | Self(Ulid::new()) 15 | } 16 | } 17 | 18 | impl fmt::Display for RequestId { 19 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { 20 | let mut buffer = [0; ulid::ULID_LEN]; 21 | write!(f, "{}", self.0.to_str(&mut buffer).unwrap_or_default()) 22 | } 23 | } 24 | 25 | #[derive(Clone, Debug)] 26 | pub struct RequestIdService { 27 | inner: S, 28 | } 29 | 30 | impl RequestIdService { 31 | pub fn new(inner: S) -> Self { 32 | Self { inner } 33 | } 34 | } 35 | 36 | impl Service> for RequestIdService 37 | where 38 | S: Service>, 39 | { 40 | type Response = S::Response; 41 | type Error = S::Error; 42 | type Future = S::Future; 43 | 44 | #[inline] 45 | fn poll_ready(&mut self, cx: 
&mut Context<'_>) -> Poll> { 46 | self.inner.poll_ready(cx) 47 | } 48 | 49 | fn call(&mut self, mut req: Request) -> Self::Future { 50 | let id = RequestId::new(); 51 | req.extensions_mut().insert(id); 52 | self.inner.call(req) 53 | } 54 | } 55 | 56 | #[derive(Clone, Debug)] 57 | pub struct RequestIdLayer; 58 | 59 | impl Layer for RequestIdLayer { 60 | type Service = RequestIdService; 61 | 62 | fn layer(&self, inner: S) -> Self::Service { 63 | RequestIdService { inner } 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /src/redis_pool.rs: -------------------------------------------------------------------------------- 1 | // From https://github.com/djc/bb8/blob/main/redis/src/lib.rs 2 | // Brought in here to use tokio-native-tls-comp and later tokio-rustls-comp 3 | // TODO(https://github.com/redis-rs/redis-rs/pull/725) 4 | // Rather than tokio-comp 5 | 6 | use async_trait::async_trait; 7 | use bb8; 8 | use redis::{aio::Connection, ErrorKind}; 9 | use redis::{Client, IntoConnectionInfo, RedisError}; 10 | 11 | /// A `bb8::ManageConnection` for `redis::Client::get_async_connection`. 12 | #[derive(Clone, Debug)] 13 | pub struct RedisConnectionManager { 14 | client: Client, 15 | } 16 | 17 | impl RedisConnectionManager { 18 | /// Create a new `RedisConnectionManager`. 19 | /// See `redis::Client::open` for a description of the parameter types. 
20 | pub fn new(info: T) -> Result { 21 | Ok(RedisConnectionManager { 22 | client: Client::open(info.into_connection_info()?)?, 23 | }) 24 | } 25 | } 26 | 27 | #[async_trait] 28 | impl bb8::ManageConnection for RedisConnectionManager { 29 | type Connection = Connection; 30 | type Error = RedisError; 31 | 32 | async fn connect(&self) -> Result { 33 | self.client.get_tokio_connection().await 34 | } 35 | 36 | async fn is_valid(&self, conn: &mut Self::Connection) -> Result<(), Self::Error> { 37 | let pong: String = redis::cmd("PING").query_async(conn).await?; 38 | match pong.as_str() { 39 | "PONG" => Ok(()), 40 | _ => Err((ErrorKind::ResponseError, "ping request").into()), 41 | } 42 | } 43 | 44 | fn has_broken(&self, _: &mut Self::Connection) -> bool { 45 | false 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /src/routes/health_check.rs: -------------------------------------------------------------------------------- 1 | use axum::response::IntoResponse; 2 | use http::StatusCode; 3 | 4 | pub async fn health_check() -> impl IntoResponse { 5 | StatusCode::OK 6 | } 7 | -------------------------------------------------------------------------------- /src/routes/metrics/mod.rs: -------------------------------------------------------------------------------- 1 | mod prometheus; 2 | 3 | pub use self::prometheus::*; 4 | -------------------------------------------------------------------------------- /src/routes/metrics/prometheus.rs: -------------------------------------------------------------------------------- 1 | use axum::response::IntoResponse; 2 | use http::StatusCode; 3 | use prometheus::Encoder; 4 | 5 | use crate::telemetry::get_metrics_registry; 6 | 7 | pub async fn metrics_prometheus() -> impl IntoResponse { 8 | let prometheus_storage_registry = get_metrics_registry(); 9 | let encoder = prometheus::TextEncoder::new(); 10 | let mut buffer = Vec::new(); 11 | match 
encoder.encode(&prometheus_storage_registry.gather(), &mut buffer) { 12 | Ok(_) => {} 13 | Err(err) => { 14 | tracing::error!("could not encode metrics: {}", err); 15 | return (StatusCode::INTERNAL_SERVER_ERROR).into_response(); 16 | } 17 | } 18 | 19 | let metrics = match String::from_utf8(buffer) { 20 | Ok(r) => r, 21 | Err(e) => { 22 | tracing::error!("metrics could not be from_utf8'd: {}", e); 23 | String::default() 24 | } 25 | }; 26 | 27 | metrics.into_response() 28 | } 29 | -------------------------------------------------------------------------------- /src/routes/mod.rs: -------------------------------------------------------------------------------- 1 | mod health_check; 2 | mod metrics; 3 | mod nft_market; 4 | mod sessions; 5 | 6 | pub use health_check::*; 7 | pub use metrics::*; 8 | pub use nft_market::*; 9 | pub use sessions::*; 10 | -------------------------------------------------------------------------------- /src/routes/nft_market/create_listing.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Error; 2 | use axum::{ 3 | extract::{Json, State}, 4 | response::IntoResponse, 5 | }; 6 | use axum_sessions::extractors::ReadableSession; 7 | use ethers::prelude::*; 8 | use http::StatusCode; 9 | use sqlx::PgPool; 10 | 11 | use crate::auth::verify_session; 12 | use crate::{ 13 | bindings::seaport::Seaport, 14 | database::{save_address, save_consideration, save_offer}, 15 | }; 16 | use crate::{database::save_order, structs::OrderInput}; 17 | 18 | #[tracing::instrument( 19 | name = "Adding a new listing", 20 | skip(db_pool, seaport, session, listing), 21 | fields( 22 | offerer = %listing.parameters.offerer, 23 | ) 24 | )] 25 | pub async fn create_listing( 26 | session: ReadableSession, 27 | State(db_pool): State, 28 | State(seaport): State>>, 29 | Json(listing): Json, 30 | ) -> impl IntoResponse { 31 | let authenticated = verify_session(&session).await.into_response(); 32 | if authenticated.status() != 
StatusCode::OK { 33 | return authenticated; 34 | } 35 | 36 | if insert_listing(&db_pool, &listing, &seaport).await.is_err() { 37 | return (StatusCode::INTERNAL_SERVER_ERROR).into_response(); 38 | } 39 | 40 | (StatusCode::OK).into_response() 41 | } 42 | 43 | #[tracing::instrument( 44 | name = "Saving new listing details in the database", 45 | skip(new_listing, pool, seaport) 46 | )] 47 | pub async fn insert_listing( 48 | pool: &PgPool, 49 | new_listing: &OrderInput, 50 | seaport: &Seaport>, 51 | ) -> Result<(), Error> { 52 | // Could we generate this without an RPC call? 53 | 54 | let order_hash = seaport 55 | .get_order_hash(new_listing.to_components().await) 56 | .call() 57 | .await 58 | // An RPC failure here used to `expect` and panic the handler task; return an 58 | // error instead so the caller can answer with a 500. 58 | .map_err(|e| { 58 | tracing::error!("Failed to calculate order hash: {:?}", e); 58 | anyhow::anyhow!("failed to calculate order hash: {:?}", e) 58 | })?; 59 | // TODO(Ensure the order hasn't been filled) 60 | // TODO(Any other semantic validation which needs to occur from the RPC) 61 | // TODO(Implement additional queries for offers and considerations) 62 | 63 | let mut tx = pool.begin().await.map_err(|e| { 64 | tracing::error!("Failed to begin transaction: {:?}", e); 65 | e 66 | })?; 67 | save_address(new_listing.parameters.offerer) 68 | .execute(&mut tx) 69 | .await 70 | .map_err(|e| { 71 | tracing::error!("Failed to execute query: {:?}", e); 72 | e 73 | })?; 74 | save_address(new_listing.parameters.zone) 75 | .execute(&mut tx) 76 | .await 77 | .map_err(|e| { 78 | tracing::error!("Failed to execute query: {:?}", e); 79 | e 80 | })?; 81 | save_order(order_hash, new_listing) 82 | .execute(&mut tx) 83 | .await 84 | .map_err(|e| { 85 | tracing::error!("Failed to execute query: {:?}", e); 86 | e 87 | })?; 88 | let mut position = 0; 89 | for offer in &new_listing.parameters.offer { 90 | save_address(offer.token) 91 | .execute(&mut tx) 92 | .await 93 | .map_err(|e| { 94 | tracing::error!("Failed to execute query: {:?}", e); 95 | e 96 | })?; 97 | 98 | save_offer(order_hash, position, offer) 99 | .execute(&mut tx) 100 | .await 101 | .map_err(|e| { 102 | tracing::error!("Failed to execute query: {:?}",
e); 103 | e 104 | })?; 105 | position += 1; 106 | } 107 | position = 0; 108 | for consideration in &new_listing.parameters.consideration { 109 | save_address(consideration.token) 110 | .execute(&mut tx) 111 | .await 112 | .map_err(|e| { 113 | tracing::error!("Failed to execute query: {:?}", e); 114 | e 115 | })?; 116 | save_address(consideration.recipient) 117 | .execute(&mut tx) 118 | .await 119 | .map_err(|e| { 120 | tracing::error!("Failed to execute query: {:?}", e); 121 | e 122 | })?; 123 | 124 | save_consideration(order_hash, position, consideration) 125 | .execute(&mut tx) 126 | .await 127 | .map_err(|e| { 128 | tracing::error!("Failed to execute query: {:?}", e); 129 | e 130 | })?; 131 | position += 1; 132 | } 133 | tx.commit().await.map_err(|e| { 134 | tracing::error!("Failed to commit transaction: {:?}", e); 135 | e 136 | })?; 137 | Ok(()) 138 | } 139 | -------------------------------------------------------------------------------- /src/routes/nft_market/create_offer.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Error; 2 | use axum::{ 3 | extract::{Json, State}, 4 | response::IntoResponse, 5 | }; 6 | use axum_sessions::extractors::ReadableSession; 7 | use ethers::prelude::*; 8 | use http::StatusCode; 9 | use sqlx::PgPool; 10 | 11 | use crate::auth::verify_session; 12 | use crate::structs::OrderInput; 13 | use crate::{ 14 | bindings::seaport::Seaport, 15 | database::{save_address, save_consideration, save_offer, save_order}, 16 | }; 17 | 18 | #[tracing::instrument( 19 | name = "Adding a new offer", 20 | skip(session, offer, db_pool, seaport), 21 | fields( 22 | offerer = %offer.parameters.offerer, 23 | ) 24 | )] 25 | pub async fn create_offer( 26 | session: ReadableSession, 27 | State(db_pool): State, 28 | State(seaport): State>>, 29 | Json(offer): Json, 30 | ) -> impl IntoResponse { 31 | let authenticated = verify_session(&session).await.into_response(); 32 | if authenticated.status() != 
StatusCode::OK { 33 | return authenticated; 34 | } 35 | 36 | if insert_offer_db(&db_pool, &offer, &seaport).await.is_err() { 37 | return (StatusCode::INTERNAL_SERVER_ERROR).into_response(); 38 | } 39 | 40 | (StatusCode::OK).into_response() 41 | } 42 | 43 | #[tracing::instrument( 44 | name = "Saving new offer details in the database", 45 | skip(new_offer, pool, seaport) 46 | )] 47 | pub async fn insert_offer_db( 48 | pool: &PgPool, 49 | new_offer: &OrderInput, 50 | seaport: &Seaport>, 51 | ) -> Result<(), Error> { 52 | // Could we generate this without an RPC call? 53 | 54 | let order_hash = seaport 55 | .get_order_hash(new_offer.to_components().await) 56 | .call() 57 | .await 58 | // An RPC failure here used to `expect` and panic the handler task; return an 58 | // error instead so the caller can answer with a 500. 58 | .map_err(|e| { 58 | tracing::error!("Failed to calculate order hash: {:?}", e); 58 | anyhow::anyhow!("failed to calculate order hash: {:?}", e) 58 | })?; 59 | // TODO(Ensure the order hasn't been filled) 60 | // TODO(Any other semantic validation which needs to occur from the RPC) 61 | // TODO(Implement additional queries for offers and considerations) 62 | 63 | let mut tx = pool.begin().await.map_err(|e| { 64 | tracing::error!("Failed to begin transaction: {:?}", e); 65 | e 66 | })?; 67 | save_address(new_offer.parameters.offerer) 68 | .execute(&mut tx) 69 | .await 70 | .map_err(|e| { 71 | tracing::error!("Failed to execute query: {:?}", e); 72 | e 73 | })?; 74 | save_address(new_offer.parameters.zone) 75 | .execute(&mut tx) 76 | .await 77 | .map_err(|e| { 78 | tracing::error!("Failed to execute query: {:?}", e); 79 | e 80 | })?; 81 | save_order(order_hash, new_offer) 82 | .execute(&mut tx) 83 | .await 84 | .map_err(|e| { 85 | tracing::error!("Failed to execute query: {:?}", e); 86 | e 87 | })?; 88 | let mut position = 0; 89 | for offer in &new_offer.parameters.offer { 90 | save_address(offer.token) 91 | .execute(&mut tx) 92 | .await 93 | .map_err(|e| { 94 | tracing::error!("Failed to execute query: {:?}", e); 95 | e 96 | })?; 97 | 98 | save_offer(order_hash, position, offer) 99 | .execute(&mut tx) 100 | .await 101 | .map_err(|e| { 102 | tracing::error!("Failed to execute query: {:?}", e); 103 | e 104
| })?; 105 | position += 1; 106 | } 107 | position = 0; 108 | for consideration in &new_offer.parameters.consideration { 109 | save_address(consideration.token) 110 | .execute(&mut tx) 111 | .await 112 | .map_err(|e| { 113 | tracing::error!("Failed to execute query: {:?}", e); 114 | e 115 | })?; 116 | save_address(consideration.recipient) 117 | .execute(&mut tx) 118 | .await 119 | .map_err(|e| { 120 | tracing::error!("Failed to execute query: {:?}", e); 121 | e 122 | })?; 123 | 124 | save_consideration(order_hash, position, consideration) 125 | .execute(&mut tx) 126 | .await 127 | .map_err(|e| { 128 | tracing::error!("Failed to execute query: {:?}", e); 129 | e 130 | })?; 131 | position += 1; 132 | } 133 | tx.commit().await.map_err(|e| { 134 | tracing::error!("Failed to commit transaction: {:?}", e); 135 | e 136 | })?; 137 | Ok(()) 138 | } 139 | -------------------------------------------------------------------------------- /src/routes/nft_market/mod.rs: -------------------------------------------------------------------------------- 1 | mod create_listing; 2 | mod create_offer; 3 | mod retrieve_listings; 4 | mod retrieve_offers; 5 | 6 | pub use create_listing::*; 7 | pub use create_offer::*; 8 | pub use retrieve_listings::*; 9 | pub use retrieve_offers::*; 10 | -------------------------------------------------------------------------------- /src/routes/nft_market/retrieve_listings.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Error; 2 | use axum::{ 3 | extract::{Query, State}, 4 | response::IntoResponse, 5 | Json, 6 | }; 7 | use ethers::{abi::AbiEncode, prelude::*}; 8 | use http::StatusCode; 9 | use sqlx::{query_as, PgPool}; 10 | 11 | use crate::structs::{DBConsideration, DBOffer, DBOrder, OrderQuery, RetrieveResponse}; 12 | 13 | pub async fn retrieve_listings( 14 | State(pool): State, 15 | query: Query, 16 | ) -> impl IntoResponse { 17 | match retrieve_listings_db( 18 | &pool, 19 | 
query.asset_contract_address.encode_hex(), 20 | query 21 | .token_ids 22 | .clone() 23 | .into_iter() 24 | .map(|token_id| { 25 | U256::from_str_radix(&token_id, 10) 26 | .unwrap_or(U256::MAX) 27 | .encode_hex() 28 | }) 29 | .collect::>() 30 | .as_slice(), 31 | query.offerer.encode_hex(), 32 | query.limit, 33 | ) 34 | .await 35 | { 36 | Ok(retrieved_listings) => { 37 | (StatusCode::OK, Json::(retrieved_listings)).into_response() 38 | } 39 | _ => (StatusCode::INTERNAL_SERVER_ERROR).into_response(), 40 | } 41 | } 42 | 43 | async fn retrieve_listings_db( 44 | pool: &PgPool, 45 | asset_contract_address: String, 46 | token_ids: &[String], 47 | offerer: String, 48 | limit: Option, 49 | ) -> Result { 50 | let db_orders: Vec = query_as!( 51 | DBOrder, 52 | r#" 53 | SELECT 54 | O.hash as "hash!", 55 | O.offerer::TEXT as "offerer!", 56 | O.zone::TEXT as "zone!", 57 | O.zone_hash as "zone_hash!", 58 | O.start_time as "start_time!", 59 | O.end_time as "end_time!", 60 | O.order_type as "order_type!", 61 | O.total_original_consideration_items as "total_original_consideration_items!", 62 | O.counter as "counter!", 63 | O.salt as "salt!", 64 | O.conduit_key as "conduit_key!", 65 | O.signature as "signature!", 66 | array_agg(DISTINCT ( 67 | OC.position, 68 | OC.item_type, 69 | OC.token::TEXT, 70 | OC.identifier_or_criteria, 71 | OC.start_amount, 72 | OC.end_amount, 73 | OC.recipient::TEXT 74 | )) AS "considerations!: Vec", 75 | array_agg(DISTINCT ( 76 | OOF.position, 77 | OOF.item_type, 78 | OOF.token::TEXT, 79 | OOF.identifier_or_criteria, 80 | OOF.start_amount, 81 | OOF.end_amount 82 | )) AS "offers!: Vec" 83 | FROM orders O 84 | INNER JOIN considerations OC ON O.hash = OC.order 85 | INNER JOIN offers OOF ON O.hash = OOF.order 86 | WHERE O.hash IN ( 87 | SELECT OF.order FROM offers OF 88 | WHERE (OF.token = $1::TEXT::citext OR $1::TEXT::citext = '0x0000000000000000000000000000000000000000000000000000000000000000') 89 | AND (OF.identifier_or_criteria = ANY($2::TEXT[]) OR 
cardinality($2::TEXT[]) = 0) 90 | ) 91 | AND (O.offerer = $3::TEXT::citext OR $3::TEXT::citext = '0x0000000000000000000000000000000000000000000000000000000000000000') 92 | GROUP BY O.hash 93 | LIMIT $4; 94 | "#, 95 | asset_contract_address, 96 | &token_ids[..], 97 | offerer, 98 | limit.unwrap_or(1) 99 | ) 100 | .fetch_all(pool) 101 | .await 102 | .map_err(|e| { 103 | tracing::error!("Failed to execute query: {:?}", e); 104 | e 105 | // Using the `?` operator to return early 106 | // if the function failed, returning a sqlx::Error 107 | })?; 108 | 109 | RetrieveResponse::from_db_struct(&db_orders) 110 | } 111 | -------------------------------------------------------------------------------- /src/routes/nft_market/retrieve_offers.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Error; 2 | use axum::{ 3 | extract::{Query, State}, 4 | response::IntoResponse, 5 | Json, 6 | }; 7 | use ethers::{abi::AbiEncode, prelude::*}; 8 | use http::StatusCode; 9 | use sqlx::{query_as, PgPool}; 10 | 11 | use crate::structs::{DBConsideration, DBOffer, DBOrder, OrderQuery, RetrieveResponse}; 12 | 13 | pub async fn retrieve_offers( 14 | State(pool): State, 15 | query: Query, 16 | ) -> impl IntoResponse { 17 | match retrieve_offers_db( 18 | &pool, 19 | query.asset_contract_address.encode_hex(), 20 | query 21 | .token_ids 22 | .clone() 23 | .into_iter() 24 | .map(|token_id| { 25 | U256::from_str_radix(&token_id, 10) 26 | .unwrap_or(U256::MAX) 27 | .encode_hex() 28 | }) 29 | .collect::>() 30 | .as_slice(), 31 | query.offerer.encode_hex(), 32 | query.limit, 33 | ) 34 | .await 35 | { 36 | Ok(retrieved_listings) => { 37 | (StatusCode::OK, Json::(retrieved_listings)).into_response() 38 | } 39 | _ => (StatusCode::INTERNAL_SERVER_ERROR).into_response(), 40 | } 41 | } 42 | 43 | async fn retrieve_offers_db( 44 | pool: &PgPool, 45 | asset_contract_address: String, 46 | token_ids: &[String], 47 | offerer: String, 48 | limit: Option, 49 | ) -> Result 
{ 50 | let db_orders: Vec = query_as!( 51 | DBOrder, 52 | r#" 53 | SELECT 54 | O.hash as "hash!", 55 | O.offerer::TEXT as "offerer!", 56 | O.zone::TEXT as "zone!", 57 | O.zone_hash as "zone_hash!", 58 | O.start_time as "start_time!", 59 | O.end_time as "end_time!", 60 | O.order_type as "order_type!", 61 | O.total_original_consideration_items as "total_original_consideration_items!", 62 | O.counter as "counter!", 63 | O.salt as "salt!", 64 | O.conduit_key as "conduit_key!", 65 | O.signature as "signature!", 66 | array_agg(DISTINCT ( 67 | OC.position, 68 | OC.item_type, 69 | OC.token::TEXT, 70 | OC.identifier_or_criteria, 71 | OC.start_amount, 72 | OC.end_amount, 73 | OC.recipient::TEXT 74 | )) AS "considerations!: Vec", 75 | array_agg(DISTINCT ( 76 | OOF.position, 77 | OOF.item_type, 78 | OOF.token::TEXT, 79 | OOF.identifier_or_criteria, 80 | OOF.start_amount, 81 | OOF.end_amount 82 | )) AS "offers!: Vec" 83 | FROM orders O 84 | INNER JOIN considerations OC ON O.hash = OC.order 85 | INNER JOIN offers OOF ON O.hash = OOF.order 86 | WHERE O.hash IN ( 87 | SELECT C.order FROM considerations C 88 | WHERE (C.token = $1::TEXT::citext OR $1::TEXT::citext = '0x0000000000000000000000000000000000000000000000000000000000000000') 89 | AND (C.identifier_or_criteria = ANY($2::TEXT[]) OR cardinality($2::TEXT[]) = 0) 90 | ) 91 | AND (O.offerer = $3::TEXT::citext OR $3::TEXT::citext = '0x0000000000000000000000000000000000000000000000000000000000000000') 92 | GROUP BY O.hash 93 | LIMIT $4; 94 | "#, 95 | asset_contract_address, 96 | &token_ids[..], 97 | offerer, 98 | limit.unwrap_or(1) 99 | ) 100 | .fetch_all(pool) 101 | .await 102 | .map_err(|e| { 103 | tracing::error!("Failed to execute query: {:?}", e); 104 | e 105 | // Using the `?` operator to return early 106 | // if the function failed, returning a sqlx::Error 107 | })?; 108 | 109 | RetrieveResponse::from_db_struct(&db_orders) 110 | } 111 | -------------------------------------------------------------------------------- 
/src/routes/sessions.rs: -------------------------------------------------------------------------------- 1 | use axum::extract::Json; 2 | use axum::response::IntoResponse; 3 | use axum_sessions::extractors::{ReadableSession, WritableSession}; 4 | 5 | use ethers::types::Address; 6 | use http::{header, HeaderMap, StatusCode}; 7 | 8 | use siwe::VerificationOpts; 9 | 10 | use crate::auth::*; 11 | 12 | #[tracing::instrument(name = "Getting an EIP-4361 nonce for session", skip(session))] 13 | pub async fn get_nonce(mut session: WritableSession) -> impl IntoResponse { 14 | let nonce = siwe::generate_nonce(); 15 | match &session.insert(NONCE_KEY, &nonce) { 16 | Ok(_) => {} 17 | Err(_) => { 18 | return (StatusCode::INTERNAL_SERVER_ERROR, "Failed to set nonce.").into_response() 19 | } 20 | } 21 | // Make sure we don't inherit a dirty session expiry 22 | let ts = match unix_timestamp() { 23 | Ok(ts) => ts, 24 | Err(_) => { 25 | return ( 26 | StatusCode::INTERNAL_SERVER_ERROR, 27 | "Failed to get unix timestamp.", 28 | ) 29 | .into_response() 30 | } 31 | }; 32 | match session.insert(EXPIRATION_TIME_KEY, ts) { 33 | Ok(_) => {} 34 | Err(_) => { 35 | return ( 36 | StatusCode::INTERNAL_SERVER_ERROR, 37 | "Failed to set expiration.", 38 | ) 39 | .into_response() 40 | } 41 | } 42 | let mut headers = HeaderMap::new(); 43 | headers.insert(header::CONTENT_TYPE, "text/plain".parse().unwrap()); 44 | (headers, nonce).into_response() 45 | } 46 | 47 | #[tracing::instrument( 48 | name = "Verifying user EIP-4361 session", 49 | skip(session, signed_message) 50 | )] 51 | pub async fn verify( 52 | mut session: WritableSession, 53 | signed_message: Json, 54 | ) -> impl IntoResponse { 55 | // Infallible because the signature has already been validated 56 | let message = signed_message.message.clone(); 57 | // The frontend must set a session expiry 58 | let session_nonce = match session.get(NONCE_KEY) { 59 | Some(no) => no, 60 | None => return (StatusCode::UNPROCESSABLE_ENTITY, "Failed to get 
nonce.").into_response(), 61 | }; 62 | 63 | // Verify the signed message 64 | match message 65 | .verify( 66 | signed_message.signature.as_ref(), 67 | &VerificationOpts { 68 | nonce: Some(session_nonce), 69 | ..Default::default() 70 | }, 71 | ) 72 | .await 73 | { 74 | Ok(_) => {} 75 | Err(error) => { 76 | return ( 77 | StatusCode::UNPROCESSABLE_ENTITY, 78 | format!("Invalid signature {error}."), 79 | ) 80 | .into_response() 81 | } 82 | } 83 | let now = match unix_timestamp() { 84 | Ok(now) => now, 85 | Err(_) => { 86 | return ( 87 | StatusCode::INTERNAL_SERVER_ERROR, 88 | "Failed to get timestamp.", 89 | ) 90 | .into_response() 91 | } 92 | }; 93 | let expiry = now + 604800; 94 | match session.insert(EXPIRATION_TIME_KEY, expiry) { 95 | Ok(_) => {} 96 | Err(_) => { 97 | return ( 98 | StatusCode::INTERNAL_SERVER_ERROR, 99 | "Failed to insert expiration time.", 100 | ) 101 | .into_response() 102 | } 103 | } 104 | match session.insert(USER_ADDRESS_KEY, Address::from(message.address)) { 105 | Ok(_) => {} 106 | Err(_) => { 107 | return ( 108 | StatusCode::INTERNAL_SERVER_ERROR, 109 | "Failed to insert user address.", 110 | ) 111 | .into_response() 112 | } 113 | } 114 | (StatusCode::OK).into_response() 115 | } 116 | 117 | #[tracing::instrument(name = "Checking user EIP-4361 authentication", skip(session))] 118 | pub async fn authenticate(session: ReadableSession) -> impl IntoResponse { 119 | verify_session(&session).await 120 | } 121 | -------------------------------------------------------------------------------- /src/services/mod.rs: -------------------------------------------------------------------------------- 1 | mod rfq; 2 | mod session; 3 | 4 | pub use rfq::*; 5 | pub use session::*; 6 | -------------------------------------------------------------------------------- /src/services/rfq.rs: -------------------------------------------------------------------------------- 1 | use crate::rfq::rfq_server::Rfq; 2 | use crate::rfq::{QuoteRequest, QuoteResponse, H128}; 3 | 
use ethers::prelude::U128; 4 | use std::collections::HashMap; 5 | use std::pin::Pin; 6 | use tokio::sync::broadcast::Sender; 7 | use tokio::sync::mpsc::channel; 8 | use tonic::codegen::futures_core::Stream; 9 | use tonic::{Request, Response, Status, Streaming}; 10 | use ulid::Ulid; 11 | 12 | #[derive(Debug)] 13 | pub struct RFQService { 14 | request_tx_stream: Sender, 15 | response_tx_stream: Sender, 16 | } 17 | 18 | impl RFQService { 19 | pub fn new() -> Self { 20 | let (request_tx_stream, _) = tokio::sync::broadcast::channel::(64); 21 | let (response_tx_stream, _) = tokio::sync::broadcast::channel::(64); 22 | Self { 23 | request_tx_stream, 24 | response_tx_stream, 25 | } 26 | } 27 | } 28 | 29 | impl Default for RFQService { 30 | fn default() -> Self { 31 | Self::new() 32 | } 33 | } 34 | 35 | #[tonic::async_trait] 36 | impl Rfq for RFQService { 37 | type WebTakerStream = 38 | Pin> + Send + 'static>>; 39 | 40 | async fn web_taker( 41 | &self, 42 | request: Request, 43 | ) -> Result, Status> { 44 | // Create the stream which will be sent back to the web taker (trader). This will only 45 | // hold one item at max. 46 | let (tx_trader, rx_trader) = channel::>(1); 47 | 48 | // Insert the ulid into the messages so we are able to identify a response to this request. 49 | let ulid: U128 = Ulid::new().0.into(); 50 | let mut request = request.into_inner(); 51 | request.ulid = Some(ulid.into()); 52 | 53 | // Send the request off to the makers 54 | match self.request_tx_stream.send(request) { 55 | Ok(_) => { 56 | let mut response_broadcast = self.response_tx_stream.subscribe(); 57 | let ulid: Option = Some(ulid.into()); 58 | 59 | // Using a tokio task wait until we get a response from the makers and if it 60 | // matches our ulid forward it onto the taker. 
61 | tokio::spawn(async move { 62 | let mut end_stream = false; 63 | while !end_stream { 64 | match response_broadcast.recv().await { 65 | Ok(response) => { 66 | if ulid == response.ulid { 67 | tx_trader.send(Ok(response)).await.unwrap_or_default(); 68 | end_stream = true; 69 | } 70 | } 71 | Err(error) => { 72 | eprintln!("RFQService:WebTaker: Error while receiving broadcast requests. Error reported\n{}", error); 73 | tx_trader 74 | .send(Err(Status::internal("Internal server error 2"))) 75 | .await 76 | .unwrap_or_default(); 77 | end_stream = true; 78 | } 79 | } 80 | } 81 | 82 | drop(response_broadcast); 83 | }); 84 | } 85 | Err(error) => { 86 | eprintln!( 87 | "RFQService:WebTaker: Error while broadcasting request. Error reported\n{}", 88 | error 89 | ); 90 | tx_trader 91 | .send(Err(Status::internal("Internal server error 1"))) 92 | .await 93 | .unwrap_or_default(); 94 | } 95 | } 96 | 97 | // Response stream for the taker to wait on 98 | Ok(Response::new(Box::pin( 99 | tokio_stream::wrappers::ReceiverStream::new(rx_trader), 100 | ))) 101 | } 102 | 103 | type TakerStream = Pin> + Send + 'static>>; 104 | 105 | async fn taker( 106 | &self, 107 | request: Request>, 108 | ) -> Result, Status> { 109 | // Setup the private channel between the Tokio task and the connected Taker. This buffer 110 | // can hold 64 messages before it will block on the `await` waiting for a message to be 111 | // removed. 112 | let (tx_taker, rx_taker) = channel::>(64); 113 | 114 | let mut taker_stream: Streaming = request.into_inner(); 115 | let mut response_rx_stream = self.response_tx_stream.subscribe(); 116 | let request_tx_stream = self.request_tx_stream.clone(); 117 | 118 | // This task will pass messages to and from a connected taker. 119 | tokio::spawn(async move { 120 | // Loop forever until we hit one of the break conditions (i.e. a stream has died). 
121 | let mut stream_closed = false; 122 | let mut message_ids = HashMap::::new(); 123 | 124 | while !stream_closed { 125 | // Wait until one of the streams return. 126 | tokio::select! { 127 | request = taker_stream.message() => { 128 | match request { 129 | Ok(request) => { 130 | match request { 131 | Some(request) => { 132 | // Insert the id for this message 133 | let ulid: U128 = Ulid::new().0.into(); 134 | let mut request = request; 135 | request.ulid = Some(ulid.into()); 136 | 137 | message_ids.insert(ulid, ()); 138 | request_tx_stream.send(request).unwrap_or_default(); 139 | }, 140 | None => { 141 | eprintln!("RFQService:Taker: Taker stream as closed."); 142 | stream_closed = true; 143 | } 144 | } 145 | }, 146 | Err(error) => { 147 | eprintln!("RFQService:Taker: Error while handling taker stream. Reported error\n{}", error); 148 | stream_closed = true; 149 | } 150 | } 151 | }, 152 | response = response_rx_stream.recv() => { 153 | match response { 154 | Ok(response) => { 155 | if let Some(ulid) = response.ulid.clone() { 156 | let ulid: U128 = ulid.into(); 157 | if message_ids.contains_key(&ulid) { 158 | // Send the request to the taker 159 | tx_taker.send(Ok(response)).await.unwrap_or_default(); 160 | message_ids.remove(&ulid); 161 | } 162 | } 163 | }, 164 | Err(error) => { 165 | eprintln!("RFQService:Taker: Error while reading response broadcast stream. Reported error\n{}", error); 166 | tx_taker.send(Err(Status::internal("Internal server error 3"))).await.unwrap_or_default(); 167 | stream_closed = true; 168 | } 169 | } 170 | } 171 | } 172 | } 173 | }); 174 | 175 | // Send back the stream to the receive side of the channel that the Tokio task above will 176 | // write into. 
177 | Ok(Response::new(Box::pin( 178 | tokio_stream::wrappers::ReceiverStream::new(rx_taker), 179 | ))) 180 | } 181 | 182 | type MakerStream = Pin> + Send + 'static>>; 183 | 184 | /// When a Maker makes an initial connection, spawn an asynchronous task that will await on 185 | /// either a new request from a Trader/Taker (to then forward to the Maker) or a response from 186 | /// the Maker (to then forward onto the Trader/Taker). 187 | async fn maker( 188 | &self, 189 | request: Request>, 190 | ) -> Result, Status> { 191 | // Setup the private channel between the Tokio task and the connected Maker. This buffer 192 | // can hold 64 messages before it will block on the `await` waiting for a message to be 193 | // removed. 194 | let (tx_maker, rx_maker) = channel::>(64); 195 | 196 | let mut maker_stream: Streaming = request.into_inner(); 197 | let mut request_rx_stream = self.request_tx_stream.subscribe(); 198 | let response_tx_stream = self.response_tx_stream.clone(); 199 | 200 | // This task will pass messages to and from a connected Maker. 201 | tokio::spawn(async move { 202 | // Loop forever until we hit one of the break conditions (i.e. a stream has died). 203 | let mut stream_closed = false; 204 | while !stream_closed { 205 | // Wait until one of the streams return. 206 | tokio::select! { 207 | request = request_rx_stream.recv() => { 208 | // Received a new request - forward to Maker or exit if error 209 | match request { 210 | Ok(request) => { 211 | // A send error means the maker's receive half was dropped 212 | // (client disconnected); end the loop instead of panicking 213 | // inside the task, matching the other call sites. 214 | if tx_maker.send(Ok(request)).await.is_err() { 215 | eprintln!("RFQService:Maker: Maker receiver dropped."); 216 | stream_closed = true; 217 | } 218 | }, 219 | Err(error) => { 220 | eprintln!("RFQService:Maker: Request stream has closed.
Reported error\n{:?}", error); 213 | tx_maker.send(Err(Status::internal("Internal server error 4"))).await.unwrap_or_default(); 214 | stream_closed = true; 215 | } 216 | } 217 | }, 218 | response = maker_stream.message() => { 219 | // Received a new response - forward to takers/traders or exit if error 220 | match response { 221 | Ok(Some(response)) => { 222 | match response_tx_stream.send(response) { 223 | Ok(_) => (), 224 | Err(error) => { 225 | eprintln!("RFQService:Maker: All response tx stream receivers have been dropped. Error reported\n{}", error); 226 | tx_maker.send(Err(Status::internal("Internal server error 5"))).await.unwrap_or_default(); 227 | stream_closed = true; 228 | } 229 | } 230 | }, 231 | Ok(None) => { 232 | eprintln!("RFQService:Maker: Stream to maker has been closed."); 233 | stream_closed = true; 234 | }, 235 | Err(error) => { 236 | eprintln!("RFQService:Maker: Error while handling maker stream. Reported error\n{:?}", error); 237 | stream_closed = true; 238 | } 239 | } 240 | } 241 | } 242 | } 243 | 244 | // Drop the transmit stream to allow the receive side to detect we are exiting. 245 | drop(tx_maker); 246 | }); 247 | 248 | // Send back the stream to the receive side of the channel that the Tokio task above will 249 | // write into. 
250 | Ok(Response::new(Box::pin( 251 | tokio_stream::wrappers::ReceiverStream::new(rx_maker), 252 | ))) 253 | } 254 | } 255 | -------------------------------------------------------------------------------- /src/services/session.rs: -------------------------------------------------------------------------------- 1 | use crate::auth::{ 2 | unix_timestamp, SignedMessage, EXPIRATION_TIME_KEY, NONCE_KEY, USER_ADDRESS_KEY, 3 | }; 4 | use crate::session::{session_server::Session, Empty, NonceText, VerifyText}; 5 | 6 | use axum_sessions::SessionHandle; 7 | use ethers::prelude::Address; 8 | use siwe::VerificationOpts; 9 | use tokio::{sync::RwLockReadGuard, task}; 10 | use tonic::{service::Interceptor, Request, Response, Status}; 11 | 12 | const SEVEN_DAYS_IN_SECONDS: u64 = 604800u64; 13 | 14 | // Private authentication function for used by the service endpoint and the session interceptor. 15 | fn authenticate( 16 | session: &RwLockReadGuard, 17 | ) -> Result<(), Status> { 18 | // Confirm the nonce is valid. 19 | match session.get::(NONCE_KEY) { 20 | Some(_) => (), 21 | // Invalid nonce 22 | None => return Err(Status::unauthenticated("Failed to get nonce")), 23 | } 24 | 25 | // Confirm the session is still valid. 26 | let now = match unix_timestamp() { 27 | Ok(now) => now, 28 | Err(_) => return Err(Status::internal("Failed to get unix timestamp.")), 29 | }; 30 | 31 | match session.get::(EXPIRATION_TIME_KEY) { 32 | None => return Err(Status::unauthenticated("Failed to get session expiration")), 33 | Some(ts) => { 34 | if now > ts { 35 | return Err(Status::unauthenticated("Session expired")); 36 | } 37 | } 38 | } 39 | 40 | // Authenticated request 41 | Ok(()) 42 | } 43 | 44 | /// The SessionAuthenticator is a gRPC interceptor for the server to check and validate session 45 | /// authentication details in the `request`. 
46 | #[derive(Clone)] 47 | pub struct SessionAuthenticator; 48 | 49 | impl Interceptor for SessionAuthenticator { 50 | fn call(&mut self, request: Request<()>) -> Result, Status> { 51 | // Use an internal scope to ensure the borrow of the request is dropped (i.e. 52 | // RwLockReadGuard is dropped at the end of the scope), in order to move it for the return 53 | // value 54 | { 55 | let session_handle = request.extensions().get::().unwrap(); 56 | let session = task::block_in_place(|| session_handle.blocking_read()); 57 | authenticate(&session) 58 | } 59 | .map(|_| request) 60 | } 61 | } 62 | 63 | #[derive(Debug, Default)] 64 | pub struct SessionService; 65 | 66 | #[tonic::async_trait] 67 | impl Session for SessionService { 68 | async fn nonce(&self, request: Request) -> Result, Status> { 69 | // Fetch a writeable session. 70 | let session_handle = request.extensions().get::().unwrap(); 71 | let mut session = session_handle.write().await; 72 | 73 | // Generate and set the nonce 74 | let nonce = siwe::generate_nonce(); 75 | match session.insert(NONCE_KEY, &nonce) { 76 | Ok(_) => (), 77 | Err(_) => return Err(Status::internal("Failed to set nonce.")), 78 | } 79 | 80 | // Make sure we don't inherit a dirty session expiry 81 | let ts = match unix_timestamp() { 82 | Ok(ts) => ts, 83 | Err(_) => return Err(Status::internal("Failed to get unix timestamp.")), 84 | }; 85 | 86 | // Set the expiry time in the session 87 | match session.insert(EXPIRATION_TIME_KEY, ts) { 88 | Ok(_) => (), 89 | Err(_) => return Err(Status::internal("Failed to set expiration.")), 90 | } 91 | 92 | // Send the response 93 | Ok(Response::new(NonceText { nonce })) 94 | } 95 | 96 | async fn verify(&self, request: Request) -> Result, Status> { 97 | // Decode the JSON message body into the expected SignedMessage structure 98 | let signed_message: SignedMessage = 99 | match serde_json::from_str(request.get_ref().body.as_str()) { 100 | Ok(msg) => msg, 101 | Err(_) => { 102 | return 
Err(Status::failed_precondition( 103 | "Error decoding message into a SignedMessage.", 104 | )) 105 | } 106 | }; 107 | 108 | // Now we have a valid message, fetch the session handler 109 | let session_handle = request.extensions().get::().unwrap(); 110 | let mut session = session_handle.write().await; 111 | 112 | // Verify the signed message 113 | let message = &signed_message.message; 114 | let session_nonce = match session.get(NONCE_KEY) { 115 | Some(no) => no, 116 | None => return Err(Status::unauthenticated("Failed to get nonce.")), 117 | }; 118 | 119 | match message 120 | .verify( 121 | signed_message.signature.as_ref(), 122 | &VerificationOpts { 123 | nonce: Some(session_nonce), 124 | ..Default::default() 125 | }, 126 | ) 127 | .await 128 | { 129 | Ok(_) => (), 130 | Err(error) => { 131 | return Err(Status::unauthenticated(format!( 132 | "Invalid signature {:?}.", 133 | error 134 | ))) 135 | } 136 | } 137 | 138 | // Update the session expiry time and user address 139 | let now = match unix_timestamp() { 140 | Ok(now) => now, 141 | Err(_) => return Err(Status::internal("Failed to get timestamp.")), 142 | }; 143 | 144 | let expiry = now + SEVEN_DAYS_IN_SECONDS; 145 | match session.insert(EXPIRATION_TIME_KEY, expiry) { 146 | Ok(_) => (), 147 | Err(_) => return Err(Status::internal("Failed to insert expiration time.")), 148 | } 149 | 150 | match session.insert(USER_ADDRESS_KEY, Address::from(message.address)) { 151 | Ok(_) => (), 152 | Err(_) => return Err(Status::internal("Failed to insert user address.")), 153 | } 154 | 155 | Ok(Response::new(Empty {})) 156 | } 157 | 158 | async fn authenticate(&self, request: Request) -> Result, Status> { 159 | let session_handle = request.extensions().get::().unwrap(); 160 | let session = session_handle.read().await; 161 | authenticate(&session).map(|_| Response::new(Empty::default())) 162 | } 163 | } 164 | -------------------------------------------------------------------------------- /src/startup.rs: 
-------------------------------------------------------------------------------- 1 | use std::net::TcpListener; 2 | use std::str::FromStr; 3 | use std::sync::Arc; 4 | 5 | use async_redis_session::RedisSessionStore; 6 | use axum::{ 7 | middleware, 8 | routing::{get, post}, 9 | Router, 10 | }; 11 | use axum_server::Handle; 12 | use axum_sessions::SessionLayer; 13 | use bb8::Pool; 14 | use ethers::prelude::*; 15 | use futures::future::BoxFuture; 16 | use futures::FutureExt; 17 | use http::{header::CONTENT_TYPE, Request}; 18 | use hyper::Body; 19 | use redis::aio::ConnectionManager; 20 | use secrecy::ExposeSecret; 21 | use sqlx::postgres::PgPoolOptions; 22 | use sqlx::PgPool; 23 | use tonic::transport::Server; 24 | use tower::{make::Shared, steer::Steer, BoxError, ServiceExt}; 25 | use tower_http::cors::CorsLayer; 26 | use tower_http::trace::TraceLayer; 27 | 28 | use crate::middleware::{track_prometheus_metrics, RequestIdLayer}; 29 | use crate::redis_pool::RedisConnectionManager; 30 | use crate::rfq::rfq_server::RfqServer; 31 | use crate::routes::*; 32 | use crate::services::*; 33 | use crate::session::session_server::SessionServer; 34 | use crate::{bindings::Seaport, state::AppState}; 35 | use crate::{ 36 | configuration::{DatabaseSettings, Settings}, 37 | telemetry::TowerMakeSpanWithConstantId, 38 | }; 39 | 40 | pub fn get_connection_pool(configuration: &DatabaseSettings) -> PgPool { 41 | PgPoolOptions::new() 42 | .connect_timeout(std::time::Duration::from_secs(2)) 43 | .connect_lazy_with(configuration.with_db()) 44 | } 45 | 46 | pub fn run( 47 | listener: TcpListener, 48 | db_pool: PgPool, 49 | redis_pool: Pool, 50 | redis_multiplexed: ConnectionManager, 51 | session_layer: SessionLayer, 52 | rpc: Provider, 53 | ) -> BoxFuture<'static, Result<(), std::io::Error>> { 54 | let provider = Arc::new(rpc.clone()); 55 | 56 | let seaport = Seaport::new( 57 | H160::from_str("0x00000000006c3852cbEf3e08E8dF289169EdE581").unwrap(), 58 | provider, 59 | ); 60 | 61 | let cors = 
CorsLayer::very_permissive(); 62 | 63 | let state = AppState { 64 | db_pool, 65 | redis_pool, 66 | redis_multiplexed, 67 | rpc, 68 | seaport, 69 | }; 70 | 71 | // TODO(Cleanup duplicate state) 72 | let http = Router::new() 73 | .route("/", get(|| async { "Hello, world!" })) 74 | .route("/health_check", get(health_check)) 75 | .route("/metrics/prometheus", get(metrics_prometheus)) 76 | .route("/listings", post(create_listing).get(retrieve_listings)) 77 | .route("/offers", post(create_offer).get(retrieve_offers)) 78 | .route("/nonce", get(get_nonce)) 79 | .route("/verify", post(verify)) 80 | .route("/authenticate", get(authenticate)) 81 | // Layers/middleware 82 | .layer(TraceLayer::new_for_http().make_span_with(TowerMakeSpanWithConstantId)) 83 | .layer(RequestIdLayer) 84 | .layer(session_layer.clone()) 85 | .layer(middleware::from_fn(track_prometheus_metrics)) 86 | .layer(cors) 87 | // State 88 | .with_state(state) 89 | .map_err(BoxError::from) 90 | .boxed_clone(); 91 | 92 | let grpc = Server::builder() 93 | .layer(RequestIdLayer) 94 | .layer(TraceLayer::new_for_http().make_span_with(TowerMakeSpanWithConstantId)) 95 | .layer(session_layer) 96 | .add_service(RfqServer::with_interceptor( 97 | RFQService::new(), 98 | SessionAuthenticator, 99 | )) 100 | .add_service(SessionServer::new(SessionService::default())) 101 | .into_service() 102 | .map_response(|r| r.map(axum::body::boxed)) 103 | .boxed_clone(); 104 | 105 | let http_grpc = Steer::new(vec![http, grpc], |req: &Request, _svcs: &[_]| { 106 | usize::from( 107 | req.headers().get(CONTENT_TYPE).map(|v| v.as_bytes()) == Some(b"application/grpc"), 108 | ) 109 | }); 110 | 111 | let handle = Handle::new(); 112 | 113 | // TODO(Should we be using a tokio future here?) 
114 | axum_server::from_tcp(listener) 115 | .handle(handle) 116 | .serve(Shared::new(http_grpc)) 117 | .boxed() 118 | } 119 | 120 | pub struct Application { 121 | server: BoxFuture<'static, Result<(), std::io::Error>>, 122 | port: u16, 123 | } 124 | 125 | impl Application { 126 | // We have converted the `build` function into a constructor for 127 | // `Application`. 128 | pub async fn build(configuration: Settings) -> Result { 129 | let db_pool = get_connection_pool(&configuration.database); 130 | let redis_pool = Pool::builder() 131 | .build(crate::redis_pool::RedisConnectionManager::new( 132 | configuration.redis_url.expose_secret().as_str(), 133 | )?) 134 | .await?; 135 | let redis_multiplexed = ConnectionManager::new(redis::Client::open( 136 | configuration.redis_url.expose_secret().as_str(), 137 | )?) 138 | .await?; 139 | 140 | let provider: Provider = 141 | Provider::new(Http::from_str(configuration.rpc.uri.as_str()).unwrap()); 142 | 143 | let address = format!( 144 | "{}:{}", 145 | configuration.application.host, configuration.application.port 146 | ); 147 | 148 | let listener = TcpListener::bind(address)?; 149 | let port = listener.local_addr().unwrap().port(); 150 | 151 | let store = RedisSessionStore::new(redis_multiplexed.clone(), Some("/sessions".into())); 152 | let secret = configuration 153 | .application 154 | .hmac_secret 155 | .expose_secret() 156 | .as_bytes(); // MUST be at least 64 bytes! 157 | let session_layer = SessionLayer::new(store, secret); 158 | 159 | let server = run( 160 | listener, 161 | db_pool, 162 | redis_pool, 163 | redis_multiplexed, 164 | session_layer, 165 | provider, 166 | ); 167 | 168 | Ok(Self { server, port }) 169 | } 170 | 171 | // A more expressive name that makes it clear that 172 | // this function only returns when the application is stopped. 
173 | pub async fn run_until_stopped(self) -> Result<(), std::io::Error> { 174 | self.server.await 175 | } 176 | 177 | pub fn port(&self) -> u16 { 178 | self.port 179 | } 180 | } 181 | -------------------------------------------------------------------------------- /src/state.rs: -------------------------------------------------------------------------------- 1 | use axum::extract::FromRef; 2 | use bb8::Pool; 3 | use ethers::providers::{Http, Provider}; 4 | use redis::aio::ConnectionManager; 5 | use sqlx::PgPool; 6 | 7 | use crate::bindings::Seaport; 8 | use crate::redis_pool::RedisConnectionManager; 9 | 10 | #[derive(Clone)] 11 | pub struct AppState { 12 | pub db_pool: PgPool, 13 | pub redis_pool: Pool, 14 | pub redis_multiplexed: ConnectionManager, 15 | pub rpc: Provider, 16 | pub seaport: Seaport>, 17 | } 18 | 19 | impl FromRef for PgPool { 20 | fn from_ref(app_state: &AppState) -> PgPool { 21 | app_state.db_pool.clone() 22 | } 23 | } 24 | impl FromRef for Pool { 25 | fn from_ref(app_state: &AppState) -> Pool { 26 | app_state.redis_pool.clone() 27 | } 28 | } 29 | impl FromRef for ConnectionManager { 30 | fn from_ref(app_state: &AppState) -> ConnectionManager { 31 | app_state.redis_multiplexed.clone() 32 | } 33 | } 34 | impl FromRef for Provider { 35 | fn from_ref(app_state: &AppState) -> Provider { 36 | app_state.rpc.clone() 37 | } 38 | } 39 | impl FromRef for Seaport> { 40 | fn from_ref(app_state: &AppState) -> Seaport> { 41 | app_state.seaport.clone() 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /src/structs/mod.rs: -------------------------------------------------------------------------------- 1 | mod network; 2 | mod seaport; 3 | 4 | pub use network::*; 5 | pub use seaport::*; 6 | -------------------------------------------------------------------------------- /src/structs/network.rs: -------------------------------------------------------------------------------- 1 | pub struct Network { 2 | pub 
network: i32, 3 | pub indexed_block: i64, 4 | } 5 | -------------------------------------------------------------------------------- /src/structs/seaport.rs: -------------------------------------------------------------------------------- 1 | use ethers::types::{H160, U256}; 2 | use serde::{Deserialize, Deserializer, Serialize}; 3 | 4 | use crate::bindings::seaport::{ConsiderationItem, OfferItem, Order, OrderComponents}; 5 | 6 | #[derive(Clone, Debug, Deserialize, Serialize)] 7 | pub struct OrderQuery { 8 | #[serde(default)] 9 | pub asset_contract_address: H160, 10 | #[serde(deserialize_with = "token_ids_deserialize", default)] 11 | pub token_ids: Vec, 12 | #[serde(default)] 13 | pub offerer: H160, 14 | pub limit: Option, 15 | } 16 | 17 | #[derive(Clone, Debug, Deserialize, Serialize)] 18 | pub struct RetrieveResponse { 19 | pub orders: Vec, 20 | } 21 | impl RetrieveResponse { 22 | pub fn from_db_struct(db_orders: &[DBOrder]) -> Result { 23 | Ok(RetrieveResponse { 24 | orders: db_orders 25 | .iter() 26 | .map(ComplexOrder::from_db_struct) 27 | .collect::, anyhow::Error>>()?, 28 | }) 29 | } 30 | } 31 | 32 | #[derive(Clone, Debug, Deserialize, Serialize)] 33 | pub struct ComplexOrder { 34 | pub protocol_data: Order, 35 | } 36 | impl ComplexOrder { 37 | pub fn from_db_struct(db_order: &DBOrder) -> Result { 38 | Ok(ComplexOrder { 39 | protocol_data: Order::from_db_struct(db_order)?, 40 | }) 41 | } 42 | } 43 | 44 | #[derive(Clone, Debug, Deserialize, Serialize)] 45 | #[serde(rename_all = "camelCase")] 46 | pub struct DBOrder { 47 | pub hash: String, 48 | pub offerer: String, 49 | pub zone: String, 50 | pub zone_hash: String, 51 | pub start_time: i64, 52 | pub end_time: i64, 53 | pub order_type: i32, 54 | pub total_original_consideration_items: i32, 55 | pub counter: i64, 56 | pub salt: String, 57 | pub conduit_key: String, 58 | pub signature: String, 59 | 60 | pub offers: Vec, 61 | pub considerations: Vec, 62 | } 63 | 64 | #[derive(Clone, Debug, Deserialize, Serialize, 
sqlx::Type, sqlx::FromRow)] 65 | #[serde(rename_all = "camelCase")] 66 | pub struct DBConsideration { 67 | pub position: i32, 68 | pub item_type: i32, 69 | pub token: String, 70 | pub identifier_or_criteria: String, 71 | pub start_amount: String, 72 | pub end_amount: String, 73 | pub recipient: String, 74 | } 75 | 76 | #[derive(Clone, Debug, Deserialize, Serialize, sqlx::Type, sqlx::FromRow)] 77 | #[serde(rename_all = "camelCase")] 78 | pub struct DBOffer { 79 | pub position: i32, 80 | pub item_type: i32, 81 | pub token: String, 82 | pub identifier_or_criteria: String, 83 | pub start_amount: String, 84 | pub end_amount: String, 85 | } 86 | 87 | fn token_ids_deserialize<'de, D>(deserializer: D) -> Result, D::Error> 88 | where 89 | D: Deserializer<'de>, 90 | { 91 | let str_sequence = String::deserialize(deserializer)?; 92 | Ok(str_sequence 93 | .split(',') 94 | .map(|item| item.to_owned()) 95 | .collect()) 96 | } 97 | 98 | #[derive(Clone, Debug, Deserialize, Serialize)] 99 | #[serde(rename_all = "camelCase")] 100 | pub struct OrderInputParameters { 101 | pub offerer: ethers::core::types::Address, 102 | pub zone: ethers::core::types::Address, 103 | pub offer: ::std::vec::Vec, 104 | pub consideration: ::std::vec::Vec, 105 | pub order_type: u8, 106 | pub start_time: ethers::core::types::U256, 107 | pub end_time: ethers::core::types::U256, 108 | pub zone_hash: ethers::core::types::U256, 109 | pub total_original_consideration_items: u32, 110 | pub salt: ethers::core::types::U256, 111 | pub conduit_key: ethers::core::types::U256, 112 | pub nonce: u64, 113 | } 114 | 115 | #[derive(Clone, Debug, Deserialize, Serialize)] 116 | #[serde(rename_all = "camelCase")] 117 | pub struct OrderInput { 118 | pub parameters: OrderInputParameters, 119 | pub signature: ethers::core::types::Bytes, 120 | } 121 | impl OrderInput { 122 | pub async fn to_components(&self) -> OrderComponents { 123 | OrderComponents { 124 | offerer: self.parameters.offerer, 125 | zone: self.parameters.zone, 126 | 
offer: self.parameters.offer.clone(), 127 | consideration: self.parameters.consideration.clone(), 128 | order_type: self.parameters.order_type, 129 | start_time: self.parameters.start_time, 130 | end_time: self.parameters.end_time, 131 | zone_hash: <[u8; 32]>::from(self.parameters.zone_hash), 132 | counter: U256::from(self.parameters.nonce), 133 | salt: self.parameters.salt, 134 | conduit_key: <[u8; 32]>::from(self.parameters.conduit_key), 135 | } 136 | } 137 | } 138 | -------------------------------------------------------------------------------- /src/telemetry/metrics/api.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | 3 | use prometheus::{HistogramVec, IntCounterVec}; 4 | use prometheus_metric_storage::{MetricStorage, StorageRegistry}; 5 | 6 | #[derive(MetricStorage, Clone, Debug)] 7 | #[metric(subsystem = "api")] 8 | pub struct ApiMetrics { 9 | /// Number of completed API requests. 10 | #[metric(labels("path", "method", "status_code"))] 11 | requests_complete: IntCounterVec, 12 | 13 | /// Execution time for each API request. 
14 | #[metric(labels("path", "method"))] 15 | requests_duration_miliseconds: HistogramVec, 16 | } 17 | 18 | impl ApiMetrics { 19 | pub fn inst(registry: &StorageRegistry) -> Result<&ApiMetrics, prometheus::Error> { 20 | ApiMetrics::instance(registry) 21 | } 22 | 23 | pub fn on_request_completed( 24 | &self, 25 | path: &str, 26 | method: &str, 27 | status: u16, 28 | request_time: Duration, 29 | ) { 30 | let rt = (request_time.as_nanos() as f64) / 1_000_000.0; 31 | 32 | self.requests_complete 33 | .with_label_values(&[path, method, &status.to_string()]) 34 | .inc(); 35 | self.requests_duration_miliseconds 36 | .with_label_values(&[path, method]) 37 | .observe(rt); 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /src/telemetry/metrics/database.rs: -------------------------------------------------------------------------------- 1 | use prometheus::HistogramVec; 2 | use prometheus_metric_storage::{MetricStorage, StorageRegistry}; 3 | 4 | use super::get_metric_storage_registry; 5 | 6 | #[derive(MetricStorage, Clone, Debug)] 7 | #[metric(subsystem = "database")] 8 | pub struct DatabaseMetrics { 9 | /// Execution times of DB queries. 
10 | #[metric(labels("type"))] 11 | pub database_queries: HistogramVec, 12 | } 13 | 14 | impl DatabaseMetrics { 15 | pub fn inst(registry: &StorageRegistry) -> Result<&Self, prometheus::Error> { 16 | DatabaseMetrics::instance(registry) 17 | } 18 | 19 | pub fn get() -> &'static Self { 20 | DatabaseMetrics::instance(get_metric_storage_registry()).unwrap() 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /src/telemetry/metrics/mod.rs: -------------------------------------------------------------------------------- 1 | mod api; 2 | mod database; 3 | mod registry; 4 | 5 | pub use api::*; 6 | pub use database::*; 7 | pub use registry::*; 8 | -------------------------------------------------------------------------------- /src/telemetry/metrics/registry.rs: -------------------------------------------------------------------------------- 1 | use once_cell::sync::Lazy; 2 | use prometheus::Registry; 3 | use prometheus_metric_storage::StorageRegistry; 4 | 5 | static METRICS_REGISTRY: Lazy = Lazy::new(|| { 6 | let prometheus_registry = Registry::new_custom(Some("quay".to_string()), None).unwrap(); 7 | 8 | StorageRegistry::new(prometheus_registry) 9 | }); 10 | 11 | pub fn get_metric_storage_registry() -> &'static StorageRegistry { 12 | &METRICS_REGISTRY 13 | } 14 | 15 | pub fn get_metrics_registry() -> &'static Registry { 16 | get_metric_storage_registry().registry() 17 | } 18 | -------------------------------------------------------------------------------- /src/telemetry/mod.rs: -------------------------------------------------------------------------------- 1 | mod metrics; 2 | mod tracing; 3 | 4 | pub use self::tracing::*; 5 | pub use metrics::*; 6 | -------------------------------------------------------------------------------- /src/telemetry/tracing.rs: -------------------------------------------------------------------------------- 1 | use http::Request; 2 | use tower_http::trace::MakeSpan; 3 | use 
tracing::subscriber::set_global_default; 4 | use tracing::{error_span, Span, Subscriber}; 5 | use tracing_bunyan_formatter::{BunyanFormattingLayer, JsonStorageLayer}; 6 | use tracing_log::LogTracer; 7 | use tracing_subscriber::fmt::MakeWriter; 8 | use tracing_subscriber::{layer::SubscriberExt, EnvFilter, Registry}; 9 | 10 | use crate::middleware::RequestId; 11 | 12 | /// Compose multiple layers into a `tracing`'s subscriber. 13 | /// 14 | /// # Implementation Notes 15 | /// 16 | /// We are using `impl Subscriber` as return type to avoid having to 17 | /// spell out the actual type of the returned subscriber, which is 18 | /// indeed quite complex. 19 | /// We need to explicitly call out that the returned subscriber is 20 | /// `Send` and `Sync` to make it possible to pass it to `init_subscriber` 21 | /// later on. 22 | pub fn get_subscriber( 23 | name: String, 24 | env_filter: String, 25 | sink: Sink, 26 | ) -> impl Subscriber + Sync + Send 27 | where 28 | Sink: for<'a> MakeWriter<'a> + Send + Sync + 'static, 29 | { 30 | let env_filter = 31 | EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new(env_filter)); 32 | 33 | let formatting_layer = BunyanFormattingLayer::new(name, sink); 34 | Registry::default() 35 | .with(env_filter) 36 | .with(JsonStorageLayer) 37 | .with(formatting_layer) 38 | } 39 | 40 | /// Register a subscriber as global default to process span data. 41 | /// 42 | /// It should only be called once! 
43 | pub fn init_subscriber(subscriber: impl Subscriber + Send + Sync) { 44 | LogTracer::init().expect("Failed to set logger"); 45 | set_global_default(subscriber).expect("Failed to set subscriber"); 46 | } 47 | 48 | #[derive(Clone, Copy)] 49 | pub(crate) struct TowerMakeSpanWithConstantId; 50 | 51 | impl MakeSpan for TowerMakeSpanWithConstantId { 52 | fn make_span(&mut self, request: &Request) -> Span { 53 | let request_id = request 54 | .extensions() 55 | .get::() 56 | .map(ToString::to_string) 57 | .unwrap_or_else(|| "unknown".into()); 58 | error_span!( 59 | "request", 60 | id = %request_id, 61 | method = %request.method(), 62 | uri = %request.uri(), 63 | ) 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /src/types/mod.rs: -------------------------------------------------------------------------------- 1 | // Setup From traits allowing the conversion between proto types and ethers types. 2 | // Reference: https://github.com/ledgerwatch/interfaces/blob/master/src/lib.rs 3 | use crate::rfq::*; 4 | use arrayref::array_ref; 5 | 6 | // Macro allowing for proto types to be converted into numbers (and vice versa), moving 7 | // through the fixed hash type first. 8 | macro_rules! into_from { 9 | ($proto:ty, $hash:ty, $num:ty) => { 10 | impl From<$num> for $proto { 11 | fn from(value: $num) -> Self { 12 | Self::from(<$hash>::from(<[u8; <$hash>::len_bytes()]>::from(value))) 13 | } 14 | } 15 | 16 | impl From<$proto> for $num { 17 | fn from(value: $proto) -> Self { 18 | Self::from(<$hash>::from(value).0) 19 | } 20 | } 21 | }; 22 | } 23 | 24 | // TODO handle H40, H96 and H160 25 | into_from!(H128, ethers::types::H128, ethers::types::U128); 26 | into_from!(H256, ethers::types::H256, ethers::types::U256); 27 | 28 | // Ethers will always upscale the types if required (i.e. 
it doesn't define a type small enough 29 | // for it) 30 | impl From for H40 { 31 | fn from(value: ethers::types::H64) -> Self { 32 | Self { 33 | hi: u32::from_be_bytes(*array_ref!(value, 0, 4)), 34 | lo: u32::from_be_bytes(*array_ref!(value, 4, 4)), 35 | } 36 | } 37 | } 38 | 39 | impl From for ethers::types::H64 { 40 | fn from(value: H40) -> Self { 41 | let mut v = [0; Self::len_bytes()]; 42 | v[..4].copy_from_slice(&value.hi.to_be_bytes()); 43 | v[4..].copy_from_slice(&value.lo.to_be_bytes()); 44 | v.into() 45 | } 46 | } 47 | 48 | // Ethers will always upscale the types if required (i.e. it doesn't define a type small enough 49 | // for it) 50 | impl From for H96 { 51 | fn from(value: ethers::types::H128) -> Self { 52 | Self { 53 | hi: u64::from_be_bytes(*array_ref!(value, 0, 8)), 54 | lo: u32::from_be_bytes(*array_ref!(value, 8, 4)), 55 | } 56 | } 57 | } 58 | 59 | impl From for ethers::types::H128 { 60 | fn from(value: H96) -> Self { 61 | let mut v = [0; Self::len_bytes()]; 62 | v[..8].copy_from_slice(&value.hi.to_be_bytes()); 63 | v[8..].copy_from_slice(&value.lo.to_be_bytes()); 64 | v.into() 65 | } 66 | } 67 | 68 | impl From for H128 { 69 | fn from(value: ethers::types::H128) -> Self { 70 | Self { 71 | hi: u64::from_be_bytes(*array_ref!(value, 0, 8)), 72 | lo: u64::from_be_bytes(*array_ref!(value, 8, 8)), 73 | } 74 | } 75 | } 76 | 77 | impl From for ethers::types::H128 { 78 | fn from(value: H128) -> Self { 79 | let mut v = [0; Self::len_bytes()]; 80 | v[..8].copy_from_slice(&value.hi.to_be_bytes()); 81 | v[8..].copy_from_slice(&value.lo.to_be_bytes()); 82 | v.into() 83 | } 84 | } 85 | 86 | impl From for H160 { 87 | fn from(value: ethers::types::H160) -> Self { 88 | Self { 89 | hi: Some(ethers::types::H128::from_slice(&value[..16]).into()), 90 | lo: u32::from_be_bytes(*array_ref!(value, 16, 4)), 91 | } 92 | } 93 | } 94 | 95 | impl From for ethers::types::H160 { 96 | fn from(value: H160) -> Self { 97 | type H = ethers::types::H128; 98 | 99 | let mut v = [0; 
Self::len_bytes()]; 100 | v[..H::len_bytes()].copy_from_slice(H::from(value.hi.unwrap_or_default()).as_fixed_bytes()); 101 | v[H::len_bytes()..].copy_from_slice(&value.lo.to_be_bytes()); 102 | 103 | v.into() 104 | } 105 | } 106 | 107 | impl From for H256 { 108 | fn from(value: ethers::types::H256) -> Self { 109 | Self { 110 | hi: Some(ethers::types::H128::from_slice(&value[..16]).into()), 111 | lo: Some(ethers::types::H128::from_slice(&value[16..]).into()), 112 | } 113 | } 114 | } 115 | 116 | impl From for ethers::types::H256 { 117 | fn from(value: H256) -> Self { 118 | type H = ethers::types::H128; 119 | 120 | let mut v = [0; Self::len_bytes()]; 121 | v[..H::len_bytes()].copy_from_slice(H::from(value.hi.unwrap_or_default()).as_fixed_bytes()); 122 | v[H::len_bytes()..].copy_from_slice(H::from(value.lo.unwrap_or_default()).as_fixed_bytes()); 123 | 124 | v.into() 125 | } 126 | } 127 | -------------------------------------------------------------------------------- /src/utils/mod.rs: -------------------------------------------------------------------------------- 1 | mod seaport; 2 | pub mod session_interceptor; 3 | 4 | pub use seaport::*; 5 | -------------------------------------------------------------------------------- /src/utils/seaport.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use ethers::{abi::AbiEncode, prelude::*}; 3 | 4 | pub fn token_ids_to_u256_abi_encoded(token_ids: &[String]) -> Result> { 5 | token_ids 6 | .iter() 7 | .map(|token_id| { 8 | Ok::( 9 | U256::from_str_radix(token_id, 10)?.encode_hex(), 10 | ) 11 | }) 12 | .collect() 13 | } 14 | -------------------------------------------------------------------------------- /src/utils/session_interceptor.rs: -------------------------------------------------------------------------------- 1 | use tonic::metadata::AsciiMetadataValue; 2 | use tonic::service::Interceptor; 3 | use tonic::{Request, Status}; 4 | 5 | const COOKIE_HEADER_KEY: &str 
= "cookie"; 6 | 7 | /// The Session Interceptor is a gRPC interceptor for the client to add session 8 | /// authentication details into the `request` header information such that the server can 9 | /// validate/confirm the client is using a valid session. 10 | #[derive(Default)] 11 | pub struct SessionInterceptor { 12 | pub session_cookie: String, 13 | } 14 | 15 | impl Interceptor for SessionInterceptor { 16 | fn call(&mut self, request: Request<()>) -> Result, Status> { 17 | // If we have established a session set the appropriate session headers before the request 18 | // goes to the server. 19 | let request = if !self.session_cookie.is_empty() { 20 | let mut request = request; 21 | 22 | // We should always be able to transform a String into an `AsciiMetadataValue` so its 23 | // safe to unwrap without checking. 24 | let cookie_value = AsciiMetadataValue::try_from(&self.session_cookie).unwrap(); 25 | 26 | // Insert the session cookie. 27 | request 28 | .metadata_mut() 29 | .insert(COOKIE_HEADER_KEY, cookie_value); 30 | request 31 | } else { 32 | request 33 | }; 34 | 35 | Ok(request) 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /tests/api/health_check.rs: -------------------------------------------------------------------------------- 1 | use crate::helpers::spawn_app; 2 | 3 | // `actix_rt::test` is the testing equivalent of `actix_web::main`. 4 | // It also spares you from having to specify the `#[test]` attribute. 
5 | // 6 | // Use `cargo add actix-rt --dev --vers 2` to add `actix-rt` 7 | // under `[dev-dependencies]` in Cargo.toml 8 | // 9 | // You can inspect what code gets generated using 10 | // `cargo expand --test health_check` (<- name of the test file) 11 | #[tokio::test] 12 | async fn health_check_works() { 13 | // Arrange 14 | let app = spawn_app().await; 15 | 16 | // Act 17 | let response = app 18 | .api 19 | // Use the returned application address 20 | .get(&format!("{}/health_check", &app.address)) 21 | .send() 22 | .await 23 | .expect("Failed to execute request."); 24 | 25 | // Assert 26 | assert!(response.status().is_success()); 27 | assert_eq!(Some(0), response.content_length()); 28 | } 29 | -------------------------------------------------------------------------------- /tests/api/helpers.rs: -------------------------------------------------------------------------------- 1 | use once_cell::sync::Lazy; 2 | use sqlx::{Connection, Executor, PgConnection, PgPool}; 3 | use uuid::Uuid; 4 | 5 | use quay::configuration::{get_configuration, DatabaseSettings}; 6 | use quay::startup::{get_connection_pool, Application}; 7 | use quay::telemetry::{get_subscriber, init_subscriber}; 8 | 9 | // Ensure that the `tracing` stack is only initialised once using `once_cell` 10 | static TRACING: Lazy<()> = Lazy::new(|| { 11 | let default_filter_level = "info".to_string(); 12 | let subscriber_name = "test".to_string(); 13 | // We cannot assign the output of `get_subscriber` to a variable based on the value of `TEST_LOG` 14 | // because the sink is part of the type returned by `get_subscriber`, therefore they are not the 15 | // same type. We could work around it, but this is the most straight-forward way of moving forward. 
16 | if std::env::var("TEST_LOG").is_ok() { 17 | let subscriber = get_subscriber(subscriber_name, default_filter_level, std::io::stdout); 18 | init_subscriber(subscriber); 19 | } else { 20 | let subscriber = get_subscriber(subscriber_name, default_filter_level, std::io::sink); 21 | init_subscriber(subscriber); 22 | }; 23 | }); 24 | 25 | pub struct TestApp { 26 | pub address: String, 27 | pub db_pool: PgPool, 28 | pub rpc: String, 29 | pub api: reqwest::Client, 30 | } 31 | 32 | pub async fn spawn_app() -> TestApp { 33 | Lazy::force(&TRACING); 34 | 35 | // Randomise configuration to ensure test isolation 36 | let configuration = { 37 | let mut c = get_configuration().expect("Failed to read configuration."); 38 | // Use a different database for each test case 39 | c.database.database_name = Uuid::new_v4().to_string(); 40 | // Use a random OS port 41 | c.application.port = 0; 42 | c 43 | }; 44 | 45 | // Create and migrate the database 46 | configure_database(&configuration.database).await; 47 | 48 | // Launch the application as a background task 49 | let application = Application::build(configuration.clone()) 50 | .await 51 | .expect("Failed to build application."); 52 | // Get the port before spawning the application 53 | let address = format!("http://127.0.0.1:{}", application.port()); 54 | let _ = tokio::spawn(application.run_until_stopped()); 55 | let api = reqwest::Client::builder() 56 | .cookie_store(true) 57 | .build() 58 | .unwrap(); 59 | TestApp { 60 | address, 61 | db_pool: get_connection_pool(&configuration.database), 62 | rpc: configuration.rpc.uri.clone(), 63 | api, 64 | } 65 | } 66 | 67 | async fn configure_database(config: &DatabaseSettings) -> PgPool { 68 | // Create database 69 | let mut connection = PgConnection::connect_with(&config.without_db()) 70 | .await 71 | .expect("Failed to connect to Postgres"); 72 | connection 73 | .execute(format!(r#"CREATE DATABASE "{}";"#, config.database_name).as_str()) 74 | .await 75 | .expect("Failed to create 
database."); 76 | 77 | // Migrate database 78 | let connection_pool = PgPool::connect_with(config.with_db()) 79 | .await 80 | .expect("Failed to connect to Postgres."); 81 | sqlx::migrate!("./migrations") 82 | .run(&connection_pool) 83 | .await 84 | .expect("Failed to migrate the database"); 85 | 86 | connection_pool 87 | } 88 | -------------------------------------------------------------------------------- /tests/api/listings.rs: -------------------------------------------------------------------------------- 1 | use crate::helpers::spawn_app; 2 | use ethers::prelude::{LocalWallet, Signer}; 3 | use quay::structs::OrderInput; 4 | use quay::structs::RetrieveResponse; 5 | use siwe::{TimeStamp, Version}; 6 | use std::str::FromStr; 7 | use time::OffsetDateTime; 8 | 9 | extern crate serde_json; 10 | 11 | // `actix_rt::test` is the testing equivalent of `actix_web::main`. 12 | // It also spares you from having to specify the `#[test]` attribute. 13 | // 14 | // Use `cargo add actix-rt --dev --vers 2` to add `actix-rt` 15 | // under `[dev-dependencies]` in Cargo.toml 16 | // 17 | // You can inspect what code gets generated using 18 | // `cargo expand --test health_check` (<- name of the test file) 19 | #[tokio::test] 20 | async fn create_and_retrieve_listing_works() { 21 | // Arrange 22 | let app = spawn_app().await; 23 | 24 | // Get the session nonce 25 | let nonce_response = app 26 | .api 27 | // Use the returned application address 28 | .get(&format!("{}/nonce", &app.address)) 29 | .send() 30 | .await 31 | .expect("Failed to execute request."); 32 | 33 | // Assert we got a nonce back 34 | assert!(nonce_response.status().is_success()); 35 | 36 | //let cookie = nonce_response.headers().get("set-cookie").unwrap().clone(); 37 | let nonce = nonce_response.text().await.unwrap(); 38 | 39 | // Setup a local wallet 40 | let wallet = 41 | LocalWallet::from_str("380eb0f3d505f087e438eca80bc4df9a7faa24f868e69fc0440261a0fc0567dc") 42 | .unwrap(); 43 | 44 | // Create a sign in with 
ethereum message 45 | let message = siwe::Message { 46 | domain: "localhost.com".parse().unwrap(), 47 | address: wallet.address().0, 48 | statement: None, 49 | uri: "http://localhost/".parse().unwrap(), 50 | version: Version::V1, 51 | chain_id: 1, 52 | nonce, 53 | issued_at: TimeStamp::from(OffsetDateTime::now_utc()), 54 | expiration_time: None, 55 | not_before: None, 56 | request_id: None, 57 | resources: vec![], 58 | }; 59 | 60 | let message_string = message.to_string(); 61 | 62 | // Generate a signature 63 | let signature = wallet 64 | .sign_message(message_string.as_bytes()) 65 | .await 66 | .unwrap(); 67 | 68 | // Get a string version of the signature 69 | let signature_string = signature.to_string(); 70 | 71 | let mut signed_message = serde_json::Map::new(); 72 | signed_message.insert( 73 | "signature".to_string(), 74 | serde_json::Value::from(signature_string), 75 | ); 76 | signed_message.insert( 77 | "message".to_string(), 78 | serde_json::Value::from(message_string), 79 | ); 80 | 81 | let json_body = serde_json::Value::from(signed_message); 82 | 83 | // Act 84 | let response = app 85 | .api 86 | // Use the returned application address 87 | .post(&format!("{}/verify", &app.address)) 88 | .json(&json_body) 89 | .send() 90 | .await 91 | .expect("Failed to execute request."); 92 | 93 | // Assert 94 | assert!(response.status().is_success()); 95 | 96 | // This listing sample came from the original API spec with the keys modified to match the rust 97 | // struct keys. 
98 | let listing_file = r#" 99 | { 100 | "parameters": { 101 | "offerer": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266", 102 | "zone": "0x004c00500000ad104d7dbd00e3ae0a5c00560c00", 103 | "zoneHash": "0x3000000000000000000000000000000000000000000000000000000000000000", 104 | "startTime": "0", 105 | "endTime": "1656044994000", 106 | "orderType": 0, 107 | "offer": [ 108 | { 109 | "itemType": 2, 110 | "token": "0x0165878A594ca255338adfa4d48449f69242Eb8F", 111 | "identifierOrCriteria": "1", 112 | "startAmount": "1", 113 | "endAmount": "1" 114 | } 115 | ], 116 | "consideration": [ 117 | { 118 | "itemType": 0, 119 | "token": "0x0000000000000000000000000000000000000000", 120 | "identifierOrCriteria": "0", 121 | "startAmount": "9750000000000000000", 122 | "endAmount": "9750000000000000000", 123 | "recipient": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266" 124 | }, 125 | { 126 | "itemType": 0, 127 | "token": "0x0000000000000000000000000000000000000000", 128 | "identifierOrCriteria": "0", 129 | "startAmount": "250000000000000000", 130 | "endAmount": "250000000000000000", 131 | "recipient": "0x70997970C51812dc3A010C7d01b50e0d17dc79C8" 132 | }, 133 | { 134 | "itemType": 0, 135 | "token": "0x0000000000000000000000000000000000000000", 136 | "identifierOrCriteria": "0", 137 | "startAmount": "500000000000000000", 138 | "endAmount": "500000000000000000", 139 | "recipient": "0x8a90cab2b38dba80c64b7734e58ee1db38b8992e" 140 | } 141 | ], 142 | "totalOriginalConsiderationItems": 2, 143 | "salt": "12686911856931635052326433555881236148", 144 | "conduitKey": "0x0000007b02230091a7ed01230072f7006a004d60a8d4e71d599b8104250f0000", 145 | "nonce": 0 146 | }, 147 | "signature": "0x" 148 | } 149 | "#; 150 | let json_body: OrderInput = serde_json::from_str(listing_file).expect("bad test file"); 151 | 152 | // Act 153 | let create_response = app 154 | .api 155 | // Use the returned application address 156 | .post(&format!("{}/listings", &app.address)) 157 | .json(&json_body) 158 | .send() 159 | .await 
160 | .expect("Failed to execute creation request."); 161 | 162 | // Assert 163 | assert!(create_response.status().is_success()); 164 | 165 | let retrieve_response = app 166 | .api 167 | // Use the returned application address 168 | .get(&format!("{}/listings?asset_contract_address=0x0165878A594ca255338adfa4d48449f69242Eb8F&token_ids=1", &app.address)) 169 | .send() 170 | .await 171 | .expect("Failed to execute retrieve request.") 172 | .json::() 173 | .await 174 | .expect("Failed to get retrieve request json result."); 175 | 176 | let _first_order = retrieve_response 177 | .orders 178 | .first() 179 | .expect("There should be at least 1 order.") 180 | .protocol_data 181 | .clone(); 182 | 183 | // assert_eq!(first_order, json_body); 184 | } 185 | 186 | #[tokio::test] 187 | async fn retrieve_listing_by_contract_address_works() { 188 | let app = spawn_app().await; 189 | 190 | // Get the session nonce 191 | let nonce_response = app 192 | .api 193 | // Use the returned application address 194 | .get(&format!("{}/nonce", &app.address)) 195 | .send() 196 | .await 197 | .expect("Failed to execute request."); 198 | 199 | // Assert we got a nonce back 200 | assert!(nonce_response.status().is_success()); 201 | 202 | //let cookie = nonce_response.headers().get("set-cookie").unwrap().clone(); 203 | let nonce = nonce_response.text().await.unwrap(); 204 | 205 | // Setup a local wallet 206 | let wallet = 207 | LocalWallet::from_str("380eb0f3d505f087e438eca80bc4df9a7faa24f868e69fc0440261a0fc0567dc") 208 | .unwrap(); 209 | 210 | // Create a sign in with ethereum message 211 | let message = siwe::Message { 212 | domain: "localhost.com".parse().unwrap(), 213 | address: wallet.address().0, 214 | statement: None, 215 | uri: "http://localhost/".parse().unwrap(), 216 | version: Version::V1, 217 | chain_id: 1, 218 | nonce, 219 | issued_at: TimeStamp::from(OffsetDateTime::now_utc()), 220 | expiration_time: None, 221 | not_before: None, 222 | request_id: None, 223 | resources: vec![], 224 | 
}; 225 | 226 | let message_string = message.to_string(); 227 | 228 | // Generate a signature 229 | let signature = wallet 230 | .sign_message(message_string.as_bytes()) 231 | .await 232 | .unwrap(); 233 | 234 | // Get a string version of the signature 235 | let signature_string = signature.to_string(); 236 | 237 | let mut signed_message = serde_json::Map::new(); 238 | signed_message.insert( 239 | "signature".to_string(), 240 | serde_json::Value::from(signature_string), 241 | ); 242 | signed_message.insert( 243 | "message".to_string(), 244 | serde_json::Value::from(message_string), 245 | ); 246 | 247 | let json_body = serde_json::Value::from(signed_message); 248 | 249 | // Act 250 | let response = app 251 | .api 252 | // Use the returned application address 253 | .post(&format!("{}/verify", &app.address)) 254 | .json(&json_body) 255 | .send() 256 | .await 257 | .expect("Failed to execute request."); 258 | 259 | // Assert 260 | assert!(response.status().is_success()); 261 | 262 | // This listing sample came from the original API spec with the keys modified to match the rust 263 | // struct keys. 
264 | let listing_file = r#" 265 | { 266 | "parameters": { 267 | "offerer": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266", 268 | "zone": "0x004c00500000ad104d7dbd00e3ae0a5c00560c00", 269 | "zoneHash": "0x3000000000000000000000000000000000000000000000000000000000000000", 270 | "startTime": "0", 271 | "endTime": "1656044994000", 272 | "orderType": 0, 273 | "offer": [ 274 | { 275 | "itemType": 2, 276 | "token": "0x0165878A594ca255338adfa4d48449f69242Eb8F", 277 | "identifierOrCriteria": "1", 278 | "startAmount": "1", 279 | "endAmount": "1" 280 | } 281 | ], 282 | "consideration": [ 283 | { 284 | "itemType": 0, 285 | "token": "0x0000000000000000000000000000000000000000", 286 | "identifierOrCriteria": "0", 287 | "startAmount": "9750000000000000000", 288 | "endAmount": "9750000000000000000", 289 | "recipient": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266" 290 | }, 291 | { 292 | "itemType": 0, 293 | "token": "0x0000000000000000000000000000000000000000", 294 | "identifierOrCriteria": "0", 295 | "startAmount": "250000000000000000", 296 | "endAmount": "250000000000000000", 297 | "recipient": "0x70997970C51812dc3A010C7d01b50e0d17dc79C8" 298 | }, 299 | { 300 | "itemType": 0, 301 | "token": "0x0000000000000000000000000000000000000000", 302 | "identifierOrCriteria": "0", 303 | "startAmount": "500000000000000000", 304 | "endAmount": "500000000000000000", 305 | "recipient": "0x8a90cab2b38dba80c64b7734e58ee1db38b8992e" 306 | } 307 | ], 308 | "totalOriginalConsiderationItems": 2, 309 | "salt": "12686911856931635052326433555881236148", 310 | "conduitKey": "0x0000007b02230091a7ed01230072f7006a004d60a8d4e71d599b8104250f0000", 311 | "nonce": 0 312 | }, 313 | "signature": "0x" 314 | } 315 | "#; 316 | let json_body: OrderInput = serde_json::from_str(listing_file).expect("bad test file"); 317 | 318 | // Act 319 | let create_response = app 320 | .api 321 | // Use the returned application address 322 | .post(&format!("{}/listings", &app.address)) 323 | .json(&json_body) 324 | .send() 325 | 
.await 326 | .expect("Failed to execute creation request."); 327 | 328 | // Assert 329 | assert!(create_response.status().is_success()); 330 | 331 | let retrieve_response = app 332 | .api 333 | // Use the returned application address 334 | .get(&format!( 335 | "{}/listings?asset_contract_address=0x0165878A594ca255338adfa4d48449f69242Eb8F", 336 | &app.address 337 | )) 338 | .send() 339 | .await 340 | .expect("Failed to execute retrieve request.") 341 | .json::() 342 | .await 343 | .expect("Failed to get retrieve request json result."); 344 | 345 | let _first_order = retrieve_response 346 | .orders 347 | .first() 348 | .expect("There should be at least 1 order.") 349 | .protocol_data 350 | .clone(); 351 | } 352 | 353 | #[tokio::test] 354 | async fn retrieve_listing_by_offerer_works() { 355 | let app = spawn_app().await; 356 | 357 | // Get the session nonce 358 | let nonce_response = app 359 | .api 360 | // Use the returned application address 361 | .get(&format!("{}/nonce", &app.address)) 362 | .send() 363 | .await 364 | .expect("Failed to execute request."); 365 | 366 | // Assert we got a nonce back 367 | assert!(nonce_response.status().is_success()); 368 | 369 | //let cookie = nonce_response.headers().get("set-cookie").unwrap().clone(); 370 | let nonce = nonce_response.text().await.unwrap(); 371 | 372 | // Setup a local wallet 373 | let wallet = 374 | LocalWallet::from_str("380eb0f3d505f087e438eca80bc4df9a7faa24f868e69fc0440261a0fc0567dc") 375 | .unwrap(); 376 | 377 | // Create a sign in with ethereum message 378 | let message = siwe::Message { 379 | domain: "localhost.com".parse().unwrap(), 380 | address: wallet.address().0, 381 | statement: None, 382 | uri: "http://localhost/".parse().unwrap(), 383 | version: Version::V1, 384 | chain_id: 1, 385 | nonce, 386 | issued_at: TimeStamp::from(OffsetDateTime::now_utc()), 387 | expiration_time: None, 388 | not_before: None, 389 | request_id: None, 390 | resources: vec![], 391 | }; 392 | 393 | let message_string = 
message.to_string(); 394 | 395 | // Generate a signature 396 | let signature = wallet 397 | .sign_message(message_string.as_bytes()) 398 | .await 399 | .unwrap(); 400 | 401 | // Get a string version of the signature 402 | let signature_string = signature.to_string(); 403 | 404 | let mut signed_message = serde_json::Map::new(); 405 | signed_message.insert( 406 | "signature".to_string(), 407 | serde_json::Value::from(signature_string), 408 | ); 409 | signed_message.insert( 410 | "message".to_string(), 411 | serde_json::Value::from(message_string), 412 | ); 413 | 414 | let json_body = serde_json::Value::from(signed_message); 415 | 416 | // Act 417 | let response = app 418 | .api 419 | // Use the returned application address 420 | .post(&format!("{}/verify", &app.address)) 421 | .json(&json_body) 422 | .send() 423 | .await 424 | .expect("Failed to execute request."); 425 | 426 | // Assert 427 | assert!(response.status().is_success()); 428 | 429 | // This listing sample came from the original API spec with the keys modified to match the rust 430 | // struct keys. 
431 | let listing_file = r#" 432 | { 433 | "parameters": { 434 | "offerer": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266", 435 | "zone": "0x004c00500000ad104d7dbd00e3ae0a5c00560c00", 436 | "zoneHash": "0x3000000000000000000000000000000000000000000000000000000000000000", 437 | "startTime": "0", 438 | "endTime": "1656044994000", 439 | "orderType": 0, 440 | "offer": [ 441 | { 442 | "itemType": 2, 443 | "token": "0x0165878A594ca255338adfa4d48449f69242Eb8F", 444 | "identifierOrCriteria": "1", 445 | "startAmount": "1", 446 | "endAmount": "1" 447 | } 448 | ], 449 | "consideration": [ 450 | { 451 | "itemType": 0, 452 | "token": "0x0000000000000000000000000000000000000000", 453 | "identifierOrCriteria": "0", 454 | "startAmount": "9750000000000000000", 455 | "endAmount": "9750000000000000000", 456 | "recipient": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266" 457 | }, 458 | { 459 | "itemType": 0, 460 | "token": "0x0000000000000000000000000000000000000000", 461 | "identifierOrCriteria": "0", 462 | "startAmount": "250000000000000000", 463 | "endAmount": "250000000000000000", 464 | "recipient": "0x70997970C51812dc3A010C7d01b50e0d17dc79C8" 465 | }, 466 | { 467 | "itemType": 0, 468 | "token": "0x0000000000000000000000000000000000000000", 469 | "identifierOrCriteria": "0", 470 | "startAmount": "500000000000000000", 471 | "endAmount": "500000000000000000", 472 | "recipient": "0x8a90cab2b38dba80c64b7734e58ee1db38b8992e" 473 | } 474 | ], 475 | "totalOriginalConsiderationItems": 2, 476 | "salt": "12686911856931635052326433555881236148", 477 | "conduitKey": "0x0000007b02230091a7ed01230072f7006a004d60a8d4e71d599b8104250f0000", 478 | "nonce": 0 479 | }, 480 | "signature": "0x" 481 | } 482 | "#; 483 | let json_body: OrderInput = serde_json::from_str(listing_file).expect("bad test file"); 484 | 485 | // Act 486 | let create_response = app 487 | .api 488 | // Use the returned application address 489 | .post(&format!("{}/listings", &app.address)) 490 | .json(&json_body) 491 | .send() 492 | 
.await 493 | .expect("Failed to execute creation request."); 494 | 495 | // Assert 496 | assert!(create_response.status().is_success()); 497 | 498 | let retrieve_response = app 499 | .api 500 | // Use the returned application address 501 | .get(&format!( 502 | "{}/listings?offerer=0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266", 503 | &app.address 504 | )) 505 | .send() 506 | .await 507 | .expect("Failed to execute retrieve request.") 508 | .json::() 509 | .await 510 | .expect("Failed to get retrieve request json result."); 511 | 512 | let _first_order = retrieve_response 513 | .orders 514 | .first() 515 | .expect("There should be at least 1 order.") 516 | .protocol_data 517 | .clone(); 518 | } 519 | -------------------------------------------------------------------------------- /tests/api/main.rs: -------------------------------------------------------------------------------- 1 | mod health_check; 2 | mod helpers; 3 | mod listings; 4 | mod offers; 5 | mod rpc_sessions; 6 | mod sessions; 7 | -------------------------------------------------------------------------------- /tests/api/offers.rs: -------------------------------------------------------------------------------- 1 | use crate::helpers::spawn_app; 2 | use ethers::prelude::{LocalWallet, Signer}; 3 | use quay::structs::OrderInput; 4 | use quay::structs::RetrieveResponse; 5 | use siwe::{TimeStamp, Version}; 6 | use std::str::FromStr; 7 | use time::OffsetDateTime; 8 | 9 | extern crate serde_json; 10 | 11 | // `actix_rt::test` is the testing equivalent of `actix_web::main`. 12 | // It also spares you from having to specify the `#[test]` attribute. 
13 | // 14 | // Use `cargo add actix-rt --dev --vers 2` to add `actix-rt` 15 | // under `[dev-dependencies]` in Cargo.toml 16 | // 17 | // You can inspect what code gets generated using 18 | // `cargo expand --test health_check` (<- name of the test file) 19 | #[tokio::test] 20 | async fn create_and_retrieve_offer_works() { 21 | // Arrange 22 | let app = spawn_app().await; 23 | 24 | // Get the session nonce 25 | let nonce_response = app 26 | .api 27 | // Use the returned application address 28 | .get(&format!("{}/nonce", &app.address)) 29 | .send() 30 | .await 31 | .expect("Failed to execute request."); 32 | 33 | // Assert we got a nonce back 34 | assert!(nonce_response.status().is_success()); 35 | 36 | //let cookie = nonce_response.headers().get("set-cookie").unwrap().clone(); 37 | let nonce = nonce_response.text().await.unwrap(); 38 | 39 | // Setup a local wallet 40 | let wallet = 41 | LocalWallet::from_str("380eb0f3d505f087e438eca80bc4df9a7faa24f868e69fc0440261a0fc0567dc") 42 | .unwrap(); 43 | 44 | // Create a sign in with ethereum message 45 | let message = siwe::Message { 46 | domain: "localhost.com".parse().unwrap(), 47 | address: wallet.address().0, 48 | statement: None, 49 | uri: "http://localhost/".parse().unwrap(), 50 | version: Version::V1, 51 | chain_id: 1, 52 | nonce, 53 | issued_at: TimeStamp::from(OffsetDateTime::now_utc()), 54 | expiration_time: None, 55 | not_before: None, 56 | request_id: None, 57 | resources: vec![], 58 | }; 59 | 60 | let message_string = message.to_string(); 61 | 62 | // Generate a signature 63 | let signature = wallet 64 | .sign_message(message_string.as_bytes()) 65 | .await 66 | .unwrap(); 67 | 68 | // Get a string version of the signature 69 | let signature_string = signature.to_string(); 70 | 71 | let mut signed_message = serde_json::Map::new(); 72 | signed_message.insert( 73 | "signature".to_string(), 74 | serde_json::Value::from(signature_string), 75 | ); 76 | signed_message.insert( 77 | "message".to_string(), 78 | 
serde_json::Value::from(message_string), 79 | ); 80 | 81 | let json_body = serde_json::Value::from(signed_message); 82 | 83 | // Act 84 | let response = app 85 | .api 86 | // Use the returned application address 87 | .post(&format!("{}/verify", &app.address)) 88 | .json(&json_body) 89 | .send() 90 | .await 91 | .expect("Failed to execute request."); 92 | 93 | // Assert 94 | assert!(response.status().is_success()); 95 | 96 | // This pffer sample came from the original API spec with the keys modified to match the rust 97 | // struct keys. 98 | let offer_file = r#" 99 | { 100 | "parameters": { 101 | "offerer": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266", 102 | "zone": "0x004c00500000ad104d7dbd00e3ae0a5c00560c00", 103 | "zoneHash": "0x3000000000000000000000000000000000000000000000000000000000000000", 104 | "startTime": "0", 105 | "endTime": "1656044994000", 106 | "orderType": 0, 107 | "offer": [ 108 | { 109 | "itemType": 0, 110 | "token": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2", 111 | "identifierOrCriteria": "0", 112 | "startAmount": "10000000000000000000", 113 | "endAmount": "10000000000000000000" 114 | } 115 | ], 116 | "consideration": [ 117 | { 118 | "itemType": 2, 119 | "token": "0x0165878A594ca255338adfa4d48449f69242Eb8F", 120 | "identifierOrCriteria": "1", 121 | "startAmount": "1", 122 | "endAmount": "1", 123 | "recipient": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266" 124 | }, 125 | { 126 | "itemType": 0, 127 | "token": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2", 128 | "identifierOrCriteria": "0", 129 | "startAmount": "250000000000000000", 130 | "endAmount": "250000000000000000", 131 | "recipient": "0x70997970C51812dc3A010C7d01b50e0d17dc79C8" 132 | }, 133 | { 134 | "itemType": 0, 135 | "token": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2", 136 | "identifierOrCriteria": "0", 137 | "startAmount": "500000000000000000", 138 | "endAmount": "500000000000000000", 139 | "recipient": "0x8a90cab2b38dba80c64b7734e58ee1db38b8992e" 140 | } 141 | ], 142 | 
"totalOriginalConsiderationItems": 2, 143 | "salt": "12686911856931635052326433555881236148", 144 | "conduitKey": "0x0000007b02230091a7ed01230072f7006a004d60a8d4e71d599b8104250f0000", 145 | "nonce": 0 146 | }, 147 | "signature": "0x" 148 | } 149 | "#; 150 | 151 | let json_body: OrderInput = serde_json::from_str(offer_file).expect("bad test file"); 152 | 153 | // Act 154 | let create_response = app 155 | .api 156 | // Use the returned application address 157 | .post(&format!("{}/offers", &app.address)) 158 | .json(&json_body) 159 | .send() 160 | .await 161 | .expect("Failed to execute create request."); 162 | 163 | // Assert 164 | assert!(create_response.status().is_success()); 165 | 166 | let retrieve_response = app 167 | .api 168 | // Use the returned application address 169 | .get(&format!("{}/offers?asset_contract_address=0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2&token_ids=0", &app.address)) 170 | .send() 171 | .await 172 | .expect("Failed to execute retrieve request.") 173 | .json::() 174 | .await 175 | .expect("Failed to get retrieve request json result."); 176 | 177 | let _first_order = retrieve_response 178 | .orders 179 | .first() 180 | .expect("There should be at least 1 order.") 181 | .protocol_data 182 | .clone(); 183 | 184 | //assert_eq!(first_order, json_body); 185 | } 186 | 187 | #[tokio::test] 188 | async fn retrieve_offer_by_offerer_works() { 189 | // Arrange 190 | let app = spawn_app().await; 191 | 192 | // Get the session nonce 193 | let nonce_response = app 194 | .api 195 | // Use the returned application address 196 | .get(&format!("{}/nonce", &app.address)) 197 | .send() 198 | .await 199 | .expect("Failed to execute request."); 200 | 201 | // Assert we got a nonce back 202 | assert!(nonce_response.status().is_success()); 203 | 204 | //let cookie = nonce_response.headers().get("set-cookie").unwrap().clone(); 205 | let nonce = nonce_response.text().await.unwrap(); 206 | 207 | // Setup a local wallet 208 | let wallet = 209 | 
LocalWallet::from_str("380eb0f3d505f087e438eca80bc4df9a7faa24f868e69fc0440261a0fc0567dc") 210 | .unwrap(); 211 | 212 | // Create a sign in with ethereum message 213 | let message = siwe::Message { 214 | domain: "localhost.com".parse().unwrap(), 215 | address: wallet.address().0, 216 | statement: None, 217 | uri: "http://localhost/".parse().unwrap(), 218 | version: Version::V1, 219 | chain_id: 1, 220 | nonce, 221 | issued_at: TimeStamp::from(OffsetDateTime::now_utc()), 222 | expiration_time: None, 223 | not_before: None, 224 | request_id: None, 225 | resources: vec![], 226 | }; 227 | 228 | let message_string = message.to_string(); 229 | 230 | // Generate a signature 231 | let signature = wallet 232 | .sign_message(message_string.as_bytes()) 233 | .await 234 | .unwrap(); 235 | 236 | // Get a string version of the signature 237 | let signature_string = signature.to_string(); 238 | 239 | let mut signed_message = serde_json::Map::new(); 240 | signed_message.insert( 241 | "signature".to_string(), 242 | serde_json::Value::from(signature_string), 243 | ); 244 | signed_message.insert( 245 | "message".to_string(), 246 | serde_json::Value::from(message_string), 247 | ); 248 | 249 | let json_body = serde_json::Value::from(signed_message); 250 | 251 | // Act 252 | let response = app 253 | .api 254 | // Use the returned application address 255 | .post(&format!("{}/verify", &app.address)) 256 | .json(&json_body) 257 | .send() 258 | .await 259 | .expect("Failed to execute request."); 260 | 261 | // Assert 262 | assert!(response.status().is_success()); 263 | 264 | // This pffer sample came from the original API spec with the keys modified to match the rust 265 | // struct keys. 
266 | let offer_file = r#" 267 | { 268 | "parameters": { 269 | "offerer": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266", 270 | "zone": "0x004c00500000ad104d7dbd00e3ae0a5c00560c00", 271 | "zoneHash": "0x3000000000000000000000000000000000000000000000000000000000000000", 272 | "startTime": "0", 273 | "endTime": "1656044994000", 274 | "orderType": 0, 275 | "offer": [ 276 | { 277 | "itemType": 0, 278 | "token": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2", 279 | "identifierOrCriteria": "0", 280 | "startAmount": "10000000000000000000", 281 | "endAmount": "10000000000000000000" 282 | } 283 | ], 284 | "consideration": [ 285 | { 286 | "itemType": 2, 287 | "token": "0x0165878A594ca255338adfa4d48449f69242Eb8F", 288 | "identifierOrCriteria": "1", 289 | "startAmount": "1", 290 | "endAmount": "1", 291 | "recipient": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266" 292 | }, 293 | { 294 | "itemType": 0, 295 | "token": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2", 296 | "identifierOrCriteria": "0", 297 | "startAmount": "250000000000000000", 298 | "endAmount": "250000000000000000", 299 | "recipient": "0x70997970C51812dc3A010C7d01b50e0d17dc79C8" 300 | }, 301 | { 302 | "itemType": 0, 303 | "token": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2", 304 | "identifierOrCriteria": "0", 305 | "startAmount": "500000000000000000", 306 | "endAmount": "500000000000000000", 307 | "recipient": "0x8a90cab2b38dba80c64b7734e58ee1db38b8992e" 308 | } 309 | ], 310 | "totalOriginalConsiderationItems": 2, 311 | "salt": "12686911856931635052326433555881236148", 312 | "conduitKey": "0x0000007b02230091a7ed01230072f7006a004d60a8d4e71d599b8104250f0000", 313 | "nonce": 0 314 | }, 315 | "signature": "0x" 316 | } 317 | "#; 318 | 319 | let json_body: OrderInput = serde_json::from_str(offer_file).expect("bad test file"); 320 | 321 | // Act 322 | let create_response = app 323 | .api 324 | // Use the returned application address 325 | .post(&format!("{}/offers", &app.address)) 326 | .json(&json_body) 327 | .send() 328 | 
.await 329 | .expect("Failed to execute create request."); 330 | 331 | // Assert 332 | assert!(create_response.status().is_success()); 333 | 334 | let retrieve_response = app 335 | .api 336 | // Use the returned application address 337 | .get(&format!( 338 | "{}/offers?offerer=0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266", 339 | &app.address 340 | )) 341 | .send() 342 | .await 343 | .expect("Failed to execute retrieve request.") 344 | .json::() 345 | .await 346 | .expect("Failed to get retrieve request json result."); 347 | 348 | let _first_order = retrieve_response 349 | .orders 350 | .first() 351 | .expect("There should be at least 1 order.") 352 | .protocol_data 353 | .clone(); 354 | } 355 | -------------------------------------------------------------------------------- /tests/api/rpc_sessions.rs: -------------------------------------------------------------------------------- 1 | use crate::helpers::spawn_app; 2 | use ethers::signers::{LocalWallet, Signer}; 3 | use http::Uri; 4 | use quay::session::session_client::SessionClient; 5 | use quay::session::{Empty, VerifyText}; 6 | use quay::utils::session_interceptor::SessionInterceptor; 7 | use siwe::{TimeStamp, Version}; 8 | use std::str::FromStr; 9 | use time::OffsetDateTime; 10 | use tonic::transport::Channel; 11 | 12 | const SESSION_COOKIE_KEY: &str = "set-cookie"; 13 | 14 | #[tokio::test] 15 | async fn verify_session_works() { 16 | // Arrange 17 | let app = spawn_app().await; 18 | 19 | let mut client = SessionClient::new( 20 | Channel::builder(app.address.parse::().unwrap()) 21 | .connect() 22 | .await 23 | .unwrap(), 24 | ); 25 | 26 | // Get the session nonce 27 | let nonce_response = client.nonce(Empty::default()).await; 28 | 29 | // Assert we got a nonce back 30 | assert!(nonce_response.is_ok()); 31 | let nonce_response = nonce_response.unwrap(); 32 | 33 | // Fetch the session details and nonce. 
34 | let session_cookie = nonce_response 35 | .metadata() 36 | .get(SESSION_COOKIE_KEY) 37 | .expect("Session cookie was not returned in Nonce response") 38 | .to_str() 39 | .expect("Unable to fetch session cookie from Nonce response") 40 | .to_string(); 41 | 42 | let nonce = nonce_response.into_inner().nonce; 43 | 44 | // Setup the session client with our newly created session. 45 | let mut client = SessionClient::with_interceptor( 46 | Channel::builder(app.address.parse::().unwrap()) 47 | .connect() 48 | .await 49 | .unwrap(), 50 | SessionInterceptor { session_cookie }, 51 | ); 52 | 53 | // Setup a local wallet 54 | let wallet = 55 | LocalWallet::from_str("380eb0f3d505f087e438eca80bc4df9a7faa24f868e69fc0440261a0fc0567dc") 56 | .unwrap(); 57 | 58 | // Create a sign in with ethereum message 59 | let message = siwe::Message { 60 | domain: "localhost.com".parse().unwrap(), 61 | address: wallet.address().0, 62 | statement: None, 63 | uri: "http://localhost/".parse().unwrap(), 64 | version: Version::V1, 65 | chain_id: 1, 66 | nonce, 67 | issued_at: TimeStamp::from(OffsetDateTime::now_utc()), 68 | expiration_time: None, 69 | not_before: None, 70 | request_id: None, 71 | resources: vec![], 72 | }; 73 | 74 | let message_string = message.to_string(); 75 | 76 | // Generate a signature 77 | let signature = wallet 78 | .sign_message(message_string.as_bytes()) 79 | .await 80 | .unwrap(); 81 | 82 | // Get a string version of the signature 83 | let signature_string = signature.to_string(); 84 | 85 | let mut signed_message = serde_json::Map::new(); 86 | signed_message.insert( 87 | "signature".to_string(), 88 | serde_json::Value::from(signature_string), 89 | ); 90 | signed_message.insert( 91 | "message".to_string(), 92 | serde_json::Value::from(message_string), 93 | ); 94 | 95 | let json_body = serde_json::Value::from(signed_message); 96 | 97 | // Act 98 | let response = client 99 | .verify(VerifyText { 100 | body: json_body.to_string(), 101 | }) 102 | .await; 103 | 104 | // 
Assert 105 | assert!(response.is_ok()); 106 | 107 | // Check that we have an authenticated session 108 | let response = client.authenticate(Empty::default()).await; 109 | 110 | // Assert 111 | assert!(response.is_ok()); 112 | } 113 | -------------------------------------------------------------------------------- /tests/api/sessions.rs: -------------------------------------------------------------------------------- 1 | use crate::helpers::spawn_app; 2 | use ethers::signers::{LocalWallet, Signer}; 3 | use siwe::{TimeStamp, Version}; 4 | use std::str::FromStr; 5 | use time::OffsetDateTime; 6 | 7 | #[tokio::test] 8 | async fn verify_session_works() { 9 | // Arrange 10 | let app = spawn_app().await; 11 | 12 | // Get the session nonce 13 | let nonce_response = app 14 | .api 15 | // Use the returned application address 16 | .get(&format!("{}/nonce", &app.address)) 17 | .send() 18 | .await 19 | .expect("Failed to execute request."); 20 | 21 | // Assert we got a nonce back 22 | assert!(nonce_response.status().is_success()); 23 | 24 | //let cookie = nonce_response.headers().get("set-cookie").unwrap().clone(); 25 | let nonce = nonce_response.text().await.unwrap(); 26 | 27 | // Setup a local wallet 28 | let wallet = 29 | LocalWallet::from_str("380eb0f3d505f087e438eca80bc4df9a7faa24f868e69fc0440261a0fc0567dc") 30 | .unwrap(); 31 | 32 | // Create a sign in with ethereum message 33 | let message = siwe::Message { 34 | domain: "localhost.com".parse().unwrap(), 35 | address: wallet.address().0, 36 | statement: None, 37 | uri: "http://localhost/".parse().unwrap(), 38 | version: Version::V1, 39 | chain_id: 1, 40 | nonce, 41 | issued_at: TimeStamp::from(OffsetDateTime::now_utc()), 42 | expiration_time: None, 43 | not_before: None, 44 | request_id: None, 45 | resources: vec![], 46 | }; 47 | 48 | let message_string = message.to_string(); 49 | 50 | // Generate a signature 51 | let signature = wallet 52 | .sign_message(message_string.as_bytes()) 53 | .await 54 | .unwrap(); 55 | 56 | // 
Get a string version of the signature 57 | let signature_string = signature.to_string(); 58 | 59 | let mut signed_message = serde_json::Map::new(); 60 | signed_message.insert( 61 | "signature".to_string(), 62 | serde_json::Value::from(signature_string), 63 | ); 64 | signed_message.insert( 65 | "message".to_string(), 66 | serde_json::Value::from(message_string), 67 | ); 68 | 69 | let json_body = serde_json::Value::from(signed_message); 70 | 71 | // Act 72 | let response = app 73 | .api 74 | // Use the returned application address 75 | .post(&format!("{}/verify", &app.address)) 76 | .json(&json_body) 77 | .send() 78 | .await 79 | .expect("Failed to execute request."); 80 | 81 | // Assert 82 | assert!(response.status().is_success()); 83 | 84 | // Check that we have an authenticated session 85 | let response = app 86 | .api 87 | // Use the returned application address 88 | .get(&format!("{}/authenticate", &app.address)) 89 | .send() 90 | .await 91 | .expect("Failed to execute request."); 92 | 93 | // Assert 94 | assert!(response.status().is_success()); 95 | } 96 | -------------------------------------------------------------------------------- /tests/test_offer.json: -------------------------------------------------------------------------------- 1 | { 2 | "parameters": { 3 | "offerer": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266", 4 | "zone": "0x004c00500000ad104d7dbd00e3ae0a5c00560c00", 5 | "zone_hash": [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], 6 | "start_time": "0", 7 | "end_time": "1656044994000", 8 | "order_type": 0, 9 | "offer": [ 10 | { 11 | "item_type": 0, 12 | "token": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2", 13 | "identifier_or_criteria": "0", 14 | "start_amount": "10000000000000000000", 15 | "end_amount": "10000000000000000000" 16 | } 17 | ], 18 | "consideration": [ 19 | { 20 | "item_type": 2, 21 | "token": "0x0165878A594ca255338adfa4d48449f69242Eb8F", 22 | "identifier_or_criteria": "1", 23 | "start_amount": "1", 24 | 
"end_amount": "1", 25 | "recipient": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266" 26 | }, 27 | { 28 | "item_type": 0, 29 | "token": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2", 30 | "identifier_or_criteria": "0", 31 | "start_amount": "250000000000000000", 32 | "end_amount": "250000000000000000", 33 | "recipient": "0x70997970C51812dc3A010C7d01b50e0d17dc79C8" 34 | }, 35 | { 36 | "item_type": 0, 37 | "token": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2", 38 | "identifier_or_criteria": "0", 39 | "start_amount": "500000000000000000", 40 | "end_amount": "500000000000000000", 41 | "recipient": "0x8a90cab2b38dba80c64b7734e58ee1db38b8992e" 42 | } 43 | ], 44 | "total_original_consideration_items": "2", 45 | "salt": "12686911856931635052326433555881236148", 46 | "conduit_key": [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], 47 | "nonce": 0 48 | }, 49 | "signature": "0x" 50 | } -------------------------------------------------------------------------------- /tests/test_session.json: -------------------------------------------------------------------------------- 1 | { 2 | "message": "localhost.com wants you to sign in with your Ethereum account:\n0x3cDB3d9e1B74692Bb1E3bb5fc81938151cA64b02\n\n\nURI: http://localhost/\nVersion: 1\nChain ID: 1\nNonce: XFCvXASVe1R\nIssued At: 2022-08-23T23:04:43.375829Z", 3 | "signature": "1f4ef4b4ebd62e012355e9d510a7f548e2720b9aff73cf8d75180a22db35275c251a81cf831a687c801e1f867bff50e62af688f9c9739cab34cbfa92eb2b01721b" 4 | } 5 | --------------------------------------------------------------------------------